1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright IBM Corp. 2007, 2009 4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, 5 * Frank Pavlic <fpavlic@de.ibm.com>, 6 * Thomas Spatzier <tspat@de.ibm.com>, 7 * Frank Blaschka <frank.blaschka@de.ibm.com> 8 */ 9 10 #define KMSG_COMPONENT "qeth" 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 13 #include <linux/compat.h> 14 #include <linux/module.h> 15 #include <linux/moduleparam.h> 16 #include <linux/string.h> 17 #include <linux/errno.h> 18 #include <linux/kernel.h> 19 #include <linux/log2.h> 20 #include <linux/io.h> 21 #include <linux/ip.h> 22 #include <linux/tcp.h> 23 #include <linux/mii.h> 24 #include <linux/mm.h> 25 #include <linux/kthread.h> 26 #include <linux/slab.h> 27 #include <linux/if_vlan.h> 28 #include <linux/netdevice.h> 29 #include <linux/netdev_features.h> 30 #include <linux/rcutree.h> 31 #include <linux/skbuff.h> 32 #include <linux/vmalloc.h> 33 34 #include <net/iucv/af_iucv.h> 35 #include <net/dsfield.h> 36 37 #include <asm/ebcdic.h> 38 #include <asm/chpid.h> 39 #include <asm/sysinfo.h> 40 #include <asm/diag.h> 41 #include <asm/cio.h> 42 #include <asm/ccwdev.h> 43 #include <asm/cpcmd.h> 44 45 #include "qeth_core.h" 46 47 struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { 48 /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ 49 /* N P A M L V H */ 50 [QETH_DBF_SETUP] = {"qeth_setup", 51 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, 52 [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3, 53 &debug_sprintf_view, NULL}, 54 [QETH_DBF_CTRL] = {"qeth_control", 55 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, 56 }; 57 EXPORT_SYMBOL_GPL(qeth_dbf); 58 59 struct kmem_cache *qeth_core_header_cache; 60 EXPORT_SYMBOL_GPL(qeth_core_header_cache); 61 static struct kmem_cache *qeth_qdio_outbuf_cache; 62 63 static struct device *qeth_core_root_dev; 64 static struct dentry *qeth_debugfs_root; 65 static struct lock_class_key qdio_out_skb_queue_key; 66 67 static void qeth_issue_next_read_cb(struct qeth_card *card, 68 struct qeth_cmd_buffer *iob, 69 unsigned int data_length); 70 static int qeth_qdio_establish(struct qeth_card *); 71 static void qeth_free_qdio_queues(struct qeth_card *card); 72 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, 73 struct qeth_qdio_out_buffer *buf, 74 enum iucv_tx_notify notification); 75 static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error, 76 int budget); 77 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); 78 79 static void qeth_close_dev_handler(struct work_struct *work) 80 { 81 struct qeth_card *card; 82 83 card = container_of(work, struct qeth_card, close_dev_work); 84 QETH_CARD_TEXT(card, 2, "cldevhdl"); 85 ccwgroup_set_offline(card->gdev); 86 } 87 88 static const char *qeth_get_cardname(struct qeth_card *card) 89 { 90 if (IS_VM_NIC(card)) { 91 switch (card->info.type) { 92 case QETH_CARD_TYPE_OSD: 93 return " Virtual NIC QDIO"; 94 case QETH_CARD_TYPE_IQD: 95 return " Virtual NIC Hiper"; 96 case QETH_CARD_TYPE_OSM: 97 return " Virtual NIC QDIO - OSM"; 98 case QETH_CARD_TYPE_OSX: 99 return " Virtual NIC QDIO - OSX"; 100 default: 101 return " unknown"; 102 } 103 } else { 104 switch (card->info.type) { 105 case QETH_CARD_TYPE_OSD: 106 return " OSD Express"; 107 case QETH_CARD_TYPE_IQD: 108 return " HiperSockets"; 109 case QETH_CARD_TYPE_OSN: 110 return " OSN QDIO"; 111 case QETH_CARD_TYPE_OSM: 112 return " OSM QDIO"; 113 case QETH_CARD_TYPE_OSX: 114 return " OSX QDIO"; 115 default: 116 return " unknown"; 117 } 118 } 119 return " n/a"; 120 } 121 122 /* max length to be returned: 14 */ 123 const char *qeth_get_cardname_short(struct qeth_card *card) 124 { 125 if (IS_VM_NIC(card)) { 126 switch (card->info.type) { 127 case QETH_CARD_TYPE_OSD: 128 return "Virt.NIC QDIO"; 129 case QETH_CARD_TYPE_IQD: 130 return "Virt.NIC Hiper"; 131 case QETH_CARD_TYPE_OSM: 132 return "Virt.NIC OSM"; 133 case QETH_CARD_TYPE_OSX: 134 return "Virt.NIC OSX"; 135 default: 136 return "unknown"; 137 } 138 } else { 139 switch (card->info.type) { 140 case QETH_CARD_TYPE_OSD: 141 switch (card->info.link_type) { 142 case QETH_LINK_TYPE_FAST_ETH: 143 return "OSD_100"; 144 case QETH_LINK_TYPE_HSTR: 145 return "HSTR"; 146 case QETH_LINK_TYPE_GBIT_ETH: 147 return "OSD_1000"; 148 case QETH_LINK_TYPE_10GBIT_ETH: 149 return "OSD_10GIG"; 150 case QETH_LINK_TYPE_25GBIT_ETH: 151 return "OSD_25GIG"; 152 case QETH_LINK_TYPE_LANE_ETH100: 153 return "OSD_FE_LANE"; 154 case QETH_LINK_TYPE_LANE_TR: 155 return "OSD_TR_LANE"; 156 case QETH_LINK_TYPE_LANE_ETH1000: 157 return "OSD_GbE_LANE"; 158 case QETH_LINK_TYPE_LANE: 159 return "OSD_ATM_LANE"; 160 default: 161 return "OSD_Express"; 162 } 163 case QETH_CARD_TYPE_IQD: 164 return "HiperSockets"; 165 case QETH_CARD_TYPE_OSN: 166 return "OSN"; 167 case QETH_CARD_TYPE_OSM: 168 return "OSM_1000"; 169 case QETH_CARD_TYPE_OSX: 170 return "OSX_10GIG"; 171 default: 172 return "unknown"; 173 } 174 } 175 return "n/a"; 176 } 177 178 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, 179 int clear_start_mask) 180 { 181 unsigned long flags; 182 183 spin_lock_irqsave(&card->thread_mask_lock, flags); 184 card->thread_allowed_mask = threads; 185 if (clear_start_mask) 186 card->thread_start_mask &= threads; 187 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 188 wake_up(&card->wait_q); 189 } 190 EXPORT_SYMBOL_GPL(qeth_set_allowed_threads); 191 192 int qeth_threads_running(struct qeth_card *card, unsigned long threads) 193 { 194 unsigned long flags; 195 int rc = 0; 196 197 spin_lock_irqsave(&card->thread_mask_lock, flags); 198 rc = (card->thread_running_mask & threads); 199 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 200 return rc; 201 } 202 EXPORT_SYMBOL_GPL(qeth_threads_running); 203 204 static void qeth_clear_working_pool_list(struct qeth_card *card) 205 { 206 struct qeth_buffer_pool_entry *pool_entry, *tmp; 207 struct qeth_qdio_q *queue = card->qdio.in_q; 208 unsigned int i; 209 210 QETH_CARD_TEXT(card, 5, "clwrklst"); 211 list_for_each_entry_safe(pool_entry, tmp, 212 &card->qdio.in_buf_pool.entry_list, list) 213 list_del(&pool_entry->list); 214 215 for (i = 0; i < ARRAY_SIZE(queue->bufs); i++) 216 queue->bufs[i].pool_entry = NULL; 217 } 218 219 static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry) 220 { 221 unsigned int i; 222 223 for (i = 0; i < ARRAY_SIZE(entry->elements); i++) { 224 if (entry->elements[i]) 225 __free_page(entry->elements[i]); 226 } 227 228 kfree(entry); 229 } 230 231 static void qeth_free_buffer_pool(struct qeth_card *card) 232 { 233 struct qeth_buffer_pool_entry *entry, *tmp; 234 235 list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list, 236 init_list) { 237 list_del(&entry->init_list); 238 qeth_free_pool_entry(entry); 239 } 240 } 241 242 static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages) 243 { 244 struct qeth_buffer_pool_entry *entry; 245 unsigned int i; 246 247 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 248 if (!entry) 249 return NULL; 250 251 for (i = 0; i < pages; i++) { 252 entry->elements[i] = __dev_alloc_page(GFP_KERNEL); 253 254 if (!entry->elements[i]) { 255 qeth_free_pool_entry(entry); 256 return NULL; 257 } 258 } 259 260 return entry; 261 } 262 263 static int qeth_alloc_buffer_pool(struct qeth_card *card) 264 { 265 unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card); 266 unsigned int i; 267 268 QETH_CARD_TEXT(card, 5, "alocpool"); 269 for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { 270 struct qeth_buffer_pool_entry *entry; 271 272 entry = qeth_alloc_pool_entry(buf_elements); 273 if (!entry) { 274 qeth_free_buffer_pool(card); 275 return -ENOMEM; 276 } 277 278 list_add(&entry->init_list, &card->qdio.init_pool.entry_list); 279 } 280 return 0; 281 } 282 283 int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count) 284 { 285 unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card); 286 struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool; 287 struct qeth_buffer_pool_entry *entry, *tmp; 288 int delta = count - pool->buf_count; 289 LIST_HEAD(entries); 290 291 QETH_CARD_TEXT(card, 2, "realcbp"); 292 293 /* Defer until queue is allocated: */ 294 if (!card->qdio.in_q) 295 goto out; 296 297 /* Remove entries from the pool: */ 298 while (delta < 0) { 299 entry = list_first_entry(&pool->entry_list, 300 struct qeth_buffer_pool_entry, 301 init_list); 302 list_del(&entry->init_list); 303 qeth_free_pool_entry(entry); 304 305 delta++; 306 } 307 308 /* Allocate additional entries: */ 309 while (delta > 0) { 310 entry = qeth_alloc_pool_entry(buf_elements); 311 if (!entry) { 312 list_for_each_entry_safe(entry, tmp, &entries, 313 init_list) { 314 list_del(&entry->init_list); 315 qeth_free_pool_entry(entry); 316 } 317 318 return -ENOMEM; 319 } 320 321 list_add(&entry->init_list, &entries); 322 323 delta--; 324 } 325 326 list_splice(&entries, &pool->entry_list); 327 328 out: 329 card->qdio.in_buf_pool.buf_count = count; 330 pool->buf_count = count; 331 return 0; 332 } 333 EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool); 334 335 static void qeth_free_qdio_queue(struct qeth_qdio_q *q) 336 { 337 if (!q) 338 return; 339 340 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 341 kfree(q); 342 } 343 344 static struct qeth_qdio_q *qeth_alloc_qdio_queue(void) 345 { 346 struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL); 347 int i; 348 349 if (!q) 350 return NULL; 351 352 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { 353 kfree(q); 354 return NULL; 355 } 356 357 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) 358 q->bufs[i].buffer = q->qdio_bufs[i]; 359 360 QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *)); 361 return q; 362 } 363 364 static int qeth_cq_init(struct qeth_card *card) 365 { 366 int rc; 367 368 if (card->options.cq == QETH_CQ_ENABLED) { 369 QETH_CARD_TEXT(card, 2, "cqinit"); 370 qdio_reset_buffers(card->qdio.c_q->qdio_bufs, 371 QDIO_MAX_BUFFERS_PER_Q); 372 card->qdio.c_q->next_buf_to_init = 127; 373 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 374 card->qdio.no_in_queues - 1, 0, 375 127); 376 if (rc) { 377 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 378 goto out; 379 } 380 } 381 rc = 0; 382 out: 383 return rc; 384 } 385 386 static int qeth_alloc_cq(struct qeth_card *card) 387 { 388 int rc; 389 390 if (card->options.cq == QETH_CQ_ENABLED) { 391 int i; 392 struct qdio_outbuf_state *outbuf_states; 393 394 QETH_CARD_TEXT(card, 2, "cqon"); 395 card->qdio.c_q = qeth_alloc_qdio_queue(); 396 if (!card->qdio.c_q) { 397 rc = -1; 398 goto kmsg_out; 399 } 400 card->qdio.no_in_queues = 2; 401 card->qdio.out_bufstates = 402 kcalloc(card->qdio.no_out_queues * 403 QDIO_MAX_BUFFERS_PER_Q, 404 sizeof(struct qdio_outbuf_state), 405 GFP_KERNEL); 406 outbuf_states = card->qdio.out_bufstates; 407 if (outbuf_states == NULL) { 408 rc = -1; 409 goto free_cq_out; 410 } 411 for (i = 0; i < card->qdio.no_out_queues; ++i) { 412 card->qdio.out_qs[i]->bufstates = outbuf_states; 413 outbuf_states += QDIO_MAX_BUFFERS_PER_Q; 414 } 415 } else { 416 QETH_CARD_TEXT(card, 2, "nocq"); 417 card->qdio.c_q = NULL; 418 card->qdio.no_in_queues = 1; 419 } 420 QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues); 421 rc = 0; 422 out: 423 return rc; 424 free_cq_out: 425 qeth_free_qdio_queue(card->qdio.c_q); 426 card->qdio.c_q = NULL; 427 kmsg_out: 428 dev_err(&card->gdev->dev, "Failed to create completion queue\n"); 429 goto out; 430 } 431 432 static void qeth_free_cq(struct qeth_card *card) 433 { 434 if (card->qdio.c_q) { 435 --card->qdio.no_in_queues; 436 qeth_free_qdio_queue(card->qdio.c_q); 437 card->qdio.c_q = NULL; 438 } 439 kfree(card->qdio.out_bufstates); 440 card->qdio.out_bufstates = NULL; 441 } 442 443 static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, 444 int delayed) 445 { 446 enum iucv_tx_notify n; 447 448 switch (sbalf15) { 449 case 0: 450 n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK; 451 break; 452 case 4: 453 case 16: 454 case 17: 455 case 18: 456 n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE : 457 TX_NOTIFY_UNREACHABLE; 458 break; 459 default: 460 n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR : 461 TX_NOTIFY_GENERALERROR; 462 break; 463 } 464 465 return n; 466 } 467 468 static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, 469 int forced_cleanup) 470 { 471 if (q->card->options.cq != QETH_CQ_ENABLED) 472 return; 473 474 if (q->bufs[bidx]->next_pending != NULL) { 475 struct qeth_qdio_out_buffer *head = q->bufs[bidx]; 476 struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending; 477 478 while (c) { 479 if (forced_cleanup || 480 atomic_read(&c->state) == 481 QETH_QDIO_BUF_HANDLED_DELAYED) { 482 struct qeth_qdio_out_buffer *f = c; 483 484 QETH_CARD_TEXT(f->q->card, 5, "fp"); 485 QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f); 486 /* release here to avoid interleaving between 487 outbound tasklet and inbound tasklet 488 regarding notifications and lifecycle */ 489 qeth_tx_complete_buf(c, forced_cleanup, 0); 490 491 c = f->next_pending; 492 WARN_ON_ONCE(head->next_pending != f); 493 head->next_pending = c; 494 kmem_cache_free(qeth_qdio_outbuf_cache, f); 495 } else { 496 head = c; 497 c = c->next_pending; 498 } 499 500 } 501 } 502 if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) == 503 QETH_QDIO_BUF_HANDLED_DELAYED)) { 504 /* for recovery situations */ 505 qeth_init_qdio_out_buf(q, bidx); 506 QETH_CARD_TEXT(q->card, 2, "clprecov"); 507 } 508 } 509 510 static void qeth_qdio_handle_aob(struct qeth_card *card, 511 unsigned long phys_aob_addr) 512 { 513 struct qaob *aob; 514 struct qeth_qdio_out_buffer *buffer; 515 enum iucv_tx_notify notification; 516 unsigned int i; 517 518 aob = (struct qaob *) phys_to_virt(phys_aob_addr); 519 QETH_CARD_TEXT(card, 5, "haob"); 520 QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr); 521 buffer = (struct qeth_qdio_out_buffer *) aob->user1; 522 QETH_CARD_TEXT_(card, 5, "%lx", aob->user1); 523 524 if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, 525 QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) { 526 notification = TX_NOTIFY_OK; 527 } else { 528 WARN_ON_ONCE(atomic_read(&buffer->state) != 529 QETH_QDIO_BUF_PENDING); 530 atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ); 531 notification = TX_NOTIFY_DELAYED_OK; 532 } 533 534 if (aob->aorc != 0) { 535 QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc); 536 notification = qeth_compute_cq_notification(aob->aorc, 1); 537 } 538 qeth_notify_skbs(buffer->q, buffer, notification); 539 540 /* Free dangling allocations. The attached skbs are handled by 541 * qeth_cleanup_handled_pending(). 542 */ 543 for (i = 0; 544 i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); 545 i++) { 546 void *data = phys_to_virt(aob->sba[i]); 547 548 if (data && buffer->is_header[i]) 549 kmem_cache_free(qeth_core_header_cache, data); 550 } 551 atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); 552 553 qdio_release_aob(aob); 554 } 555 556 static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len, 557 void *data) 558 { 559 ccw->cmd_code = cmd_code; 560 ccw->flags = flags | CCW_FLAG_SLI; 561 ccw->count = len; 562 ccw->cda = (__u32) __pa(data); 563 } 564 565 static int __qeth_issue_next_read(struct qeth_card *card) 566 { 567 struct qeth_cmd_buffer *iob = card->read_cmd; 568 struct qeth_channel *channel = iob->channel; 569 struct ccw1 *ccw = __ccw_from_cmd(iob); 570 int rc; 571 572 QETH_CARD_TEXT(card, 5, "issnxrd"); 573 if (channel->state != CH_STATE_UP) 574 return -EIO; 575 576 memset(iob->data, 0, iob->length); 577 qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data); 578 iob->callback = qeth_issue_next_read_cb; 579 /* keep the cmd alive after completion: */ 580 qeth_get_cmd(iob); 581 582 QETH_CARD_TEXT(card, 6, "noirqpnd"); 583 rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0); 584 if (!rc) { 585 channel->active_cmd = iob; 586 } else { 587 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", 588 rc, CARD_DEVID(card)); 589 qeth_unlock_channel(card, channel); 590 qeth_put_cmd(iob); 591 card->read_or_write_problem = 1; 592 qeth_schedule_recovery(card); 593 } 594 return rc; 595 } 596 597 static int qeth_issue_next_read(struct qeth_card *card) 598 { 599 int ret; 600 601 spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); 602 ret = __qeth_issue_next_read(card); 603 spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); 604 605 return ret; 606 } 607 608 static void qeth_enqueue_cmd(struct qeth_card *card, 609 struct qeth_cmd_buffer *iob) 610 { 611 spin_lock_irq(&card->lock); 612 list_add_tail(&iob->list, &card->cmd_waiter_list); 613 spin_unlock_irq(&card->lock); 614 } 615 616 static void qeth_dequeue_cmd(struct qeth_card *card, 617 struct qeth_cmd_buffer *iob) 618 { 619 spin_lock_irq(&card->lock); 620 list_del(&iob->list); 621 spin_unlock_irq(&card->lock); 622 } 623 624 void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason) 625 { 626 iob->rc = reason; 627 complete(&iob->done); 628 } 629 EXPORT_SYMBOL_GPL(qeth_notify_cmd); 630 631 static void qeth_flush_local_addrs4(struct qeth_card *card) 632 { 633 struct qeth_local_addr *addr; 634 struct hlist_node *tmp; 635 unsigned int i; 636 637 spin_lock_irq(&card->local_addrs4_lock); 638 hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) { 639 hash_del_rcu(&addr->hnode); 640 kfree_rcu(addr, rcu); 641 } 642 spin_unlock_irq(&card->local_addrs4_lock); 643 } 644 645 static void qeth_flush_local_addrs6(struct qeth_card *card) 646 { 647 struct qeth_local_addr *addr; 648 struct hlist_node *tmp; 649 unsigned int i; 650 651 spin_lock_irq(&card->local_addrs6_lock); 652 hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) { 653 hash_del_rcu(&addr->hnode); 654 kfree_rcu(addr, rcu); 655 } 656 spin_unlock_irq(&card->local_addrs6_lock); 657 } 658 659 static void qeth_flush_local_addrs(struct qeth_card *card) 660 { 661 qeth_flush_local_addrs4(card); 662 qeth_flush_local_addrs6(card); 663 } 664 665 static void qeth_add_local_addrs4(struct qeth_card *card, 666 struct qeth_ipacmd_local_addrs4 *cmd) 667 { 668 unsigned int i; 669 670 if (cmd->addr_length != 671 sizeof_field(struct qeth_ipacmd_local_addr4, addr)) { 672 dev_err_ratelimited(&card->gdev->dev, 673 "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n", 674 cmd->addr_length); 675 return; 676 } 677 678 spin_lock(&card->local_addrs4_lock); 679 for (i = 0; i < cmd->count; i++) { 680 unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr); 681 struct qeth_local_addr *addr; 682 bool duplicate = false; 683 684 hash_for_each_possible(card->local_addrs4, addr, hnode, key) { 685 if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) { 686 duplicate = true; 687 break; 688 } 689 } 690 691 if (duplicate) 692 continue; 693 694 addr = kmalloc(sizeof(*addr), GFP_ATOMIC); 695 if (!addr) { 696 dev_err(&card->gdev->dev, 697 "Failed to allocate local addr object. Traffic to %pI4 might suffer.\n", 698 &cmd->addrs[i].addr); 699 continue; 700 } 701 702 ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr); 703 hash_add_rcu(card->local_addrs4, &addr->hnode, key); 704 } 705 spin_unlock(&card->local_addrs4_lock); 706 } 707 708 static void qeth_add_local_addrs6(struct qeth_card *card, 709 struct qeth_ipacmd_local_addrs6 *cmd) 710 { 711 unsigned int i; 712 713 if (cmd->addr_length != 714 sizeof_field(struct qeth_ipacmd_local_addr6, addr)) { 715 dev_err_ratelimited(&card->gdev->dev, 716 "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n", 717 cmd->addr_length); 718 return; 719 } 720 721 spin_lock(&card->local_addrs6_lock); 722 for (i = 0; i < cmd->count; i++) { 723 u32 key = ipv6_addr_hash(&cmd->addrs[i].addr); 724 struct qeth_local_addr *addr; 725 bool duplicate = false; 726 727 hash_for_each_possible(card->local_addrs6, addr, hnode, key) { 728 if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) { 729 duplicate = true; 730 break; 731 } 732 } 733 734 if (duplicate) 735 continue; 736 737 addr = kmalloc(sizeof(*addr), GFP_ATOMIC); 738 if (!addr) { 739 dev_err(&card->gdev->dev, 740 "Failed to allocate local addr object. Traffic to %pI6c might suffer.\n", 741 &cmd->addrs[i].addr); 742 continue; 743 } 744 745 addr->addr = cmd->addrs[i].addr; 746 hash_add_rcu(card->local_addrs6, &addr->hnode, key); 747 } 748 spin_unlock(&card->local_addrs6_lock); 749 } 750 751 static void qeth_del_local_addrs4(struct qeth_card *card, 752 struct qeth_ipacmd_local_addrs4 *cmd) 753 { 754 unsigned int i; 755 756 if (cmd->addr_length != 757 sizeof_field(struct qeth_ipacmd_local_addr4, addr)) { 758 dev_err_ratelimited(&card->gdev->dev, 759 "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n", 760 cmd->addr_length); 761 return; 762 } 763 764 spin_lock(&card->local_addrs4_lock); 765 for (i = 0; i < cmd->count; i++) { 766 struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i]; 767 unsigned int key = ipv4_addr_hash(addr->addr); 768 struct qeth_local_addr *tmp; 769 770 hash_for_each_possible(card->local_addrs4, tmp, hnode, key) { 771 if (tmp->addr.s6_addr32[3] == addr->addr) { 772 hash_del_rcu(&tmp->hnode); 773 kfree_rcu(tmp, rcu); 774 break; 775 } 776 } 777 } 778 spin_unlock(&card->local_addrs4_lock); 779 } 780 781 static void qeth_del_local_addrs6(struct qeth_card *card, 782 struct qeth_ipacmd_local_addrs6 *cmd) 783 { 784 unsigned int i; 785 786 if (cmd->addr_length != 787 sizeof_field(struct qeth_ipacmd_local_addr6, addr)) { 788 dev_err_ratelimited(&card->gdev->dev, 789 "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n", 790 cmd->addr_length); 791 return; 792 } 793 794 spin_lock(&card->local_addrs6_lock); 795 for (i = 0; i < cmd->count; i++) { 796 struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i]; 797 u32 key = ipv6_addr_hash(&addr->addr); 798 struct qeth_local_addr *tmp; 799 800 hash_for_each_possible(card->local_addrs6, tmp, hnode, key) { 801 if (ipv6_addr_equal(&tmp->addr, &addr->addr)) { 802 hash_del_rcu(&tmp->hnode); 803 kfree_rcu(tmp, rcu); 804 break; 805 } 806 } 807 } 808 spin_unlock(&card->local_addrs6_lock); 809 } 810 811 static bool qeth_next_hop_is_local_v4(struct qeth_card *card, 812 struct sk_buff *skb) 813 { 814 struct qeth_local_addr *tmp; 815 bool is_local = false; 816 unsigned int key; 817 __be32 next_hop; 818 819 if (hash_empty(card->local_addrs4)) 820 return false; 821 822 rcu_read_lock(); 823 next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4)); 824 key = ipv4_addr_hash(next_hop); 825 826 hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) { 827 if (tmp->addr.s6_addr32[3] == next_hop) { 828 is_local = true; 829 break; 830 } 831 } 832 rcu_read_unlock(); 833 834 return is_local; 835 } 836 837 static bool qeth_next_hop_is_local_v6(struct qeth_card *card, 838 struct sk_buff *skb) 839 { 840 struct qeth_local_addr *tmp; 841 struct in6_addr *next_hop; 842 bool is_local = false; 843 u32 key; 844 845 if (hash_empty(card->local_addrs6)) 846 return false; 847 848 rcu_read_lock(); 849 next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6)); 850 key = ipv6_addr_hash(next_hop); 851 852 hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) { 853 if (ipv6_addr_equal(&tmp->addr, next_hop)) { 854 is_local = true; 855 break; 856 } 857 } 858 rcu_read_unlock(); 859 860 return is_local; 861 } 862 863 static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v) 864 { 865 struct qeth_card *card = m->private; 866 struct qeth_local_addr *tmp; 867 unsigned int i; 868 869 rcu_read_lock(); 870 hash_for_each_rcu(card->local_addrs4, i, tmp, hnode) 871 seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]); 872 hash_for_each_rcu(card->local_addrs6, i, tmp, hnode) 873 seq_printf(m, "%pI6c\n", &tmp->addr); 874 rcu_read_unlock(); 875 876 return 0; 877 } 878 879 DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr); 880 881 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, 882 struct qeth_card *card) 883 { 884 const char *ipa_name; 885 int com = cmd->hdr.command; 886 887 ipa_name = qeth_get_ipa_cmd_name(com); 888 889 if (rc) 890 QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n", 891 ipa_name, com, CARD_DEVID(card), rc, 892 qeth_get_ipa_msg(rc)); 893 else 894 QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n", 895 ipa_name, com, CARD_DEVID(card)); 896 } 897 898 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, 899 struct qeth_ipa_cmd *cmd) 900 { 901 QETH_CARD_TEXT(card, 5, "chkipad"); 902 903 if (IS_IPA_REPLY(cmd)) { 904 if (cmd->hdr.command != IPA_CMD_SETCCID && 905 cmd->hdr.command != IPA_CMD_DELCCID && 906 cmd->hdr.command != IPA_CMD_MODCCID && 907 cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) 908 qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); 909 return cmd; 910 } 911 912 /* handle unsolicited event: */ 913 switch (cmd->hdr.command) { 914 case IPA_CMD_STOPLAN: 915 if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) { 916 dev_err(&card->gdev->dev, 917 "Interface %s is down because the adjacent port is no longer in reflective relay mode\n", 918 netdev_name(card->dev)); 919 schedule_work(&card->close_dev_work); 920 } else { 921 dev_warn(&card->gdev->dev, 922 "The link for interface %s on CHPID 0x%X failed\n", 923 netdev_name(card->dev), card->info.chpid); 924 qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); 925 netif_carrier_off(card->dev); 926 } 927 return NULL; 928 case IPA_CMD_STARTLAN: 929 dev_info(&card->gdev->dev, 930 "The link for %s on CHPID 0x%X has been restored\n", 931 netdev_name(card->dev), card->info.chpid); 932 if (card->info.hwtrap) 933 card->info.hwtrap = 2; 934 qeth_schedule_recovery(card); 935 return NULL; 936 case IPA_CMD_SETBRIDGEPORT_IQD: 937 case IPA_CMD_SETBRIDGEPORT_OSA: 938 case IPA_CMD_ADDRESS_CHANGE_NOTIF: 939 if (card->discipline->control_event_handler(card, cmd)) 940 return cmd; 941 return NULL; 942 case IPA_CMD_MODCCID: 943 return cmd; 944 case IPA_CMD_REGISTER_LOCAL_ADDR: 945 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 946 qeth_add_local_addrs4(card, &cmd->data.local_addrs4); 947 else if (cmd->hdr.prot_version == QETH_PROT_IPV6) 948 qeth_add_local_addrs6(card, &cmd->data.local_addrs6); 949 950 QETH_CARD_TEXT(card, 3, "irla"); 951 return NULL; 952 case IPA_CMD_UNREGISTER_LOCAL_ADDR: 953 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 954 qeth_del_local_addrs4(card, &cmd->data.local_addrs4); 955 else if (cmd->hdr.prot_version == QETH_PROT_IPV6) 956 qeth_del_local_addrs6(card, &cmd->data.local_addrs6); 957 958 QETH_CARD_TEXT(card, 3, "urla"); 959 return NULL; 960 default: 961 QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n"); 962 return cmd; 963 } 964 } 965 966 static void qeth_clear_ipacmd_list(struct qeth_card *card) 967 { 968 struct qeth_cmd_buffer *iob; 969 unsigned long flags; 970 971 QETH_CARD_TEXT(card, 4, "clipalst"); 972 973 spin_lock_irqsave(&card->lock, flags); 974 list_for_each_entry(iob, &card->cmd_waiter_list, list) 975 qeth_notify_cmd(iob, -ECANCELED); 976 spin_unlock_irqrestore(&card->lock, flags); 977 } 978 979 static int qeth_check_idx_response(struct qeth_card *card, 980 unsigned char *buffer) 981 { 982 QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); 983 if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) { 984 QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n", 985 buffer[4]); 986 QETH_CARD_TEXT(card, 2, "ckidxres"); 987 QETH_CARD_TEXT(card, 2, " idxterm"); 988 QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]); 989 if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT || 990 buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) { 991 dev_err(&card->gdev->dev, 992 "The device does not support the configured transport mode\n"); 993 return -EPROTONOSUPPORT; 994 } 995 return -EIO; 996 } 997 return 0; 998 } 999 1000 void qeth_put_cmd(struct qeth_cmd_buffer *iob) 1001 { 1002 if (refcount_dec_and_test(&iob->ref_count)) { 1003 kfree(iob->data); 1004 kfree(iob); 1005 } 1006 } 1007 EXPORT_SYMBOL_GPL(qeth_put_cmd); 1008 1009 static void qeth_release_buffer_cb(struct qeth_card *card, 1010 struct qeth_cmd_buffer *iob, 1011 unsigned int data_length) 1012 { 1013 qeth_put_cmd(iob); 1014 } 1015 1016 static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc) 1017 { 1018 qeth_notify_cmd(iob, rc); 1019 qeth_put_cmd(iob); 1020 } 1021 1022 struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel, 1023 unsigned int length, unsigned int ccws, 1024 long timeout) 1025 { 1026 struct qeth_cmd_buffer *iob; 1027 1028 if (length > QETH_BUFSIZE) 1029 return NULL; 1030 1031 iob = kzalloc(sizeof(*iob), GFP_KERNEL); 1032 if (!iob) 1033 return NULL; 1034 1035 iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1), 1036 GFP_KERNEL | GFP_DMA); 1037 if (!iob->data) { 1038 kfree(iob); 1039 return NULL; 1040 } 1041 1042 init_completion(&iob->done); 1043 spin_lock_init(&iob->lock); 1044 INIT_LIST_HEAD(&iob->list); 1045 refcount_set(&iob->ref_count, 1); 1046 iob->channel = channel; 1047 iob->timeout = timeout; 1048 iob->length = length; 1049 return iob; 1050 } 1051 EXPORT_SYMBOL_GPL(qeth_alloc_cmd); 1052 1053 static void qeth_issue_next_read_cb(struct qeth_card *card, 1054 struct qeth_cmd_buffer *iob, 1055 unsigned int data_length) 1056 { 1057 struct qeth_cmd_buffer *request = NULL; 1058 struct qeth_ipa_cmd *cmd = NULL; 1059 struct qeth_reply *reply = NULL; 1060 struct qeth_cmd_buffer *tmp; 1061 unsigned long flags; 1062 int rc = 0; 1063 1064 QETH_CARD_TEXT(card, 4, "sndctlcb"); 1065 rc = qeth_check_idx_response(card, iob->data); 1066 switch (rc) { 1067 case 0: 1068 break; 1069 case -EIO: 1070 qeth_schedule_recovery(card); 1071 fallthrough; 1072 default: 1073 qeth_clear_ipacmd_list(card); 1074 goto err_idx; 1075 } 1076 1077 cmd = __ipa_reply(iob); 1078 if (cmd) { 1079 cmd = qeth_check_ipa_data(card, cmd); 1080 if (!cmd) 1081 goto out; 1082 if (IS_OSN(card) && card->osn_info.assist_cb && 1083 cmd->hdr.command != IPA_CMD_STARTLAN) { 1084 card->osn_info.assist_cb(card->dev, cmd); 1085 goto out; 1086 } 1087 } 1088 1089 /* match against pending cmd requests */ 1090 spin_lock_irqsave(&card->lock, flags); 1091 list_for_each_entry(tmp, &card->cmd_waiter_list, list) { 1092 if (tmp->match && tmp->match(tmp, iob)) { 1093 request = tmp; 1094 /* take the object outside the lock */ 1095 qeth_get_cmd(request); 1096 break; 1097 } 1098 } 1099 spin_unlock_irqrestore(&card->lock, flags); 1100 1101 if (!request) 1102 goto out; 1103 1104 reply = &request->reply; 1105 if (!reply->callback) { 1106 rc = 0; 1107 goto no_callback; 1108 } 1109 1110 spin_lock_irqsave(&request->lock, flags); 1111 if (request->rc) 1112 /* Bail out when the requestor has already left: */ 1113 rc = request->rc; 1114 else 1115 rc = reply->callback(card, reply, cmd ? (unsigned long)cmd : 1116 (unsigned long)iob); 1117 spin_unlock_irqrestore(&request->lock, flags); 1118 1119 no_callback: 1120 if (rc <= 0) 1121 qeth_notify_cmd(request, rc); 1122 qeth_put_cmd(request); 1123 out: 1124 memcpy(&card->seqno.pdu_hdr_ack, 1125 QETH_PDU_HEADER_SEQ_NO(iob->data), 1126 QETH_SEQ_NO_LENGTH); 1127 __qeth_issue_next_read(card); 1128 err_idx: 1129 qeth_put_cmd(iob); 1130 } 1131 1132 static int qeth_set_thread_start_bit(struct qeth_card *card, 1133 unsigned long thread) 1134 { 1135 unsigned long flags; 1136 int rc = 0; 1137 1138 spin_lock_irqsave(&card->thread_mask_lock, flags); 1139 if (!(card->thread_allowed_mask & thread)) 1140 rc = -EPERM; 1141 else if (card->thread_start_mask & thread) 1142 rc = -EBUSY; 1143 else 1144 card->thread_start_mask |= thread; 1145 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 1146 1147 return rc; 1148 } 1149 1150 static void qeth_clear_thread_start_bit(struct qeth_card *card, 1151 unsigned long thread) 1152 { 1153 unsigned long flags; 1154 1155 spin_lock_irqsave(&card->thread_mask_lock, flags); 1156 card->thread_start_mask &= ~thread; 1157 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 1158 wake_up(&card->wait_q); 1159 } 1160 1161 static void qeth_clear_thread_running_bit(struct qeth_card *card, 1162 unsigned long thread) 1163 { 1164 unsigned long flags; 1165 1166 spin_lock_irqsave(&card->thread_mask_lock, flags); 1167 card->thread_running_mask &= ~thread; 1168 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 1169 wake_up_all(&card->wait_q); 1170 } 1171 1172 static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) 1173 { 1174 unsigned long flags; 1175 int rc = 0; 1176 1177 spin_lock_irqsave(&card->thread_mask_lock, flags); 1178 if (card->thread_start_mask & thread) { 1179 if ((card->thread_allowed_mask & thread) && 1180 !(card->thread_running_mask & thread)) { 1181 rc = 1; 1182 card->thread_start_mask &= ~thread; 1183 card->thread_running_mask |= thread; 1184 } else 1185 rc = -EPERM; 1186 } 1187 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 1188 return rc; 1189 } 1190 1191 static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread) 1192 { 1193 int rc = 0; 1194 1195 wait_event(card->wait_q, 1196 (rc = __qeth_do_run_thread(card, thread)) >= 0); 1197 return rc; 1198 } 1199 1200 int qeth_schedule_recovery(struct qeth_card *card) 1201 { 1202 int rc; 1203 1204 QETH_CARD_TEXT(card, 2, "startrec"); 1205 1206 rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD); 1207 if (!rc) 1208 schedule_work(&card->kernel_thread_starter); 1209 1210 return rc; 1211 } 1212 1213 static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev, 1214 struct irb *irb) 1215 { 1216 int dstat, cstat; 1217 char *sense; 1218 1219 sense = (char *) irb->ecw; 1220 cstat = irb->scsw.cmd.cstat; 1221 dstat = irb->scsw.cmd.dstat; 1222 1223 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | 1224 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | 1225 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { 1226 QETH_CARD_TEXT(card, 2, "CGENCHK"); 1227 dev_warn(&cdev->dev, "The qeth device driver " 1228 "failed to recover an error on the device\n"); 1229 QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n", 1230 CCW_DEVID(cdev), dstat, cstat); 1231 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 1232 16, 1, irb, 64, 1); 1233 return -EIO; 1234 } 1235 1236 if (dstat & DEV_STAT_UNIT_CHECK) { 1237 if (sense[SENSE_RESETTING_EVENT_BYTE] & 1238 SENSE_RESETTING_EVENT_FLAG) { 1239 QETH_CARD_TEXT(card, 2, "REVIND"); 1240 return -EIO; 1241 } 1242 if (sense[SENSE_COMMAND_REJECT_BYTE] & 1243 SENSE_COMMAND_REJECT_FLAG) { 1244 QETH_CARD_TEXT(card, 2, "CMDREJi"); 1245 return -EIO; 1246 } 1247 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { 1248 QETH_CARD_TEXT(card, 2, "AFFE"); 1249 return -EIO; 1250 } 1251 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { 1252 QETH_CARD_TEXT(card, 2, "ZEROSEN"); 1253 return 0; 1254 } 1255 QETH_CARD_TEXT(card, 2, "DGENCHK"); 1256 return -EIO; 1257 } 1258 return 0; 1259 } 1260 1261 static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev, 1262 struct irb *irb) 1263 { 1264 if (!IS_ERR(irb)) 1265 return 0; 1266 1267 switch (PTR_ERR(irb)) { 1268 case -EIO: 1269 QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n", 1270 CCW_DEVID(cdev)); 1271 QETH_CARD_TEXT(card, 2, "ckirberr"); 1272 QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); 1273 return -EIO; 1274 case -ETIMEDOUT: 1275 dev_warn(&cdev->dev, "A hardware operation timed out" 1276 " on the device\n"); 1277 QETH_CARD_TEXT(card, 2, "ckirberr"); 1278 QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); 1279 return -ETIMEDOUT; 1280 default: 1281 QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n", 1282 PTR_ERR(irb), CCW_DEVID(cdev)); 1283 QETH_CARD_TEXT(card, 2, "ckirberr"); 1284 QETH_CARD_TEXT(card, 2, " rc???"); 1285 return PTR_ERR(irb); 1286 } 1287 } 1288 1289 static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, 1290 struct irb *irb) 1291 { 1292 int rc; 1293 int cstat, dstat; 1294 struct qeth_cmd_buffer *iob = NULL; 1295 struct ccwgroup_device *gdev; 1296 struct qeth_channel *channel; 1297 struct qeth_card *card; 1298 1299 /* while we hold the ccwdev lock, this stays valid: */ 1300 gdev = dev_get_drvdata(&cdev->dev); 1301 card = dev_get_drvdata(&gdev->dev); 1302 1303 QETH_CARD_TEXT(card, 5, "irq"); 1304 1305 if (card->read.ccwdev == cdev) { 1306 channel = &card->read; 1307 QETH_CARD_TEXT(card, 5, "read"); 1308 } else if (card->write.ccwdev == cdev) { 1309 channel = &card->write; 1310 QETH_CARD_TEXT(card, 5, "write"); 1311 } else { 1312 channel = &card->data; 1313 QETH_CARD_TEXT(card, 5, "data"); 1314 } 1315 1316 if (intparm == 0) { 1317 QETH_CARD_TEXT(card, 5, "irqunsol"); 1318 } else if ((addr_t)intparm != (addr_t)channel->active_cmd) { 1319 QETH_CARD_TEXT(card, 5, "irqunexp"); 1320 1321 dev_err(&cdev->dev, 1322 "Received IRQ with intparm %lx, expected %px\n", 1323 intparm, channel->active_cmd); 1324 if (channel->active_cmd) 1325 qeth_cancel_cmd(channel->active_cmd, -EIO); 1326 } else { 1327 iob = (struct qeth_cmd_buffer *) (addr_t)intparm; 1328 } 1329 1330 channel->active_cmd = NULL; 1331 qeth_unlock_channel(card, channel); 1332 1333 rc = qeth_check_irb_error(card, cdev, irb); 1334 if (rc) { 1335 /* IO was terminated, free its resources. */ 1336 if (iob) 1337 qeth_cancel_cmd(iob, rc); 1338 return; 1339 } 1340 1341 if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) { 1342 channel->state = CH_STATE_STOPPED; 1343 wake_up(&card->wait_q); 1344 } 1345 1346 if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { 1347 channel->state = CH_STATE_HALTED; 1348 wake_up(&card->wait_q); 1349 } 1350 1351 if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC | 1352 SCSW_FCTL_HALT_FUNC))) { 1353 qeth_cancel_cmd(iob, -ECANCELED); 1354 iob = NULL; 1355 } 1356 1357 cstat = irb->scsw.cmd.cstat; 1358 dstat = irb->scsw.cmd.dstat; 1359 1360 if ((dstat & DEV_STAT_UNIT_EXCEP) || 1361 (dstat & DEV_STAT_UNIT_CHECK) || 1362 (cstat)) { 1363 if (irb->esw.esw0.erw.cons) { 1364 dev_warn(&channel->ccwdev->dev, 1365 "The qeth device driver failed to recover " 1366 "an error on the device\n"); 1367 QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n", 1368 CCW_DEVID(channel->ccwdev), cstat, 1369 dstat); 1370 print_hex_dump(KERN_WARNING, "qeth: irb ", 1371 DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); 1372 print_hex_dump(KERN_WARNING, "qeth: sense data ", 1373 DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1); 1374 } 1375 1376 rc = qeth_get_problem(card, cdev, irb); 1377 if (rc) { 1378 card->read_or_write_problem = 1; 1379 if (iob) 1380 qeth_cancel_cmd(iob, rc); 1381 qeth_clear_ipacmd_list(card); 1382 qeth_schedule_recovery(card); 1383 return; 1384 } 1385 } 1386 1387 if (iob) { 1388 /* sanity check: */ 1389 if (irb->scsw.cmd.count > iob->length) { 1390 qeth_cancel_cmd(iob, -EIO); 1391 return; 1392 } 1393 if (iob->callback) 1394 iob->callback(card, iob, 1395 iob->length - irb->scsw.cmd.count); 1396 } 1397 } 1398 1399 static void qeth_notify_skbs(struct qeth_qdio_out_q *q, 1400 struct qeth_qdio_out_buffer *buf, 1401 enum iucv_tx_notify notification) 1402 { 1403 struct sk_buff *skb; 1404 1405 skb_queue_walk(&buf->skb_list, skb) { 1406 QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); 1407 QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); 1408 if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk) 1409 iucv_sk(skb->sk)->sk_txnotify(skb, notification); 1410 } 1411 } 1412 1413 static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error, 1414 int budget) 1415 { 1416 struct qeth_qdio_out_q *queue = buf->q; 1417 struct sk_buff *skb; 1418 1419 /* release may never happen from within CQ tasklet scope */ 1420 WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); 1421 1422 if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) 1423 qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR); 1424 1425 /* Empty buffer? */ 1426 if (buf->next_element_to_fill == 0) 1427 return; 1428 1429 QETH_TXQ_STAT_INC(queue, bufs); 1430 QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill); 1431 if (error) { 1432 QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames); 1433 } else { 1434 QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames); 1435 QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes); 1436 } 1437 1438 while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) { 1439 unsigned int bytes = qdisc_pkt_len(skb); 1440 bool is_tso = skb_is_gso(skb); 1441 unsigned int packets; 1442 1443 packets = is_tso ? skb_shinfo(skb)->gso_segs : 1; 1444 if (!error) { 1445 if (skb->ip_summed == CHECKSUM_PARTIAL) 1446 QETH_TXQ_STAT_ADD(queue, skbs_csum, packets); 1447 if (skb_is_nonlinear(skb)) 1448 QETH_TXQ_STAT_INC(queue, skbs_sg); 1449 if (is_tso) { 1450 QETH_TXQ_STAT_INC(queue, skbs_tso); 1451 QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes); 1452 } 1453 } 1454 1455 napi_consume_skb(skb, budget); 1456 } 1457 } 1458 1459 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 1460 struct qeth_qdio_out_buffer *buf, 1461 bool error, int budget) 1462 { 1463 int i; 1464 1465 /* is PCI flag set on buffer? */ 1466 if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) 1467 atomic_dec(&queue->set_pci_flags_count); 1468 1469 qeth_tx_complete_buf(buf, error, budget); 1470 1471 for (i = 0; i < queue->max_elements; ++i) { 1472 void *data = phys_to_virt(buf->buffer->element[i].addr); 1473 1474 if (data && buf->is_header[i]) 1475 kmem_cache_free(qeth_core_header_cache, data); 1476 buf->is_header[i] = 0; 1477 } 1478 1479 qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements); 1480 buf->next_element_to_fill = 0; 1481 buf->frames = 0; 1482 buf->bytes = 0; 1483 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 1484 } 1485 1486 static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free) 1487 { 1488 int j; 1489 1490 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 1491 if (!q->bufs[j]) 1492 continue; 1493 qeth_cleanup_handled_pending(q, j, 1); 1494 qeth_clear_output_buffer(q, q->bufs[j], true, 0); 1495 if (free) { 1496 kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); 1497 q->bufs[j] = NULL; 1498 } 1499 } 1500 } 1501 1502 static void qeth_drain_output_queues(struct qeth_card *card) 1503 { 1504 int i; 1505 1506 QETH_CARD_TEXT(card, 2, "clearqdbf"); 1507 /* clear outbound buffers to free skbs */ 1508 for (i = 0; i < card->qdio.no_out_queues; ++i) { 1509 if (card->qdio.out_qs[i]) 1510 qeth_drain_output_queue(card->qdio.out_qs[i], false); 1511 } 1512 } 1513 1514 static void qeth_osa_set_output_queues(struct qeth_card *card, bool single) 1515 { 1516 unsigned int max = single ? 1 : card->dev->num_tx_queues; 1517 1518 if (card->qdio.no_out_queues == max) 1519 return; 1520 1521 if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) 1522 qeth_free_qdio_queues(card); 1523 1524 if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT) 1525 dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); 1526 1527 card->qdio.no_out_queues = max; 1528 } 1529 1530 static int qeth_update_from_chp_desc(struct qeth_card *card) 1531 { 1532 struct ccw_device *ccwdev; 1533 struct channel_path_desc_fmt0 *chp_dsc; 1534 1535 QETH_CARD_TEXT(card, 2, "chp_desc"); 1536 1537 ccwdev = card->data.ccwdev; 1538 chp_dsc = ccw_device_get_chp_desc(ccwdev, 0); 1539 if (!chp_dsc) 1540 return -ENOMEM; 1541 1542 card->info.func_level = 0x4100 + chp_dsc->desc; 1543 1544 if (IS_OSD(card) || IS_OSX(card)) 1545 /* CHPP field bit 6 == 1 -> single queue */ 1546 qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02); 1547 1548 kfree(chp_dsc); 1549 QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues); 1550 QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level); 1551 return 0; 1552 } 1553 1554 static void qeth_init_qdio_info(struct qeth_card *card) 1555 { 1556 QETH_CARD_TEXT(card, 4, "intqdinf"); 1557 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 1558 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; 1559 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; 1560 1561 /* inbound */ 1562 card->qdio.no_in_queues = 1; 1563 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; 1564 if (IS_IQD(card)) 1565 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT; 1566 else 1567 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; 1568 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count; 1569 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); 1570 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); 1571 } 1572 1573 static void qeth_set_initial_options(struct qeth_card *card) 1574 { 1575 card->options.route4.type = NO_ROUTER; 1576 card->options.route6.type = NO_ROUTER; 1577 card->options.isolation = ISOLATION_MODE_NONE; 1578 card->options.cq = QETH_CQ_DISABLED; 1579 card->options.layer = QETH_DISCIPLINE_UNDETERMINED; 1580 } 1581 1582 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) 1583 { 1584 unsigned long flags; 1585 int rc = 0; 1586 1587 spin_lock_irqsave(&card->thread_mask_lock, flags); 1588 QETH_CARD_TEXT_(card, 4, " %02x%02x%02x", 1589 (u8) card->thread_start_mask, 1590 (u8) card->thread_allowed_mask, 1591 (u8) card->thread_running_mask); 1592 rc = (card->thread_start_mask & thread); 1593 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 1594 return rc; 1595 } 1596 1597 static int qeth_do_reset(void *data); 1598 static void qeth_start_kernel_thread(struct work_struct *work) 1599 { 1600 struct task_struct *ts; 1601 struct qeth_card *card = container_of(work, struct qeth_card, 1602 kernel_thread_starter); 1603 QETH_CARD_TEXT(card, 2, "strthrd"); 1604 1605 if (card->read.state != CH_STATE_UP && 1606 card->write.state != CH_STATE_UP) 1607 return; 1608 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) { 1609 ts = kthread_run(qeth_do_reset, card, "qeth_recover"); 1610 if (IS_ERR(ts)) { 1611 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 1612 qeth_clear_thread_running_bit(card, 1613 QETH_RECOVER_THREAD); 1614 } 1615 } 1616 } 1617 1618 static void qeth_buffer_reclaim_work(struct work_struct *); 1619 static void qeth_setup_card(struct qeth_card *card) 1620 { 1621 QETH_CARD_TEXT(card, 2, "setupcrd"); 1622 1623 card->info.type = CARD_RDEV(card)->id.driver_info; 1624 card->state = CARD_STATE_DOWN; 1625 spin_lock_init(&card->lock); 1626 spin_lock_init(&card->thread_mask_lock); 1627 mutex_init(&card->conf_mutex); 1628 mutex_init(&card->discipline_mutex); 1629 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); 1630 INIT_LIST_HEAD(&card->cmd_waiter_list); 1631 init_waitqueue_head(&card->wait_q); 1632 qeth_set_initial_options(card); 1633 /* IP address takeover */ 1634 INIT_LIST_HEAD(&card->ipato.entries); 1635 qeth_init_qdio_info(card); 1636 INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); 1637 INIT_WORK(&card->close_dev_work, qeth_close_dev_handler); 1638 hash_init(card->rx_mode_addrs); 1639 hash_init(card->local_addrs4); 1640 hash_init(card->local_addrs6); 1641 spin_lock_init(&card->local_addrs4_lock); 1642 spin_lock_init(&card->local_addrs6_lock); 1643 } 1644 1645 static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) 1646 { 1647 struct qeth_card *card = container_of(slr, struct qeth_card, 1648 qeth_service_level); 1649 if (card->info.mcl_level[0]) 1650 seq_printf(m, "qeth: %s firmware level %s\n", 1651 CARD_BUS_ID(card), card->info.mcl_level); 1652 } 1653 1654 static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) 1655 { 1656 struct qeth_card *card; 1657 1658 QETH_DBF_TEXT(SETUP, 2, "alloccrd"); 1659 card = kzalloc(sizeof(*card), GFP_KERNEL); 1660 if (!card) 1661 goto out; 1662 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 1663 1664 card->gdev = gdev; 1665 dev_set_drvdata(&gdev->dev, card); 1666 CARD_RDEV(card) = gdev->cdev[0]; 1667 CARD_WDEV(card) = gdev->cdev[1]; 1668 CARD_DDEV(card) = gdev->cdev[2]; 1669 1670 card->event_wq = alloc_ordered_workqueue("%s_event", 0, 1671 dev_name(&gdev->dev)); 1672 if (!card->event_wq) 1673 goto out_wq; 1674 1675 card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0); 1676 if (!card->read_cmd) 1677 goto out_read_cmd; 1678 1679 card->debugfs = debugfs_create_dir(dev_name(&gdev->dev), 1680 qeth_debugfs_root); 1681 debugfs_create_file("local_addrs", 0400, card->debugfs, card, 1682 &qeth_debugfs_local_addr_fops); 1683 1684 card->qeth_service_level.seq_print = qeth_core_sl_print; 1685 register_service_level(&card->qeth_service_level); 1686 return card; 1687 1688 out_read_cmd: 1689 destroy_workqueue(card->event_wq); 1690 out_wq: 1691 dev_set_drvdata(&gdev->dev, NULL); 1692 kfree(card); 1693 out: 1694 return NULL; 1695 } 1696 1697 static int qeth_clear_channel(struct qeth_card *card, 1698 struct qeth_channel *channel) 1699 { 1700 int rc; 1701 1702 QETH_CARD_TEXT(card, 3, "clearch"); 1703 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1704 rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd); 1705 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1706 1707 if (rc) 1708 return rc; 1709 rc = wait_event_interruptible_timeout(card->wait_q, 1710 channel->state == CH_STATE_STOPPED, QETH_TIMEOUT); 1711 if (rc == -ERESTARTSYS) 1712 return rc; 1713 if (channel->state != CH_STATE_STOPPED) 1714 return -ETIME; 1715 channel->state = CH_STATE_DOWN; 1716 return 0; 1717 } 1718 1719 static int qeth_halt_channel(struct qeth_card *card, 1720 struct qeth_channel *channel) 1721 { 1722 int rc; 1723 1724 QETH_CARD_TEXT(card, 3, "haltch"); 1725 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1726 rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd); 1727 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1728 1729 if (rc) 1730 return rc; 1731 rc = wait_event_interruptible_timeout(card->wait_q, 1732 channel->state == CH_STATE_HALTED, QETH_TIMEOUT); 1733 if (rc == -ERESTARTSYS) 1734 return rc; 1735 if (channel->state != CH_STATE_HALTED) 1736 return -ETIME; 1737 return 0; 1738 } 1739 1740 static int qeth_stop_channel(struct qeth_channel *channel) 1741 { 1742 struct ccw_device *cdev = channel->ccwdev; 1743 int rc; 1744 1745 rc = ccw_device_set_offline(cdev); 1746 1747 spin_lock_irq(get_ccwdev_lock(cdev)); 1748 if (channel->active_cmd) { 1749 dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n", 1750 channel->active_cmd); 1751 channel->active_cmd = NULL; 1752 } 1753 cdev->handler = NULL; 1754 spin_unlock_irq(get_ccwdev_lock(cdev)); 1755 1756 return rc; 1757 } 1758 1759 static int qeth_start_channel(struct qeth_channel *channel) 1760 { 1761 struct ccw_device *cdev = channel->ccwdev; 1762 int rc; 1763 1764 channel->state = CH_STATE_DOWN; 1765 atomic_set(&channel->irq_pending, 0); 1766 1767 spin_lock_irq(get_ccwdev_lock(cdev)); 1768 cdev->handler = qeth_irq; 1769 spin_unlock_irq(get_ccwdev_lock(cdev)); 1770 1771 rc = ccw_device_set_online(cdev); 1772 if (rc) 1773 goto err; 1774 1775 return 0; 1776 1777 err: 1778 spin_lock_irq(get_ccwdev_lock(cdev)); 1779 cdev->handler = NULL; 1780 spin_unlock_irq(get_ccwdev_lock(cdev)); 1781 return rc; 1782 } 1783 1784 static int qeth_halt_channels(struct qeth_card *card) 1785 { 1786 int rc1 = 0, rc2 = 0, rc3 = 0; 1787 1788 QETH_CARD_TEXT(card, 3, "haltchs"); 1789 rc1 = qeth_halt_channel(card, &card->read); 1790 rc2 = qeth_halt_channel(card, &card->write); 1791 rc3 = qeth_halt_channel(card, &card->data); 1792 if (rc1) 1793 return rc1; 1794 if (rc2) 1795 return rc2; 1796 return rc3; 1797 } 1798 1799 static int qeth_clear_channels(struct qeth_card *card) 1800 { 1801 int rc1 = 0, rc2 = 0, rc3 = 0; 1802 1803 QETH_CARD_TEXT(card, 3, "clearchs"); 1804 rc1 = qeth_clear_channel(card, &card->read); 1805 rc2 = qeth_clear_channel(card, &card->write); 1806 rc3 = qeth_clear_channel(card, &card->data); 1807 if (rc1) 1808 return rc1; 1809 if (rc2) 1810 return rc2; 1811 return rc3; 1812 } 1813 1814 static int qeth_clear_halt_card(struct qeth_card *card, int halt) 1815 { 1816 int rc = 0; 1817 1818 QETH_CARD_TEXT(card, 3, "clhacrd"); 1819 1820 if (halt) 1821 rc = qeth_halt_channels(card); 1822 if (rc) 1823 return rc; 1824 return qeth_clear_channels(card); 1825 } 1826 1827 static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) 1828 { 1829 int rc = 0; 1830 1831 QETH_CARD_TEXT(card, 3, "qdioclr"); 1832 switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, 1833 QETH_QDIO_CLEANING)) { 1834 case QETH_QDIO_ESTABLISHED: 1835 if (IS_IQD(card)) 1836 rc = qdio_shutdown(CARD_DDEV(card), 1837 QDIO_FLAG_CLEANUP_USING_HALT); 1838 else 1839 rc = qdio_shutdown(CARD_DDEV(card), 1840 QDIO_FLAG_CLEANUP_USING_CLEAR); 1841 if (rc) 1842 QETH_CARD_TEXT_(card, 3, "1err%d", rc); 1843 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 1844 break; 1845 case QETH_QDIO_CLEANING: 1846 return rc; 1847 default: 1848 break; 1849 } 1850 rc = qeth_clear_halt_card(card, use_halt); 1851 if (rc) 1852 QETH_CARD_TEXT_(card, 3, "2err%d", rc); 1853 return rc; 1854 } 1855 1856 static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card) 1857 { 1858 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; 1859 struct diag26c_vnic_resp *response = NULL; 1860 struct diag26c_vnic_req *request = NULL; 1861 struct ccw_dev_id id; 1862 char userid[80]; 1863 int rc = 0; 1864 1865 QETH_CARD_TEXT(card, 2, "vmlayer"); 1866 1867 cpcmd("QUERY USERID", userid, sizeof(userid), &rc); 1868 if (rc) 1869 goto out; 1870 1871 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 1872 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 1873 if (!request || !response) { 1874 rc = -ENOMEM; 1875 goto out; 1876 } 1877 1878 ccw_device_get_id(CARD_RDEV(card), &id); 1879 request->resp_buf_len = sizeof(*response); 1880 request->resp_version = DIAG26C_VERSION6_VM65918; 1881 request->req_format = DIAG26C_VNIC_INFO; 1882 ASCEBC(userid, 8); 1883 memcpy(&request->sys_name, userid, 8); 1884 request->devno = id.devno; 1885 1886 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 1887 rc = diag26c(request, response, DIAG26C_PORT_VNIC); 1888 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 1889 if (rc) 1890 goto out; 1891 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 1892 1893 if (request->resp_buf_len < sizeof(*response) || 1894 response->version != request->resp_version) { 1895 rc = -EIO; 1896 goto out; 1897 } 1898 1899 if (response->protocol == VNIC_INFO_PROT_L2) 1900 disc = QETH_DISCIPLINE_LAYER2; 1901 else if (response->protocol == VNIC_INFO_PROT_L3) 1902 disc = QETH_DISCIPLINE_LAYER3; 1903 1904 out: 1905 kfree(response); 1906 kfree(request); 1907 if (rc) 1908 QETH_CARD_TEXT_(card, 2, "err%x", rc); 1909 return disc; 1910 } 1911 1912 /* Determine whether the device requires a specific layer discipline */ 1913 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card) 1914 { 1915 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; 1916 1917 if (IS_OSM(card) || IS_OSN(card)) 1918 disc = QETH_DISCIPLINE_LAYER2; 1919 else if (IS_VM_NIC(card)) 1920 disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : 1921 qeth_vm_detect_layer(card); 1922 1923 switch (disc) { 1924 case QETH_DISCIPLINE_LAYER2: 1925 QETH_CARD_TEXT(card, 3, "force l2"); 1926 break; 1927 case QETH_DISCIPLINE_LAYER3: 1928 QETH_CARD_TEXT(card, 3, "force l3"); 1929 break; 1930 default: 1931 QETH_CARD_TEXT(card, 3, "force no"); 1932 } 1933 1934 return disc; 1935 } 1936 1937 static void qeth_set_blkt_defaults(struct qeth_card *card) 1938 { 1939 QETH_CARD_TEXT(card, 2, "cfgblkt"); 1940 1941 if (card->info.use_v1_blkt) { 1942 card->info.blkt.time_total = 0; 1943 card->info.blkt.inter_packet = 0; 1944 card->info.blkt.inter_packet_jumbo = 0; 1945 } else { 1946 card->info.blkt.time_total = 250; 1947 card->info.blkt.inter_packet = 5; 1948 card->info.blkt.inter_packet_jumbo = 15; 1949 } 1950 } 1951 1952 static void qeth_idx_init(struct qeth_card *card) 1953 { 1954 memset(&card->seqno, 0, sizeof(card->seqno)); 1955 1956 card->token.issuer_rm_w = 0x00010103UL; 1957 card->token.cm_filter_w = 0x00010108UL; 1958 card->token.cm_connection_w = 0x0001010aUL; 1959 card->token.ulp_filter_w = 0x0001010bUL; 1960 card->token.ulp_connection_w = 0x0001010dUL; 1961 1962 switch (card->info.type) { 1963 case QETH_CARD_TYPE_IQD: 1964 card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; 1965 break; 1966 case QETH_CARD_TYPE_OSD: 1967 case QETH_CARD_TYPE_OSN: 1968 card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD; 1969 break; 1970 default: 1971 break; 1972 } 1973 } 1974 1975 static void qeth_idx_finalize_cmd(struct qeth_card *card, 1976 struct qeth_cmd_buffer *iob) 1977 { 1978 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, 1979 QETH_SEQ_NO_LENGTH); 1980 if (iob->channel == &card->write) 1981 card->seqno.trans_hdr++; 1982 } 1983 1984 static int qeth_peer_func_level(int level) 1985 { 1986 if ((level & 0xff) == 8) 1987 return (level & 0xff) + 0x400; 1988 if (((level >> 8) & 3) == 1) 1989 return (level & 0xff) + 0x200; 1990 return level; 1991 } 1992 1993 static void qeth_mpc_finalize_cmd(struct qeth_card *card, 1994 struct qeth_cmd_buffer *iob) 1995 { 1996 qeth_idx_finalize_cmd(card, iob); 1997 1998 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data), 1999 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH); 2000 card->seqno.pdu_hdr++; 2001 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), 2002 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); 2003 2004 iob->callback = qeth_release_buffer_cb; 2005 } 2006 2007 static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob, 2008 struct qeth_cmd_buffer *reply) 2009 { 2010 /* MPC cmds are issued strictly in sequence. */ 2011 return !IS_IPA(reply->data); 2012 } 2013 2014 static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card, 2015 const void *data, 2016 unsigned int data_length) 2017 { 2018 struct qeth_cmd_buffer *iob; 2019 2020 iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT); 2021 if (!iob) 2022 return NULL; 2023 2024 memcpy(iob->data, data, data_length); 2025 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length, 2026 iob->data); 2027 iob->finalize = qeth_mpc_finalize_cmd; 2028 iob->match = qeth_mpc_match_reply; 2029 return iob; 2030 } 2031 2032 /** 2033 * qeth_send_control_data() - send control command to the card 2034 * @card: qeth_card structure pointer 2035 * @iob: qeth_cmd_buffer pointer 2036 * @reply_cb: callback function pointer 2037 * @cb_card: pointer to the qeth_card structure 2038 * @cb_reply: pointer to the qeth_reply structure 2039 * @cb_cmd: pointer to the original iob for non-IPA 2040 * commands, or to the qeth_ipa_cmd structure 2041 * for the IPA commands. 2042 * @reply_param: private pointer passed to the callback 2043 * 2044 * Callback function gets called one or more times, with cb_cmd 2045 * pointing to the response returned by the hardware. Callback 2046 * function must return 2047 * > 0 if more reply blocks are expected, 2048 * 0 if the last or only reply block is received, and 2049 * < 0 on error. 2050 * Callback function can get the value of the reply_param pointer from the 2051 * field 'param' of the structure qeth_reply. 2052 */ 2053 2054 static int qeth_send_control_data(struct qeth_card *card, 2055 struct qeth_cmd_buffer *iob, 2056 int (*reply_cb)(struct qeth_card *cb_card, 2057 struct qeth_reply *cb_reply, 2058 unsigned long cb_cmd), 2059 void *reply_param) 2060 { 2061 struct qeth_channel *channel = iob->channel; 2062 struct qeth_reply *reply = &iob->reply; 2063 long timeout = iob->timeout; 2064 int rc; 2065 2066 QETH_CARD_TEXT(card, 2, "sendctl"); 2067 2068 reply->callback = reply_cb; 2069 reply->param = reply_param; 2070 2071 timeout = wait_event_interruptible_timeout(card->wait_q, 2072 qeth_trylock_channel(channel), 2073 timeout); 2074 if (timeout <= 0) { 2075 qeth_put_cmd(iob); 2076 return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; 2077 } 2078 2079 if (iob->finalize) 2080 iob->finalize(card, iob); 2081 QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN)); 2082 2083 qeth_enqueue_cmd(card, iob); 2084 2085 /* This pairs with iob->callback, and keeps the iob alive after IO: */ 2086 qeth_get_cmd(iob); 2087 2088 QETH_CARD_TEXT(card, 6, "noirqpnd"); 2089 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 2090 rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob), 2091 (addr_t) iob, 0, 0, timeout); 2092 if (!rc) 2093 channel->active_cmd = iob; 2094 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 2095 if (rc) { 2096 QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", 2097 CARD_DEVID(card), rc); 2098 QETH_CARD_TEXT_(card, 2, " err%d", rc); 2099 qeth_dequeue_cmd(card, iob); 2100 qeth_put_cmd(iob); 2101 qeth_unlock_channel(card, channel); 2102 goto out; 2103 } 2104 2105 timeout = wait_for_completion_interruptible_timeout(&iob->done, 2106 timeout); 2107 if (timeout <= 0) 2108 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; 2109 2110 qeth_dequeue_cmd(card, iob); 2111 2112 if (reply_cb) { 2113 /* Wait until the callback for a late reply has completed: */ 2114 spin_lock_irq(&iob->lock); 2115 if (rc) 2116 /* Zap any callback that's still pending: */ 2117 iob->rc = rc; 2118 spin_unlock_irq(&iob->lock); 2119 } 2120 2121 if (!rc) 2122 rc = iob->rc; 2123 2124 out: 2125 qeth_put_cmd(iob); 2126 return rc; 2127 } 2128 2129 struct qeth_node_desc { 2130 struct node_descriptor nd1; 2131 struct node_descriptor nd2; 2132 struct node_descriptor nd3; 2133 }; 2134 2135 static void qeth_read_conf_data_cb(struct qeth_card *card, 2136 struct qeth_cmd_buffer *iob, 2137 unsigned int data_length) 2138 { 2139 struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data; 2140 int rc = 0; 2141 u8 *tag; 2142 2143 QETH_CARD_TEXT(card, 2, "cfgunit"); 2144 2145 if (data_length < sizeof(*nd)) { 2146 rc = -EINVAL; 2147 goto out; 2148 } 2149 2150 card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] && 2151 nd->nd1.plant[1] == _ascebc['M']; 2152 tag = (u8 *)&nd->nd1.tag; 2153 card->info.chpid = tag[0]; 2154 card->info.unit_addr2 = tag[1]; 2155 2156 tag = (u8 *)&nd->nd2.tag; 2157 card->info.cula = tag[1]; 2158 2159 card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 && 2160 nd->nd3.model[1] == 0xF0 && 2161 nd->nd3.model[2] >= 0xF1 && 2162 nd->nd3.model[2] <= 0xF4; 2163 2164 out: 2165 qeth_notify_cmd(iob, rc); 2166 qeth_put_cmd(iob); 2167 } 2168 2169 static int qeth_read_conf_data(struct qeth_card *card) 2170 { 2171 struct qeth_channel *channel = &card->data; 2172 struct qeth_cmd_buffer *iob; 2173 struct ciw *ciw; 2174 2175 /* scan for RCD command in extended SenseID data */ 2176 ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD); 2177 if (!ciw || ciw->cmd == 0) 2178 return -EOPNOTSUPP; 2179 if (ciw->count < sizeof(struct qeth_node_desc)) 2180 return -EINVAL; 2181 2182 iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT); 2183 if (!iob) 2184 return -ENOMEM; 2185 2186 iob->callback = qeth_read_conf_data_cb; 2187 qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length, 2188 iob->data); 2189 2190 return qeth_send_control_data(card, iob, NULL, NULL); 2191 } 2192 2193 static int qeth_idx_check_activate_response(struct qeth_card *card, 2194 struct qeth_channel *channel, 2195 struct qeth_cmd_buffer *iob) 2196 { 2197 int rc; 2198 2199 rc = qeth_check_idx_response(card, iob->data); 2200 if (rc) 2201 return rc; 2202 2203 if (QETH_IS_IDX_ACT_POS_REPLY(iob->data)) 2204 return 0; 2205 2206 /* negative reply: */ 2207 QETH_CARD_TEXT_(card, 2, "idxneg%c", 2208 QETH_IDX_ACT_CAUSE_CODE(iob->data)); 2209 2210 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { 2211 case QETH_IDX_ACT_ERR_EXCL: 2212 dev_err(&channel->ccwdev->dev, 2213 "The adapter is used exclusively by another host\n"); 2214 return -EBUSY; 2215 case QETH_IDX_ACT_ERR_AUTH: 2216 case QETH_IDX_ACT_ERR_AUTH_USER: 2217 dev_err(&channel->ccwdev->dev, 2218 "Setting the device online failed because of insufficient authorization\n"); 2219 return -EPERM; 2220 default: 2221 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", 2222 CCW_DEVID(channel->ccwdev)); 2223 return -EIO; 2224 } 2225 } 2226 2227 static void qeth_idx_activate_read_channel_cb(struct qeth_card *card, 2228 struct qeth_cmd_buffer *iob, 2229 unsigned int data_length) 2230 { 2231 struct qeth_channel *channel = iob->channel; 2232 u16 peer_level; 2233 int rc; 2234 2235 QETH_CARD_TEXT(card, 2, "idxrdcb"); 2236 2237 rc = qeth_idx_check_activate_response(card, channel, iob); 2238 if (rc) 2239 goto out; 2240 2241 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 2242 if (peer_level != qeth_peer_func_level(card->info.func_level)) { 2243 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 2244 CCW_DEVID(channel->ccwdev), 2245 card->info.func_level, peer_level); 2246 rc = -EINVAL; 2247 goto out; 2248 } 2249 2250 memcpy(&card->token.issuer_rm_r, 2251 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 2252 QETH_MPC_TOKEN_LENGTH); 2253 memcpy(&card->info.mcl_level[0], 2254 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); 2255 2256 out: 2257 qeth_notify_cmd(iob, rc); 2258 qeth_put_cmd(iob); 2259 } 2260 2261 static void qeth_idx_activate_write_channel_cb(struct qeth_card *card, 2262 struct qeth_cmd_buffer *iob, 2263 unsigned int data_length) 2264 { 2265 struct qeth_channel *channel = iob->channel; 2266 u16 peer_level; 2267 int rc; 2268 2269 QETH_CARD_TEXT(card, 2, "idxwrcb"); 2270 2271 rc = qeth_idx_check_activate_response(card, channel, iob); 2272 if (rc) 2273 goto out; 2274 2275 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 2276 if ((peer_level & ~0x0100) != 2277 qeth_peer_func_level(card->info.func_level)) { 2278 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 2279 CCW_DEVID(channel->ccwdev), 2280 card->info.func_level, peer_level); 2281 rc = -EINVAL; 2282 } 2283 2284 out: 2285 qeth_notify_cmd(iob, rc); 2286 qeth_put_cmd(iob); 2287 } 2288 2289 static void qeth_idx_setup_activate_cmd(struct qeth_card *card, 2290 struct qeth_cmd_buffer *iob) 2291 { 2292 u16 addr = (card->info.cula << 8) + card->info.unit_addr2; 2293 u8 port = ((u8)card->dev->dev_port) | 0x80; 2294 struct ccw1 *ccw = __ccw_from_cmd(iob); 2295 2296 qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE, 2297 iob->data); 2298 qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data); 2299 iob->finalize = qeth_idx_finalize_cmd; 2300 2301 port |= QETH_IDX_ACT_INVAL_FRAME; 2302 memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1); 2303 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 2304 &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); 2305 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2306 &card->info.func_level, 2); 2307 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2); 2308 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2); 2309 } 2310 2311 static int qeth_idx_activate_read_channel(struct qeth_card *card) 2312 { 2313 struct qeth_channel *channel = &card->read; 2314 struct qeth_cmd_buffer *iob; 2315 int rc; 2316 2317 QETH_CARD_TEXT(card, 2, "idxread"); 2318 2319 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); 2320 if (!iob) 2321 return -ENOMEM; 2322 2323 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); 2324 qeth_idx_setup_activate_cmd(card, iob); 2325 iob->callback = qeth_idx_activate_read_channel_cb; 2326 2327 rc = qeth_send_control_data(card, iob, NULL, NULL); 2328 if (rc) 2329 return rc; 2330 2331 channel->state = CH_STATE_UP; 2332 return 0; 2333 } 2334 2335 static int qeth_idx_activate_write_channel(struct qeth_card *card) 2336 { 2337 struct qeth_channel *channel = &card->write; 2338 struct qeth_cmd_buffer *iob; 2339 int rc; 2340 2341 QETH_CARD_TEXT(card, 2, "idxwrite"); 2342 2343 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); 2344 if (!iob) 2345 return -ENOMEM; 2346 2347 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); 2348 qeth_idx_setup_activate_cmd(card, iob); 2349 iob->callback = qeth_idx_activate_write_channel_cb; 2350 2351 rc = qeth_send_control_data(card, iob, NULL, NULL); 2352 if (rc) 2353 return rc; 2354 2355 channel->state = CH_STATE_UP; 2356 return 0; 2357 } 2358 2359 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2360 unsigned long data) 2361 { 2362 struct qeth_cmd_buffer *iob; 2363 2364 QETH_CARD_TEXT(card, 2, "cmenblcb"); 2365 2366 iob = (struct qeth_cmd_buffer *) data; 2367 memcpy(&card->token.cm_filter_r, 2368 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), 2369 QETH_MPC_TOKEN_LENGTH); 2370 return 0; 2371 } 2372 2373 static int qeth_cm_enable(struct qeth_card *card) 2374 { 2375 struct qeth_cmd_buffer *iob; 2376 2377 QETH_CARD_TEXT(card, 2, "cmenable"); 2378 2379 iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE); 2380 if (!iob) 2381 return -ENOMEM; 2382 2383 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), 2384 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2385 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), 2386 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); 2387 2388 return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL); 2389 } 2390 2391 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2392 unsigned long data) 2393 { 2394 struct qeth_cmd_buffer *iob; 2395 2396 QETH_CARD_TEXT(card, 2, "cmsetpcb"); 2397 2398 iob = (struct qeth_cmd_buffer *) data; 2399 memcpy(&card->token.cm_connection_r, 2400 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), 2401 QETH_MPC_TOKEN_LENGTH); 2402 return 0; 2403 } 2404 2405 static int qeth_cm_setup(struct qeth_card *card) 2406 { 2407 struct qeth_cmd_buffer *iob; 2408 2409 QETH_CARD_TEXT(card, 2, "cmsetup"); 2410 2411 iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE); 2412 if (!iob) 2413 return -ENOMEM; 2414 2415 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), 2416 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2417 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), 2418 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); 2419 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), 2420 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); 2421 return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL); 2422 } 2423 2424 static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type) 2425 { 2426 if (link_type == QETH_LINK_TYPE_LANE_TR || 2427 link_type == QETH_LINK_TYPE_HSTR) { 2428 dev_err(&card->gdev->dev, "Unsupported Token Ring device\n"); 2429 return false; 2430 } 2431 2432 return true; 2433 } 2434 2435 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) 2436 { 2437 struct net_device *dev = card->dev; 2438 unsigned int new_mtu; 2439 2440 if (!max_mtu) { 2441 /* IQD needs accurate max MTU to set up its RX buffers: */ 2442 if (IS_IQD(card)) 2443 return -EINVAL; 2444 /* tolerate quirky HW: */ 2445 max_mtu = ETH_MAX_MTU; 2446 } 2447 2448 rtnl_lock(); 2449 if (IS_IQD(card)) { 2450 /* move any device with default MTU to new max MTU: */ 2451 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu; 2452 2453 /* adjust RX buffer size to new max MTU: */ 2454 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; 2455 if (dev->max_mtu && dev->max_mtu != max_mtu) 2456 qeth_free_qdio_queues(card); 2457 } else { 2458 if (dev->mtu) 2459 new_mtu = dev->mtu; 2460 /* default MTUs for first setup: */ 2461 else if (IS_LAYER2(card)) 2462 new_mtu = ETH_DATA_LEN; 2463 else 2464 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ 2465 } 2466 2467 dev->max_mtu = max_mtu; 2468 dev->mtu = min(new_mtu, max_mtu); 2469 rtnl_unlock(); 2470 return 0; 2471 } 2472 2473 static int qeth_get_mtu_outof_framesize(int framesize) 2474 { 2475 switch (framesize) { 2476 case 0x4000: 2477 return 8192; 2478 case 0x6000: 2479 return 16384; 2480 case 0xa000: 2481 return 32768; 2482 case 0xffff: 2483 return 57344; 2484 default: 2485 return 0; 2486 } 2487 } 2488 2489 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2490 unsigned long data) 2491 { 2492 __u16 mtu, framesize; 2493 __u16 len; 2494 struct qeth_cmd_buffer *iob; 2495 u8 link_type = 0; 2496 2497 QETH_CARD_TEXT(card, 2, "ulpenacb"); 2498 2499 iob = (struct qeth_cmd_buffer *) data; 2500 memcpy(&card->token.ulp_filter_r, 2501 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), 2502 QETH_MPC_TOKEN_LENGTH); 2503 if (IS_IQD(card)) { 2504 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); 2505 mtu = qeth_get_mtu_outof_framesize(framesize); 2506 } else { 2507 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data); 2508 } 2509 *(u16 *)reply->param = mtu; 2510 2511 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); 2512 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { 2513 memcpy(&link_type, 2514 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); 2515 if (!qeth_is_supported_link_type(card, link_type)) 2516 return -EPROTONOSUPPORT; 2517 } 2518 2519 card->info.link_type = link_type; 2520 QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type); 2521 return 0; 2522 } 2523 2524 static u8 qeth_mpc_select_prot_type(struct qeth_card *card) 2525 { 2526 if (IS_OSN(card)) 2527 return QETH_PROT_OSN2; 2528 return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP; 2529 } 2530 2531 static int qeth_ulp_enable(struct qeth_card *card) 2532 { 2533 u8 prot_type = qeth_mpc_select_prot_type(card); 2534 struct qeth_cmd_buffer *iob; 2535 u16 max_mtu; 2536 int rc; 2537 2538 QETH_CARD_TEXT(card, 2, "ulpenabl"); 2539 2540 iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE); 2541 if (!iob) 2542 return -ENOMEM; 2543 2544 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; 2545 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); 2546 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), 2547 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2548 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), 2549 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); 2550 rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu); 2551 if (rc) 2552 return rc; 2553 return qeth_update_max_mtu(card, max_mtu); 2554 } 2555 2556 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2557 unsigned long data) 2558 { 2559 struct qeth_cmd_buffer *iob; 2560 2561 QETH_CARD_TEXT(card, 2, "ulpstpcb"); 2562 2563 iob = (struct qeth_cmd_buffer *) data; 2564 memcpy(&card->token.ulp_connection_r, 2565 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2566 QETH_MPC_TOKEN_LENGTH); 2567 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2568 3)) { 2569 QETH_CARD_TEXT(card, 2, "olmlimit"); 2570 dev_err(&card->gdev->dev, "A connection could not be " 2571 "established because of an OLM limit\n"); 2572 return -EMLINK; 2573 } 2574 return 0; 2575 } 2576 2577 static int qeth_ulp_setup(struct qeth_card *card) 2578 { 2579 __u16 temp; 2580 struct qeth_cmd_buffer *iob; 2581 2582 QETH_CARD_TEXT(card, 2, "ulpsetup"); 2583 2584 iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE); 2585 if (!iob) 2586 return -ENOMEM; 2587 2588 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), 2589 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2590 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), 2591 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH); 2592 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), 2593 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); 2594 2595 memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2); 2596 temp = (card->info.cula << 8) + card->info.unit_addr2; 2597 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); 2598 return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL); 2599 } 2600 2601 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) 2602 { 2603 struct qeth_qdio_out_buffer *newbuf; 2604 2605 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC); 2606 if (!newbuf) 2607 return -ENOMEM; 2608 2609 newbuf->buffer = q->qdio_bufs[bidx]; 2610 skb_queue_head_init(&newbuf->skb_list); 2611 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); 2612 newbuf->q = q; 2613 newbuf->next_pending = q->bufs[bidx]; 2614 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); 2615 q->bufs[bidx] = newbuf; 2616 return 0; 2617 } 2618 2619 static void qeth_free_output_queue(struct qeth_qdio_out_q *q) 2620 { 2621 if (!q) 2622 return; 2623 2624 qeth_drain_output_queue(q, true); 2625 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2626 kfree(q); 2627 } 2628 2629 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) 2630 { 2631 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); 2632 2633 if (!q) 2634 return NULL; 2635 2636 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { 2637 kfree(q); 2638 return NULL; 2639 } 2640 return q; 2641 } 2642 2643 static void qeth_tx_completion_timer(struct timer_list *timer) 2644 { 2645 struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer); 2646 2647 napi_schedule(&queue->napi); 2648 QETH_TXQ_STAT_INC(queue, completion_timer); 2649 } 2650 2651 static int qeth_alloc_qdio_queues(struct qeth_card *card) 2652 { 2653 int i, j; 2654 2655 QETH_CARD_TEXT(card, 2, "allcqdbf"); 2656 2657 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, 2658 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) 2659 return 0; 2660 2661 QETH_CARD_TEXT(card, 2, "inq"); 2662 card->qdio.in_q = qeth_alloc_qdio_queue(); 2663 if (!card->qdio.in_q) 2664 goto out_nomem; 2665 2666 /* inbound buffer pool */ 2667 if (qeth_alloc_buffer_pool(card)) 2668 goto out_freeinq; 2669 2670 /* outbound */ 2671 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2672 struct qeth_qdio_out_q *queue; 2673 2674 queue = qeth_alloc_output_queue(); 2675 if (!queue) 2676 goto out_freeoutq; 2677 QETH_CARD_TEXT_(card, 2, "outq %i", i); 2678 QETH_CARD_HEX(card, 2, &queue, sizeof(void *)); 2679 card->qdio.out_qs[i] = queue; 2680 queue->card = card; 2681 queue->queue_no = i; 2682 spin_lock_init(&queue->lock); 2683 timer_setup(&queue->timer, qeth_tx_completion_timer, 0); 2684 queue->coalesce_usecs = QETH_TX_COALESCE_USECS; 2685 queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES; 2686 queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT; 2687 2688 /* give outbound qeth_qdio_buffers their qdio_buffers */ 2689 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2690 WARN_ON(queue->bufs[j]); 2691 if (qeth_init_qdio_out_buf(queue, j)) 2692 goto out_freeoutqbufs; 2693 } 2694 } 2695 2696 /* completion */ 2697 if (qeth_alloc_cq(card)) 2698 goto out_freeoutq; 2699 2700 return 0; 2701 2702 out_freeoutqbufs: 2703 while (j > 0) { 2704 --j; 2705 kmem_cache_free(qeth_qdio_outbuf_cache, 2706 card->qdio.out_qs[i]->bufs[j]); 2707 card->qdio.out_qs[i]->bufs[j] = NULL; 2708 } 2709 out_freeoutq: 2710 while (i > 0) { 2711 qeth_free_output_queue(card->qdio.out_qs[--i]); 2712 card->qdio.out_qs[i] = NULL; 2713 } 2714 qeth_free_buffer_pool(card); 2715 out_freeinq: 2716 qeth_free_qdio_queue(card->qdio.in_q); 2717 card->qdio.in_q = NULL; 2718 out_nomem: 2719 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 2720 return -ENOMEM; 2721 } 2722 2723 static void qeth_free_qdio_queues(struct qeth_card *card) 2724 { 2725 int i, j; 2726 2727 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == 2728 QETH_QDIO_UNINITIALIZED) 2729 return; 2730 2731 qeth_free_cq(card); 2732 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2733 if (card->qdio.in_q->bufs[j].rx_skb) 2734 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); 2735 } 2736 qeth_free_qdio_queue(card->qdio.in_q); 2737 card->qdio.in_q = NULL; 2738 /* inbound buffer pool */ 2739 qeth_free_buffer_pool(card); 2740 /* free outbound qdio_qs */ 2741 for (i = 0; i < card->qdio.no_out_queues; i++) { 2742 qeth_free_output_queue(card->qdio.out_qs[i]); 2743 card->qdio.out_qs[i] = NULL; 2744 } 2745 } 2746 2747 static void qeth_fill_qib_parms(struct qeth_card *card, 2748 struct qeth_qib_parms *parms) 2749 { 2750 struct qeth_qdio_out_q *queue; 2751 unsigned int i; 2752 2753 parms->pcit_magic[0] = 'P'; 2754 parms->pcit_magic[1] = 'C'; 2755 parms->pcit_magic[2] = 'I'; 2756 parms->pcit_magic[3] = 'T'; 2757 ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic)); 2758 parms->pcit_a = QETH_PCI_THRESHOLD_A(card); 2759 parms->pcit_b = QETH_PCI_THRESHOLD_B(card); 2760 parms->pcit_c = QETH_PCI_TIMER_VALUE(card); 2761 2762 parms->blkt_magic[0] = 'B'; 2763 parms->blkt_magic[1] = 'L'; 2764 parms->blkt_magic[2] = 'K'; 2765 parms->blkt_magic[3] = 'T'; 2766 ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic)); 2767 parms->blkt_total = card->info.blkt.time_total; 2768 parms->blkt_inter_packet = card->info.blkt.inter_packet; 2769 parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo; 2770 2771 /* Prio-queueing implicitly uses the default priorities: */ 2772 if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1) 2773 return; 2774 2775 parms->pque_magic[0] = 'P'; 2776 parms->pque_magic[1] = 'Q'; 2777 parms->pque_magic[2] = 'U'; 2778 parms->pque_magic[3] = 'E'; 2779 ASCEBC(parms->pque_magic, sizeof(parms->pque_magic)); 2780 parms->pque_order = QETH_QIB_PQUE_ORDER_RR; 2781 parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL; 2782 2783 qeth_for_each_output_queue(card, queue, i) 2784 parms->pque_priority[i] = queue->priority; 2785 } 2786 2787 static int qeth_qdio_activate(struct qeth_card *card) 2788 { 2789 QETH_CARD_TEXT(card, 3, "qdioact"); 2790 return qdio_activate(CARD_DDEV(card)); 2791 } 2792 2793 static int qeth_dm_act(struct qeth_card *card) 2794 { 2795 struct qeth_cmd_buffer *iob; 2796 2797 QETH_CARD_TEXT(card, 2, "dmact"); 2798 2799 iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE); 2800 if (!iob) 2801 return -ENOMEM; 2802 2803 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), 2804 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2805 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data), 2806 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2807 return qeth_send_control_data(card, iob, NULL, NULL); 2808 } 2809 2810 static int qeth_mpc_initialize(struct qeth_card *card) 2811 { 2812 int rc; 2813 2814 QETH_CARD_TEXT(card, 2, "mpcinit"); 2815 2816 rc = qeth_issue_next_read(card); 2817 if (rc) { 2818 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 2819 return rc; 2820 } 2821 rc = qeth_cm_enable(card); 2822 if (rc) { 2823 QETH_CARD_TEXT_(card, 2, "2err%d", rc); 2824 return rc; 2825 } 2826 rc = qeth_cm_setup(card); 2827 if (rc) { 2828 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 2829 return rc; 2830 } 2831 rc = qeth_ulp_enable(card); 2832 if (rc) { 2833 QETH_CARD_TEXT_(card, 2, "4err%d", rc); 2834 return rc; 2835 } 2836 rc = qeth_ulp_setup(card); 2837 if (rc) { 2838 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 2839 return rc; 2840 } 2841 rc = qeth_alloc_qdio_queues(card); 2842 if (rc) { 2843 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 2844 return rc; 2845 } 2846 rc = qeth_qdio_establish(card); 2847 if (rc) { 2848 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 2849 qeth_free_qdio_queues(card); 2850 return rc; 2851 } 2852 rc = qeth_qdio_activate(card); 2853 if (rc) { 2854 QETH_CARD_TEXT_(card, 2, "7err%d", rc); 2855 return rc; 2856 } 2857 rc = qeth_dm_act(card); 2858 if (rc) { 2859 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 2860 return rc; 2861 } 2862 2863 return 0; 2864 } 2865 2866 static void qeth_print_status_message(struct qeth_card *card) 2867 { 2868 switch (card->info.type) { 2869 case QETH_CARD_TYPE_OSD: 2870 case QETH_CARD_TYPE_OSM: 2871 case QETH_CARD_TYPE_OSX: 2872 /* VM will use a non-zero first character 2873 * to indicate a HiperSockets like reporting 2874 * of the level OSA sets the first character to zero 2875 * */ 2876 if (!card->info.mcl_level[0]) { 2877 sprintf(card->info.mcl_level, "%02x%02x", 2878 card->info.mcl_level[2], 2879 card->info.mcl_level[3]); 2880 break; 2881 } 2882 fallthrough; 2883 case QETH_CARD_TYPE_IQD: 2884 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) { 2885 card->info.mcl_level[0] = (char) _ebcasc[(__u8) 2886 card->info.mcl_level[0]]; 2887 card->info.mcl_level[1] = (char) _ebcasc[(__u8) 2888 card->info.mcl_level[1]]; 2889 card->info.mcl_level[2] = (char) _ebcasc[(__u8) 2890 card->info.mcl_level[2]]; 2891 card->info.mcl_level[3] = (char) _ebcasc[(__u8) 2892 card->info.mcl_level[3]]; 2893 card->info.mcl_level[QETH_MCL_LENGTH] = 0; 2894 } 2895 break; 2896 default: 2897 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1); 2898 } 2899 dev_info(&card->gdev->dev, 2900 "Device is a%s card%s%s%s\nwith link type %s.\n", 2901 qeth_get_cardname(card), 2902 (card->info.mcl_level[0]) ? " (level: " : "", 2903 (card->info.mcl_level[0]) ? card->info.mcl_level : "", 2904 (card->info.mcl_level[0]) ? ")" : "", 2905 qeth_get_cardname_short(card)); 2906 } 2907 2908 static void qeth_initialize_working_pool_list(struct qeth_card *card) 2909 { 2910 struct qeth_buffer_pool_entry *entry; 2911 2912 QETH_CARD_TEXT(card, 5, "inwrklst"); 2913 2914 list_for_each_entry(entry, 2915 &card->qdio.init_pool.entry_list, init_list) { 2916 qeth_put_buffer_pool_entry(card, entry); 2917 } 2918 } 2919 2920 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( 2921 struct qeth_card *card) 2922 { 2923 struct qeth_buffer_pool_entry *entry; 2924 int i, free; 2925 2926 if (list_empty(&card->qdio.in_buf_pool.entry_list)) 2927 return NULL; 2928 2929 list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) { 2930 free = 1; 2931 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2932 if (page_count(entry->elements[i]) > 1) { 2933 free = 0; 2934 break; 2935 } 2936 } 2937 if (free) { 2938 list_del_init(&entry->list); 2939 return entry; 2940 } 2941 } 2942 2943 /* no free buffer in pool so take first one and swap pages */ 2944 entry = list_first_entry(&card->qdio.in_buf_pool.entry_list, 2945 struct qeth_buffer_pool_entry, list); 2946 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2947 if (page_count(entry->elements[i]) > 1) { 2948 struct page *page = dev_alloc_page(); 2949 2950 if (!page) 2951 return NULL; 2952 2953 __free_page(entry->elements[i]); 2954 entry->elements[i] = page; 2955 QETH_CARD_STAT_INC(card, rx_sg_alloc_page); 2956 } 2957 } 2958 list_del_init(&entry->list); 2959 return entry; 2960 } 2961 2962 static int qeth_init_input_buffer(struct qeth_card *card, 2963 struct qeth_qdio_buffer *buf) 2964 { 2965 struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry; 2966 int i; 2967 2968 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { 2969 buf->rx_skb = netdev_alloc_skb(card->dev, 2970 ETH_HLEN + 2971 sizeof(struct ipv6hdr)); 2972 if (!buf->rx_skb) 2973 return -ENOMEM; 2974 } 2975 2976 if (!pool_entry) { 2977 pool_entry = qeth_find_free_buffer_pool_entry(card); 2978 if (!pool_entry) 2979 return -ENOBUFS; 2980 2981 buf->pool_entry = pool_entry; 2982 } 2983 2984 /* 2985 * since the buffer is accessed only from the input_tasklet 2986 * there shouldn't be a need to synchronize; also, since we use 2987 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off 2988 * buffers 2989 */ 2990 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2991 buf->buffer->element[i].length = PAGE_SIZE; 2992 buf->buffer->element[i].addr = 2993 page_to_phys(pool_entry->elements[i]); 2994 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) 2995 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY; 2996 else 2997 buf->buffer->element[i].eflags = 0; 2998 buf->buffer->element[i].sflags = 0; 2999 } 3000 return 0; 3001 } 3002 3003 static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card, 3004 struct qeth_qdio_out_q *queue) 3005 { 3006 if (!IS_IQD(card) || 3007 qeth_iqd_is_mcast_queue(card, queue) || 3008 card->options.cq == QETH_CQ_ENABLED || 3009 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd)) 3010 return 1; 3011 3012 return card->ssqd.mmwc ? card->ssqd.mmwc : 1; 3013 } 3014 3015 static int qeth_init_qdio_queues(struct qeth_card *card) 3016 { 3017 unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count; 3018 unsigned int i; 3019 int rc; 3020 3021 QETH_CARD_TEXT(card, 2, "initqdqs"); 3022 3023 /* inbound queue */ 3024 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 3025 memset(&card->rx, 0, sizeof(struct qeth_rx)); 3026 3027 qeth_initialize_working_pool_list(card); 3028 /*give only as many buffers to hardware as we have buffer pool entries*/ 3029 for (i = 0; i < rx_bufs; i++) { 3030 rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); 3031 if (rc) 3032 return rc; 3033 } 3034 3035 card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs); 3036 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs); 3037 if (rc) { 3038 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 3039 return rc; 3040 } 3041 3042 /* completion */ 3043 rc = qeth_cq_init(card); 3044 if (rc) { 3045 return rc; 3046 } 3047 3048 /* outbound queue */ 3049 for (i = 0; i < card->qdio.no_out_queues; ++i) { 3050 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i]; 3051 3052 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 3053 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card); 3054 queue->next_buf_to_fill = 0; 3055 queue->do_pack = 0; 3056 queue->prev_hdr = NULL; 3057 queue->coalesced_frames = 0; 3058 queue->bulk_start = 0; 3059 queue->bulk_count = 0; 3060 queue->bulk_max = qeth_tx_select_bulk_max(card, queue); 3061 atomic_set(&queue->used_buffers, 0); 3062 atomic_set(&queue->set_pci_flags_count, 0); 3063 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i)); 3064 } 3065 return 0; 3066 } 3067 3068 static void qeth_ipa_finalize_cmd(struct qeth_card *card, 3069 struct qeth_cmd_buffer *iob) 3070 { 3071 qeth_mpc_finalize_cmd(card, iob); 3072 3073 /* override with IPA-specific values: */ 3074 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++; 3075 } 3076 3077 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 3078 u16 cmd_length, 3079 bool (*match)(struct qeth_cmd_buffer *iob, 3080 struct qeth_cmd_buffer *reply)) 3081 { 3082 u8 prot_type = qeth_mpc_select_prot_type(card); 3083 u16 total_length = iob->length; 3084 3085 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length, 3086 iob->data); 3087 iob->finalize = qeth_ipa_finalize_cmd; 3088 iob->match = match; 3089 3090 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); 3091 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); 3092 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); 3093 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2); 3094 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2); 3095 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), 3096 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 3097 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2); 3098 } 3099 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); 3100 3101 static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob, 3102 struct qeth_cmd_buffer *reply) 3103 { 3104 struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply); 3105 3106 return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno); 3107 } 3108 3109 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, 3110 enum qeth_ipa_cmds cmd_code, 3111 enum qeth_prot_versions prot, 3112 unsigned int data_length) 3113 { 3114 struct qeth_cmd_buffer *iob; 3115 struct qeth_ipacmd_hdr *hdr; 3116 3117 data_length += offsetof(struct qeth_ipa_cmd, data); 3118 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1, 3119 QETH_IPA_TIMEOUT); 3120 if (!iob) 3121 return NULL; 3122 3123 qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply); 3124 3125 hdr = &__ipa_cmd(iob)->hdr; 3126 hdr->command = cmd_code; 3127 hdr->initiator = IPA_CMD_INITIATOR_HOST; 3128 /* hdr->seqno is set by qeth_send_control_data() */ 3129 hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH; 3130 hdr->rel_adapter_no = (u8) card->dev->dev_port; 3131 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1; 3132 hdr->param_count = 1; 3133 hdr->prot_version = prot; 3134 return iob; 3135 } 3136 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd); 3137 3138 static int qeth_send_ipa_cmd_cb(struct qeth_card *card, 3139 struct qeth_reply *reply, unsigned long data) 3140 { 3141 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3142 3143 return (cmd->hdr.return_code) ? -EIO : 0; 3144 } 3145 3146 /** 3147 * qeth_send_ipa_cmd() - send an IPA command 3148 * 3149 * See qeth_send_control_data() for explanation of the arguments. 3150 */ 3151 3152 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 3153 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, 3154 unsigned long), 3155 void *reply_param) 3156 { 3157 int rc; 3158 3159 QETH_CARD_TEXT(card, 4, "sendipa"); 3160 3161 if (card->read_or_write_problem) { 3162 qeth_put_cmd(iob); 3163 return -EIO; 3164 } 3165 3166 if (reply_cb == NULL) 3167 reply_cb = qeth_send_ipa_cmd_cb; 3168 rc = qeth_send_control_data(card, iob, reply_cb, reply_param); 3169 if (rc == -ETIME) { 3170 qeth_clear_ipacmd_list(card); 3171 qeth_schedule_recovery(card); 3172 } 3173 return rc; 3174 } 3175 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); 3176 3177 static int qeth_send_startlan_cb(struct qeth_card *card, 3178 struct qeth_reply *reply, unsigned long data) 3179 { 3180 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3181 3182 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE) 3183 return -ENETDOWN; 3184 3185 return (cmd->hdr.return_code) ? -EIO : 0; 3186 } 3187 3188 static int qeth_send_startlan(struct qeth_card *card) 3189 { 3190 struct qeth_cmd_buffer *iob; 3191 3192 QETH_CARD_TEXT(card, 2, "strtlan"); 3193 3194 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0); 3195 if (!iob) 3196 return -ENOMEM; 3197 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL); 3198 } 3199 3200 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) 3201 { 3202 if (!cmd->hdr.return_code) 3203 cmd->hdr.return_code = 3204 cmd->data.setadapterparms.hdr.return_code; 3205 return cmd->hdr.return_code; 3206 } 3207 3208 static int qeth_query_setadapterparms_cb(struct qeth_card *card, 3209 struct qeth_reply *reply, unsigned long data) 3210 { 3211 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3212 struct qeth_query_cmds_supp *query_cmd; 3213 3214 QETH_CARD_TEXT(card, 3, "quyadpcb"); 3215 if (qeth_setadpparms_inspect_rc(cmd)) 3216 return -EIO; 3217 3218 query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp; 3219 if (query_cmd->lan_type & 0x7f) { 3220 if (!qeth_is_supported_link_type(card, query_cmd->lan_type)) 3221 return -EPROTONOSUPPORT; 3222 3223 card->info.link_type = query_cmd->lan_type; 3224 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type); 3225 } 3226 3227 card->options.adp.supported = query_cmd->supported_cmds; 3228 return 0; 3229 } 3230 3231 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, 3232 enum qeth_ipa_setadp_cmd adp_cmd, 3233 unsigned int data_length) 3234 { 3235 struct qeth_ipacmd_setadpparms_hdr *hdr; 3236 struct qeth_cmd_buffer *iob; 3237 3238 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4, 3239 data_length + 3240 offsetof(struct qeth_ipacmd_setadpparms, 3241 data)); 3242 if (!iob) 3243 return NULL; 3244 3245 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr; 3246 hdr->cmdlength = sizeof(*hdr) + data_length; 3247 hdr->command_code = adp_cmd; 3248 hdr->used_total = 1; 3249 hdr->seq_no = 1; 3250 return iob; 3251 } 3252 3253 static int qeth_query_setadapterparms(struct qeth_card *card) 3254 { 3255 int rc; 3256 struct qeth_cmd_buffer *iob; 3257 3258 QETH_CARD_TEXT(card, 3, "queryadp"); 3259 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, 3260 SETADP_DATA_SIZEOF(query_cmds_supp)); 3261 if (!iob) 3262 return -ENOMEM; 3263 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); 3264 return rc; 3265 } 3266 3267 static int qeth_query_ipassists_cb(struct qeth_card *card, 3268 struct qeth_reply *reply, unsigned long data) 3269 { 3270 struct qeth_ipa_cmd *cmd; 3271 3272 QETH_CARD_TEXT(card, 2, "qipasscb"); 3273 3274 cmd = (struct qeth_ipa_cmd *) data; 3275 3276 switch (cmd->hdr.return_code) { 3277 case IPA_RC_SUCCESS: 3278 break; 3279 case IPA_RC_NOTSUPP: 3280 case IPA_RC_L2_UNSUPPORTED_CMD: 3281 QETH_CARD_TEXT(card, 2, "ipaunsup"); 3282 card->options.ipa4.supported |= IPA_SETADAPTERPARMS; 3283 card->options.ipa6.supported |= IPA_SETADAPTERPARMS; 3284 return -EOPNOTSUPP; 3285 default: 3286 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", 3287 CARD_DEVID(card), cmd->hdr.return_code); 3288 return -EIO; 3289 } 3290 3291 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 3292 card->options.ipa4 = cmd->hdr.assists; 3293 else if (cmd->hdr.prot_version == QETH_PROT_IPV6) 3294 card->options.ipa6 = cmd->hdr.assists; 3295 else 3296 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n", 3297 CARD_DEVID(card)); 3298 return 0; 3299 } 3300 3301 static int qeth_query_ipassists(struct qeth_card *card, 3302 enum qeth_prot_versions prot) 3303 { 3304 int rc; 3305 struct qeth_cmd_buffer *iob; 3306 3307 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot); 3308 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0); 3309 if (!iob) 3310 return -ENOMEM; 3311 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); 3312 return rc; 3313 } 3314 3315 static int qeth_query_switch_attributes_cb(struct qeth_card *card, 3316 struct qeth_reply *reply, unsigned long data) 3317 { 3318 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3319 struct qeth_query_switch_attributes *attrs; 3320 struct qeth_switch_info *sw_info; 3321 3322 QETH_CARD_TEXT(card, 2, "qswiatcb"); 3323 if (qeth_setadpparms_inspect_rc(cmd)) 3324 return -EIO; 3325 3326 sw_info = (struct qeth_switch_info *)reply->param; 3327 attrs = &cmd->data.setadapterparms.data.query_switch_attributes; 3328 sw_info->capabilities = attrs->capabilities; 3329 sw_info->settings = attrs->settings; 3330 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, 3331 sw_info->settings); 3332 return 0; 3333 } 3334 3335 int qeth_query_switch_attributes(struct qeth_card *card, 3336 struct qeth_switch_info *sw_info) 3337 { 3338 struct qeth_cmd_buffer *iob; 3339 3340 QETH_CARD_TEXT(card, 2, "qswiattr"); 3341 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES)) 3342 return -EOPNOTSUPP; 3343 if (!netif_carrier_ok(card->dev)) 3344 return -ENOMEDIUM; 3345 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0); 3346 if (!iob) 3347 return -ENOMEM; 3348 return qeth_send_ipa_cmd(card, iob, 3349 qeth_query_switch_attributes_cb, sw_info); 3350 } 3351 3352 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card, 3353 enum qeth_diags_cmds sub_cmd, 3354 unsigned int data_length) 3355 { 3356 struct qeth_ipacmd_diagass *cmd; 3357 struct qeth_cmd_buffer *iob; 3358 3359 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE, 3360 DIAG_HDR_LEN + data_length); 3361 if (!iob) 3362 return NULL; 3363 3364 cmd = &__ipa_cmd(iob)->data.diagass; 3365 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length; 3366 cmd->subcmd = sub_cmd; 3367 return iob; 3368 } 3369 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd); 3370 3371 static int qeth_query_setdiagass_cb(struct qeth_card *card, 3372 struct qeth_reply *reply, unsigned long data) 3373 { 3374 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3375 u16 rc = cmd->hdr.return_code; 3376 3377 if (rc) { 3378 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); 3379 return -EIO; 3380 } 3381 3382 card->info.diagass_support = cmd->data.diagass.ext; 3383 return 0; 3384 } 3385 3386 static int qeth_query_setdiagass(struct qeth_card *card) 3387 { 3388 struct qeth_cmd_buffer *iob; 3389 3390 QETH_CARD_TEXT(card, 2, "qdiagass"); 3391 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0); 3392 if (!iob) 3393 return -ENOMEM; 3394 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); 3395 } 3396 3397 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) 3398 { 3399 unsigned long info = get_zeroed_page(GFP_KERNEL); 3400 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; 3401 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; 3402 struct ccw_dev_id ccwid; 3403 int level; 3404 3405 tid->chpid = card->info.chpid; 3406 ccw_device_get_id(CARD_RDEV(card), &ccwid); 3407 tid->ssid = ccwid.ssid; 3408 tid->devno = ccwid.devno; 3409 if (!info) 3410 return; 3411 level = stsi(NULL, 0, 0, 0); 3412 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0)) 3413 tid->lparnr = info222->lpar_number; 3414 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) { 3415 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name)); 3416 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname)); 3417 } 3418 free_page(info); 3419 } 3420 3421 static int qeth_hw_trap_cb(struct qeth_card *card, 3422 struct qeth_reply *reply, unsigned long data) 3423 { 3424 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3425 u16 rc = cmd->hdr.return_code; 3426 3427 if (rc) { 3428 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc); 3429 return -EIO; 3430 } 3431 return 0; 3432 } 3433 3434 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) 3435 { 3436 struct qeth_cmd_buffer *iob; 3437 struct qeth_ipa_cmd *cmd; 3438 3439 QETH_CARD_TEXT(card, 2, "diagtrap"); 3440 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64); 3441 if (!iob) 3442 return -ENOMEM; 3443 cmd = __ipa_cmd(iob); 3444 cmd->data.diagass.type = 1; 3445 cmd->data.diagass.action = action; 3446 switch (action) { 3447 case QETH_DIAGS_TRAP_ARM: 3448 cmd->data.diagass.options = 0x0003; 3449 cmd->data.diagass.ext = 0x00010000 + 3450 sizeof(struct qeth_trap_id); 3451 qeth_get_trap_id(card, 3452 (struct qeth_trap_id *)cmd->data.diagass.cdata); 3453 break; 3454 case QETH_DIAGS_TRAP_DISARM: 3455 cmd->data.diagass.options = 0x0001; 3456 break; 3457 case QETH_DIAGS_TRAP_CAPTURE: 3458 break; 3459 } 3460 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL); 3461 } 3462 3463 static int qeth_check_qdio_errors(struct qeth_card *card, 3464 struct qdio_buffer *buf, 3465 unsigned int qdio_error, 3466 const char *dbftext) 3467 { 3468 if (qdio_error) { 3469 QETH_CARD_TEXT(card, 2, dbftext); 3470 QETH_CARD_TEXT_(card, 2, " F15=%02X", 3471 buf->element[15].sflags); 3472 QETH_CARD_TEXT_(card, 2, " F14=%02X", 3473 buf->element[14].sflags); 3474 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); 3475 if ((buf->element[15].sflags) == 0x12) { 3476 QETH_CARD_STAT_INC(card, rx_fifo_errors); 3477 return 0; 3478 } else 3479 return 1; 3480 } 3481 return 0; 3482 } 3483 3484 static unsigned int qeth_rx_refill_queue(struct qeth_card *card, 3485 unsigned int count) 3486 { 3487 struct qeth_qdio_q *queue = card->qdio.in_q; 3488 struct list_head *lh; 3489 int i; 3490 int rc; 3491 int newcount = 0; 3492 3493 /* only requeue at a certain threshold to avoid SIGAs */ 3494 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) { 3495 for (i = queue->next_buf_to_init; 3496 i < queue->next_buf_to_init + count; ++i) { 3497 if (qeth_init_input_buffer(card, 3498 &queue->bufs[QDIO_BUFNR(i)])) { 3499 break; 3500 } else { 3501 newcount++; 3502 } 3503 } 3504 3505 if (newcount < count) { 3506 /* we are in memory shortage so we switch back to 3507 traditional skb allocation and drop packages */ 3508 atomic_set(&card->force_alloc_skb, 3); 3509 count = newcount; 3510 } else { 3511 atomic_add_unless(&card->force_alloc_skb, -1, 0); 3512 } 3513 3514 if (!count) { 3515 i = 0; 3516 list_for_each(lh, &card->qdio.in_buf_pool.entry_list) 3517 i++; 3518 if (i == card->qdio.in_buf_pool.buf_count) { 3519 QETH_CARD_TEXT(card, 2, "qsarbw"); 3520 schedule_delayed_work( 3521 &card->buffer_reclaim_work, 3522 QETH_RECLAIM_WORK_TIME); 3523 } 3524 return 0; 3525 } 3526 3527 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 3528 queue->next_buf_to_init, count); 3529 if (rc) { 3530 QETH_CARD_TEXT(card, 2, "qinberr"); 3531 } 3532 queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init + 3533 count); 3534 return count; 3535 } 3536 3537 return 0; 3538 } 3539 3540 static void qeth_buffer_reclaim_work(struct work_struct *work) 3541 { 3542 struct qeth_card *card = container_of(to_delayed_work(work), 3543 struct qeth_card, 3544 buffer_reclaim_work); 3545 3546 local_bh_disable(); 3547 napi_schedule(&card->napi); 3548 /* kick-start the NAPI softirq: */ 3549 local_bh_enable(); 3550 } 3551 3552 static void qeth_handle_send_error(struct qeth_card *card, 3553 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) 3554 { 3555 int sbalf15 = buffer->buffer->element[15].sflags; 3556 3557 QETH_CARD_TEXT(card, 6, "hdsnderr"); 3558 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr"); 3559 3560 if (!qdio_err) 3561 return; 3562 3563 if ((sbalf15 >= 15) && (sbalf15 <= 31)) 3564 return; 3565 3566 QETH_CARD_TEXT(card, 1, "lnkfail"); 3567 QETH_CARD_TEXT_(card, 1, "%04x %02x", 3568 (u16)qdio_err, (u8)sbalf15); 3569 } 3570 3571 /** 3572 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer. 3573 * @queue: queue to check for packing buffer 3574 * 3575 * Returns number of buffers that were prepared for flush. 3576 */ 3577 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue) 3578 { 3579 struct qeth_qdio_out_buffer *buffer; 3580 3581 buffer = queue->bufs[queue->next_buf_to_fill]; 3582 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && 3583 (buffer->next_element_to_fill > 0)) { 3584 /* it's a packing buffer */ 3585 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3586 queue->next_buf_to_fill = 3587 QDIO_BUFNR(queue->next_buf_to_fill + 1); 3588 return 1; 3589 } 3590 return 0; 3591 } 3592 3593 /* 3594 * Switched to packing state if the number of used buffers on a queue 3595 * reaches a certain limit. 3596 */ 3597 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) 3598 { 3599 if (!queue->do_pack) { 3600 if (atomic_read(&queue->used_buffers) 3601 >= QETH_HIGH_WATERMARK_PACK){ 3602 /* switch non-PACKING -> PACKING */ 3603 QETH_CARD_TEXT(queue->card, 6, "np->pack"); 3604 QETH_TXQ_STAT_INC(queue, packing_mode_switch); 3605 queue->do_pack = 1; 3606 } 3607 } 3608 } 3609 3610 /* 3611 * Switches from packing to non-packing mode. If there is a packing 3612 * buffer on the queue this buffer will be prepared to be flushed. 3613 * In that case 1 is returned to inform the caller. If no buffer 3614 * has to be flushed, zero is returned. 3615 */ 3616 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) 3617 { 3618 if (queue->do_pack) { 3619 if (atomic_read(&queue->used_buffers) 3620 <= QETH_LOW_WATERMARK_PACK) { 3621 /* switch PACKING -> non-PACKING */ 3622 QETH_CARD_TEXT(queue->card, 6, "pack->np"); 3623 QETH_TXQ_STAT_INC(queue, packing_mode_switch); 3624 queue->do_pack = 0; 3625 return qeth_prep_flush_pack_buffer(queue); 3626 } 3627 } 3628 return 0; 3629 } 3630 3631 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, 3632 int count) 3633 { 3634 struct qeth_qdio_out_buffer *buf = queue->bufs[index]; 3635 unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; 3636 struct qeth_card *card = queue->card; 3637 int rc; 3638 int i; 3639 3640 for (i = index; i < index + count; ++i) { 3641 unsigned int bidx = QDIO_BUFNR(i); 3642 struct sk_buff *skb; 3643 3644 buf = queue->bufs[bidx]; 3645 buf->buffer->element[buf->next_element_to_fill - 1].eflags |= 3646 SBAL_EFLAGS_LAST_ENTRY; 3647 queue->coalesced_frames += buf->frames; 3648 3649 if (queue->bufstates) 3650 queue->bufstates[bidx].user = buf; 3651 3652 if (IS_IQD(card)) { 3653 skb_queue_walk(&buf->skb_list, skb) 3654 skb_tx_timestamp(skb); 3655 } 3656 } 3657 3658 if (!IS_IQD(card)) { 3659 if (!queue->do_pack) { 3660 if ((atomic_read(&queue->used_buffers) >= 3661 (QETH_HIGH_WATERMARK_PACK - 3662 QETH_WATERMARK_PACK_FUZZ)) && 3663 !atomic_read(&queue->set_pci_flags_count)) { 3664 /* it's likely that we'll go to packing 3665 * mode soon */ 3666 atomic_inc(&queue->set_pci_flags_count); 3667 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; 3668 } 3669 } else { 3670 if (!atomic_read(&queue->set_pci_flags_count)) { 3671 /* 3672 * there's no outstanding PCI any more, so we 3673 * have to request a PCI to be sure the the PCI 3674 * will wake at some time in the future then we 3675 * can flush packed buffers that might still be 3676 * hanging around, which can happen if no 3677 * further send was requested by the stack 3678 */ 3679 atomic_inc(&queue->set_pci_flags_count); 3680 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; 3681 } 3682 } 3683 3684 if (atomic_read(&queue->set_pci_flags_count)) 3685 qdio_flags |= QDIO_FLAG_PCI_OUT; 3686 } 3687 3688 QETH_TXQ_STAT_INC(queue, doorbell); 3689 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, 3690 queue->queue_no, index, count); 3691 3692 /* Fake the TX completion interrupt: */ 3693 if (IS_IQD(card)) { 3694 unsigned int frames = READ_ONCE(queue->max_coalesced_frames); 3695 unsigned int usecs = READ_ONCE(queue->coalesce_usecs); 3696 3697 if (frames && queue->coalesced_frames >= frames) { 3698 napi_schedule(&queue->napi); 3699 queue->coalesced_frames = 0; 3700 QETH_TXQ_STAT_INC(queue, coal_frames); 3701 } else if (usecs) { 3702 qeth_tx_arm_timer(queue, usecs); 3703 } 3704 } 3705 3706 if (rc) { 3707 /* ignore temporary SIGA errors without busy condition */ 3708 if (rc == -ENOBUFS) 3709 return; 3710 QETH_CARD_TEXT(queue->card, 2, "flushbuf"); 3711 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no); 3712 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index); 3713 QETH_CARD_TEXT_(queue->card, 2, " c%d", count); 3714 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc); 3715 3716 /* this must not happen under normal circumstances. if it 3717 * happens something is really wrong -> recover */ 3718 qeth_schedule_recovery(queue->card); 3719 return; 3720 } 3721 } 3722 3723 static void qeth_flush_queue(struct qeth_qdio_out_q *queue) 3724 { 3725 qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count); 3726 3727 queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count); 3728 queue->prev_hdr = NULL; 3729 queue->bulk_count = 0; 3730 } 3731 3732 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) 3733 { 3734 /* 3735 * check if weed have to switch to non-packing mode or if 3736 * we have to get a pci flag out on the queue 3737 */ 3738 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || 3739 !atomic_read(&queue->set_pci_flags_count)) { 3740 unsigned int index, flush_cnt; 3741 bool q_was_packing; 3742 3743 spin_lock(&queue->lock); 3744 3745 index = queue->next_buf_to_fill; 3746 q_was_packing = queue->do_pack; 3747 3748 flush_cnt = qeth_switch_to_nonpacking_if_needed(queue); 3749 if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count)) 3750 flush_cnt = qeth_prep_flush_pack_buffer(queue); 3751 3752 if (flush_cnt) { 3753 qeth_flush_buffers(queue, index, flush_cnt); 3754 if (q_was_packing) 3755 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt); 3756 } 3757 3758 spin_unlock(&queue->lock); 3759 } 3760 } 3761 3762 static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr) 3763 { 3764 struct qeth_card *card = (struct qeth_card *)card_ptr; 3765 3766 napi_schedule_irqoff(&card->napi); 3767 } 3768 3769 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) 3770 { 3771 int rc; 3772 3773 if (card->options.cq == QETH_CQ_NOTAVAILABLE) { 3774 rc = -1; 3775 goto out; 3776 } else { 3777 if (card->options.cq == cq) { 3778 rc = 0; 3779 goto out; 3780 } 3781 3782 qeth_free_qdio_queues(card); 3783 card->options.cq = cq; 3784 rc = 0; 3785 } 3786 out: 3787 return rc; 3788 3789 } 3790 EXPORT_SYMBOL_GPL(qeth_configure_cq); 3791 3792 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, 3793 unsigned int queue, int first_element, 3794 int count) 3795 { 3796 struct qeth_qdio_q *cq = card->qdio.c_q; 3797 int i; 3798 int rc; 3799 3800 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element); 3801 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count); 3802 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err); 3803 3804 if (qdio_err) { 3805 netif_tx_stop_all_queues(card->dev); 3806 qeth_schedule_recovery(card); 3807 return; 3808 } 3809 3810 for (i = first_element; i < first_element + count; ++i) { 3811 struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)]; 3812 int e = 0; 3813 3814 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && 3815 buffer->element[e].addr) { 3816 unsigned long phys_aob_addr = buffer->element[e].addr; 3817 3818 qeth_qdio_handle_aob(card, phys_aob_addr); 3819 ++e; 3820 } 3821 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); 3822 } 3823 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, 3824 card->qdio.c_q->next_buf_to_init, 3825 count); 3826 if (rc) { 3827 dev_warn(&card->gdev->dev, 3828 "QDIO reported an error, rc=%i\n", rc); 3829 QETH_CARD_TEXT(card, 2, "qcqherr"); 3830 } 3831 3832 cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count); 3833 } 3834 3835 static void qeth_qdio_input_handler(struct ccw_device *ccwdev, 3836 unsigned int qdio_err, int queue, 3837 int first_elem, int count, 3838 unsigned long card_ptr) 3839 { 3840 struct qeth_card *card = (struct qeth_card *)card_ptr; 3841 3842 QETH_CARD_TEXT_(card, 2, "qihq%d", queue); 3843 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err); 3844 3845 if (qdio_err) 3846 qeth_schedule_recovery(card); 3847 } 3848 3849 static void qeth_qdio_output_handler(struct ccw_device *ccwdev, 3850 unsigned int qdio_error, int __queue, 3851 int first_element, int count, 3852 unsigned long card_ptr) 3853 { 3854 struct qeth_card *card = (struct qeth_card *) card_ptr; 3855 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; 3856 struct net_device *dev = card->dev; 3857 struct netdev_queue *txq; 3858 int i; 3859 3860 QETH_CARD_TEXT(card, 6, "qdouhdl"); 3861 if (qdio_error & QDIO_ERROR_FATAL) { 3862 QETH_CARD_TEXT(card, 2, "achkcond"); 3863 netif_tx_stop_all_queues(dev); 3864 qeth_schedule_recovery(card); 3865 return; 3866 } 3867 3868 for (i = first_element; i < (first_element + count); ++i) { 3869 struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)]; 3870 3871 qeth_handle_send_error(card, buf, qdio_error); 3872 qeth_clear_output_buffer(queue, buf, qdio_error, 0); 3873 } 3874 3875 atomic_sub(count, &queue->used_buffers); 3876 qeth_check_outbound_queue(queue); 3877 3878 txq = netdev_get_tx_queue(dev, __queue); 3879 /* xmit may have observed the full-condition, but not yet stopped the 3880 * txq. In which case the code below won't trigger. So before returning, 3881 * xmit will re-check the txq's fill level and wake it up if needed. 3882 */ 3883 if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue)) 3884 netif_tx_wake_queue(txq); 3885 } 3886 3887 /** 3888 * Note: Function assumes that we have 4 outbound queues. 3889 */ 3890 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb) 3891 { 3892 struct vlan_ethhdr *veth = vlan_eth_hdr(skb); 3893 u8 tos; 3894 3895 switch (card->qdio.do_prio_queueing) { 3896 case QETH_PRIO_Q_ING_TOS: 3897 case QETH_PRIO_Q_ING_PREC: 3898 switch (qeth_get_ip_version(skb)) { 3899 case 4: 3900 tos = ipv4_get_dsfield(ip_hdr(skb)); 3901 break; 3902 case 6: 3903 tos = ipv6_get_dsfield(ipv6_hdr(skb)); 3904 break; 3905 default: 3906 return card->qdio.default_out_queue; 3907 } 3908 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) 3909 return ~tos >> 6 & 3; 3910 if (tos & IPTOS_MINCOST) 3911 return 3; 3912 if (tos & IPTOS_RELIABILITY) 3913 return 2; 3914 if (tos & IPTOS_THROUGHPUT) 3915 return 1; 3916 if (tos & IPTOS_LOWDELAY) 3917 return 0; 3918 break; 3919 case QETH_PRIO_Q_ING_SKB: 3920 if (skb->priority > 5) 3921 return 0; 3922 return ~skb->priority >> 1 & 3; 3923 case QETH_PRIO_Q_ING_VLAN: 3924 if (veth->h_vlan_proto == htons(ETH_P_8021Q)) 3925 return ~ntohs(veth->h_vlan_TCI) >> 3926 (VLAN_PRIO_SHIFT + 1) & 3; 3927 break; 3928 case QETH_PRIO_Q_ING_FIXED: 3929 return card->qdio.default_out_queue; 3930 default: 3931 break; 3932 } 3933 return card->qdio.default_out_queue; 3934 } 3935 EXPORT_SYMBOL_GPL(qeth_get_priority_queue); 3936 3937 /** 3938 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags. 3939 * @skb: SKB address 3940 * 3941 * Returns the number of pages, and thus QDIO buffer elements, needed to cover 3942 * fragmented part of the SKB. Returns zero for linear SKB. 3943 */ 3944 static int qeth_get_elements_for_frags(struct sk_buff *skb) 3945 { 3946 int cnt, elements = 0; 3947 3948 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3949 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 3950 3951 elements += qeth_get_elements_for_range( 3952 (addr_t)skb_frag_address(frag), 3953 (addr_t)skb_frag_address(frag) + skb_frag_size(frag)); 3954 } 3955 return elements; 3956 } 3957 3958 /** 3959 * qeth_count_elements() - Counts the number of QDIO buffer elements needed 3960 * to transmit an skb. 3961 * @skb: the skb to operate on. 3962 * @data_offset: skip this part of the skb's linear data 3963 * 3964 * Returns the number of pages, and thus QDIO buffer elements, needed to map the 3965 * skb's data (both its linear part and paged fragments). 3966 */ 3967 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset) 3968 { 3969 unsigned int elements = qeth_get_elements_for_frags(skb); 3970 addr_t end = (addr_t)skb->data + skb_headlen(skb); 3971 addr_t start = (addr_t)skb->data + data_offset; 3972 3973 if (start != end) 3974 elements += qeth_get_elements_for_range(start, end); 3975 return elements; 3976 } 3977 EXPORT_SYMBOL_GPL(qeth_count_elements); 3978 3979 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \ 3980 MAX_TCP_HEADER) 3981 3982 /** 3983 * qeth_add_hw_header() - add a HW header to an skb. 3984 * @skb: skb that the HW header should be added to. 3985 * @hdr: double pointer to a qeth_hdr. When returning with >= 0, 3986 * it contains a valid pointer to a qeth_hdr. 3987 * @hdr_len: length of the HW header. 3988 * @proto_len: length of protocol headers that need to be in same page as the 3989 * HW header. 3990 * 3991 * Returns the pushed length. If the header can't be pushed on 3992 * (eg. because it would cross a page boundary), it is allocated from 3993 * the cache instead and 0 is returned. 3994 * The number of needed buffer elements is returned in @elements. 3995 * Error to create the hdr is indicated by returning with < 0. 3996 */ 3997 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue, 3998 struct sk_buff *skb, struct qeth_hdr **hdr, 3999 unsigned int hdr_len, unsigned int proto_len, 4000 unsigned int *elements) 4001 { 4002 gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0); 4003 const unsigned int contiguous = proto_len ? proto_len : 1; 4004 const unsigned int max_elements = queue->max_elements; 4005 unsigned int __elements; 4006 addr_t start, end; 4007 bool push_ok; 4008 int rc; 4009 4010 check_layout: 4011 start = (addr_t)skb->data - hdr_len; 4012 end = (addr_t)skb->data; 4013 4014 if (qeth_get_elements_for_range(start, end + contiguous) == 1) { 4015 /* Push HW header into same page as first protocol header. */ 4016 push_ok = true; 4017 /* ... but TSO always needs a separate element for headers: */ 4018 if (skb_is_gso(skb)) 4019 __elements = 1 + qeth_count_elements(skb, proto_len); 4020 else 4021 __elements = qeth_count_elements(skb, 0); 4022 } else if (!proto_len && PAGE_ALIGNED(skb->data)) { 4023 /* Push HW header into preceding page, flush with skb->data. */ 4024 push_ok = true; 4025 __elements = 1 + qeth_count_elements(skb, 0); 4026 } else { 4027 /* Use header cache, copy protocol headers up. */ 4028 push_ok = false; 4029 __elements = 1 + qeth_count_elements(skb, proto_len); 4030 } 4031 4032 /* Compress skb to fit into one IO buffer: */ 4033 if (__elements > max_elements) { 4034 if (!skb_is_nonlinear(skb)) { 4035 /* Drop it, no easy way of shrinking it further. */ 4036 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n", 4037 max_elements, __elements, skb->len); 4038 return -E2BIG; 4039 } 4040 4041 rc = skb_linearize(skb); 4042 if (rc) { 4043 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail); 4044 return rc; 4045 } 4046 4047 QETH_TXQ_STAT_INC(queue, skbs_linearized); 4048 /* Linearization changed the layout, re-evaluate: */ 4049 goto check_layout; 4050 } 4051 4052 *elements = __elements; 4053 /* Add the header: */ 4054 if (push_ok) { 4055 *hdr = skb_push(skb, hdr_len); 4056 return hdr_len; 4057 } 4058 4059 /* Fall back to cache element with known-good alignment: */ 4060 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE) 4061 return -E2BIG; 4062 *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp); 4063 if (!*hdr) 4064 return -ENOMEM; 4065 /* Copy protocol headers behind HW header: */ 4066 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len); 4067 return 0; 4068 } 4069 4070 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue, 4071 struct sk_buff *curr_skb, 4072 struct qeth_hdr *curr_hdr) 4073 { 4074 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start]; 4075 struct qeth_hdr *prev_hdr = queue->prev_hdr; 4076 4077 if (!prev_hdr) 4078 return true; 4079 4080 /* All packets must have the same target: */ 4081 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 4082 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list); 4083 4084 return ether_addr_equal(eth_hdr(prev_skb)->h_dest, 4085 eth_hdr(curr_skb)->h_dest) && 4086 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2); 4087 } 4088 4089 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) && 4090 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3); 4091 } 4092 4093 /** 4094 * qeth_fill_buffer() - map skb into an output buffer 4095 * @buf: buffer to transport the skb 4096 * @skb: skb to map into the buffer 4097 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated 4098 * from qeth_core_header_cache. 4099 * @offset: when mapping the skb, start at skb->data + offset 4100 * @hd_len: if > 0, build a dedicated header element of this size 4101 */ 4102 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, 4103 struct sk_buff *skb, struct qeth_hdr *hdr, 4104 unsigned int offset, unsigned int hd_len) 4105 { 4106 struct qdio_buffer *buffer = buf->buffer; 4107 int element = buf->next_element_to_fill; 4108 int length = skb_headlen(skb) - offset; 4109 char *data = skb->data + offset; 4110 unsigned int elem_length, cnt; 4111 bool is_first_elem = true; 4112 4113 __skb_queue_tail(&buf->skb_list, skb); 4114 4115 /* build dedicated element for HW Header */ 4116 if (hd_len) { 4117 is_first_elem = false; 4118 4119 buffer->element[element].addr = virt_to_phys(hdr); 4120 buffer->element[element].length = hd_len; 4121 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; 4122 4123 /* HW header is allocated from cache: */ 4124 if ((void *)hdr != skb->data) 4125 buf->is_header[element] = 1; 4126 /* HW header was pushed and is contiguous with linear part: */ 4127 else if (length > 0 && !PAGE_ALIGNED(data) && 4128 (data == (char *)hdr + hd_len)) 4129 buffer->element[element].eflags |= 4130 SBAL_EFLAGS_CONTIGUOUS; 4131 4132 element++; 4133 } 4134 4135 /* map linear part into buffer element(s) */ 4136 while (length > 0) { 4137 elem_length = min_t(unsigned int, length, 4138 PAGE_SIZE - offset_in_page(data)); 4139 4140 buffer->element[element].addr = virt_to_phys(data); 4141 buffer->element[element].length = elem_length; 4142 length -= elem_length; 4143 if (is_first_elem) { 4144 is_first_elem = false; 4145 if (length || skb_is_nonlinear(skb)) 4146 /* skb needs additional elements */ 4147 buffer->element[element].eflags = 4148 SBAL_EFLAGS_FIRST_FRAG; 4149 else 4150 buffer->element[element].eflags = 0; 4151 } else { 4152 buffer->element[element].eflags = 4153 SBAL_EFLAGS_MIDDLE_FRAG; 4154 } 4155 4156 data += elem_length; 4157 element++; 4158 } 4159 4160 /* map page frags into buffer element(s) */ 4161 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 4162 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 4163 4164 data = skb_frag_address(frag); 4165 length = skb_frag_size(frag); 4166 while (length > 0) { 4167 elem_length = min_t(unsigned int, length, 4168 PAGE_SIZE - offset_in_page(data)); 4169 4170 buffer->element[element].addr = virt_to_phys(data); 4171 buffer->element[element].length = elem_length; 4172 buffer->element[element].eflags = 4173 SBAL_EFLAGS_MIDDLE_FRAG; 4174 4175 length -= elem_length; 4176 data += elem_length; 4177 element++; 4178 } 4179 } 4180 4181 if (buffer->element[element - 1].eflags) 4182 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; 4183 buf->next_element_to_fill = element; 4184 return element; 4185 } 4186 4187 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4188 struct sk_buff *skb, unsigned int elements, 4189 struct qeth_hdr *hdr, unsigned int offset, 4190 unsigned int hd_len) 4191 { 4192 unsigned int bytes = qdisc_pkt_len(skb); 4193 struct qeth_qdio_out_buffer *buffer; 4194 unsigned int next_element; 4195 struct netdev_queue *txq; 4196 bool stopped = false; 4197 bool flush; 4198 4199 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)]; 4200 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 4201 4202 /* Just a sanity check, the wake/stop logic should ensure that we always 4203 * get a free buffer. 4204 */ 4205 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4206 return -EBUSY; 4207 4208 flush = !qeth_iqd_may_bulk(queue, skb, hdr); 4209 4210 if (flush || 4211 (buffer->next_element_to_fill + elements > queue->max_elements)) { 4212 if (buffer->next_element_to_fill > 0) { 4213 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4214 queue->bulk_count++; 4215 } 4216 4217 if (queue->bulk_count >= queue->bulk_max) 4218 flush = true; 4219 4220 if (flush) 4221 qeth_flush_queue(queue); 4222 4223 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + 4224 queue->bulk_count)]; 4225 4226 /* Sanity-check again: */ 4227 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4228 return -EBUSY; 4229 } 4230 4231 if (buffer->next_element_to_fill == 0 && 4232 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 4233 /* If a TX completion happens right _here_ and misses to wake 4234 * the txq, then our re-check below will catch the race. 4235 */ 4236 QETH_TXQ_STAT_INC(queue, stopped); 4237 netif_tx_stop_queue(txq); 4238 stopped = true; 4239 } 4240 4241 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 4242 buffer->bytes += bytes; 4243 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 4244 queue->prev_hdr = hdr; 4245 4246 flush = __netdev_tx_sent_queue(txq, bytes, 4247 !stopped && netdev_xmit_more()); 4248 4249 if (flush || next_element >= queue->max_elements) { 4250 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4251 queue->bulk_count++; 4252 4253 if (queue->bulk_count >= queue->bulk_max) 4254 flush = true; 4255 4256 if (flush) 4257 qeth_flush_queue(queue); 4258 } 4259 4260 if (stopped && !qeth_out_queue_is_full(queue)) 4261 netif_tx_start_queue(txq); 4262 return 0; 4263 } 4264 4265 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4266 struct sk_buff *skb, struct qeth_hdr *hdr, 4267 unsigned int offset, unsigned int hd_len, 4268 int elements_needed) 4269 { 4270 unsigned int start_index = queue->next_buf_to_fill; 4271 struct qeth_qdio_out_buffer *buffer; 4272 unsigned int next_element; 4273 struct netdev_queue *txq; 4274 bool stopped = false; 4275 int flush_count = 0; 4276 int do_pack = 0; 4277 int rc = 0; 4278 4279 buffer = queue->bufs[queue->next_buf_to_fill]; 4280 4281 /* Just a sanity check, the wake/stop logic should ensure that we always 4282 * get a free buffer. 4283 */ 4284 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 4285 return -EBUSY; 4286 4287 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 4288 4289 /* check if we need to switch packing state of this queue */ 4290 qeth_switch_to_packing_if_needed(queue); 4291 if (queue->do_pack) { 4292 do_pack = 1; 4293 /* does packet fit in current buffer? */ 4294 if (buffer->next_element_to_fill + elements_needed > 4295 queue->max_elements) { 4296 /* ... no -> set state PRIMED */ 4297 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4298 flush_count++; 4299 queue->next_buf_to_fill = 4300 QDIO_BUFNR(queue->next_buf_to_fill + 1); 4301 buffer = queue->bufs[queue->next_buf_to_fill]; 4302 4303 /* We stepped forward, so sanity-check again: */ 4304 if (atomic_read(&buffer->state) != 4305 QETH_QDIO_BUF_EMPTY) { 4306 qeth_flush_buffers(queue, start_index, 4307 flush_count); 4308 rc = -EBUSY; 4309 goto out; 4310 } 4311 } 4312 } 4313 4314 if (buffer->next_element_to_fill == 0 && 4315 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 4316 /* If a TX completion happens right _here_ and misses to wake 4317 * the txq, then our re-check below will catch the race. 4318 */ 4319 QETH_TXQ_STAT_INC(queue, stopped); 4320 netif_tx_stop_queue(txq); 4321 stopped = true; 4322 } 4323 4324 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 4325 buffer->bytes += qdisc_pkt_len(skb); 4326 buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 4327 4328 if (queue->do_pack) 4329 QETH_TXQ_STAT_INC(queue, skbs_pack); 4330 if (!queue->do_pack || stopped || next_element >= queue->max_elements) { 4331 flush_count++; 4332 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 4333 queue->next_buf_to_fill = 4334 QDIO_BUFNR(queue->next_buf_to_fill + 1); 4335 } 4336 4337 if (flush_count) 4338 qeth_flush_buffers(queue, start_index, flush_count); 4339 4340 out: 4341 if (do_pack) 4342 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); 4343 4344 if (stopped && !qeth_out_queue_is_full(queue)) 4345 netif_tx_start_queue(txq); 4346 return rc; 4347 } 4348 EXPORT_SYMBOL_GPL(qeth_do_send_packet); 4349 4350 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, 4351 unsigned int payload_len, struct sk_buff *skb, 4352 unsigned int proto_len) 4353 { 4354 struct qeth_hdr_ext_tso *ext = &hdr->ext; 4355 4356 ext->hdr_tot_len = sizeof(*ext); 4357 ext->imb_hdr_no = 1; 4358 ext->hdr_type = 1; 4359 ext->hdr_version = 1; 4360 ext->hdr_len = 28; 4361 ext->payload_len = payload_len; 4362 ext->mss = skb_shinfo(skb)->gso_size; 4363 ext->dg_hdr_len = proto_len; 4364 } 4365 4366 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, 4367 struct qeth_qdio_out_q *queue, int ipv, 4368 void (*fill_header)(struct qeth_qdio_out_q *queue, 4369 struct qeth_hdr *hdr, struct sk_buff *skb, 4370 int ipv, unsigned int data_len)) 4371 { 4372 unsigned int proto_len, hw_hdr_len; 4373 unsigned int frame_len = skb->len; 4374 bool is_tso = skb_is_gso(skb); 4375 unsigned int data_offset = 0; 4376 struct qeth_hdr *hdr = NULL; 4377 unsigned int hd_len = 0; 4378 unsigned int elements; 4379 int push_len, rc; 4380 4381 if (is_tso) { 4382 hw_hdr_len = sizeof(struct qeth_hdr_tso); 4383 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4384 } else { 4385 hw_hdr_len = sizeof(struct qeth_hdr); 4386 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0; 4387 } 4388 4389 rc = skb_cow_head(skb, hw_hdr_len); 4390 if (rc) 4391 return rc; 4392 4393 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len, 4394 &elements); 4395 if (push_len < 0) 4396 return push_len; 4397 if (is_tso || !push_len) { 4398 /* HW header needs its own buffer element. */ 4399 hd_len = hw_hdr_len + proto_len; 4400 data_offset = push_len + proto_len; 4401 } 4402 memset(hdr, 0, hw_hdr_len); 4403 fill_header(queue, hdr, skb, ipv, frame_len); 4404 if (is_tso) 4405 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, 4406 frame_len - proto_len, skb, proto_len); 4407 4408 if (IS_IQD(card)) { 4409 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset, 4410 hd_len); 4411 } else { 4412 /* TODO: drop skb_orphan() once TX completion is fast enough */ 4413 skb_orphan(skb); 4414 spin_lock(&queue->lock); 4415 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, 4416 hd_len, elements); 4417 spin_unlock(&queue->lock); 4418 } 4419 4420 if (rc && !push_len) 4421 kmem_cache_free(qeth_core_header_cache, hdr); 4422 4423 return rc; 4424 } 4425 EXPORT_SYMBOL_GPL(qeth_xmit); 4426 4427 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, 4428 struct qeth_reply *reply, unsigned long data) 4429 { 4430 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4431 struct qeth_ipacmd_setadpparms *setparms; 4432 4433 QETH_CARD_TEXT(card, 4, "prmadpcb"); 4434 4435 setparms = &(cmd->data.setadapterparms); 4436 if (qeth_setadpparms_inspect_rc(cmd)) { 4437 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); 4438 setparms->data.mode = SET_PROMISC_MODE_OFF; 4439 } 4440 card->info.promisc_mode = setparms->data.mode; 4441 return (cmd->hdr.return_code) ? -EIO : 0; 4442 } 4443 4444 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable) 4445 { 4446 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON : 4447 SET_PROMISC_MODE_OFF; 4448 struct qeth_cmd_buffer *iob; 4449 struct qeth_ipa_cmd *cmd; 4450 4451 QETH_CARD_TEXT(card, 4, "setprom"); 4452 QETH_CARD_TEXT_(card, 4, "mode:%x", mode); 4453 4454 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, 4455 SETADP_DATA_SIZEOF(mode)); 4456 if (!iob) 4457 return; 4458 cmd = __ipa_cmd(iob); 4459 cmd->data.setadapterparms.data.mode = mode; 4460 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); 4461 } 4462 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); 4463 4464 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, 4465 struct qeth_reply *reply, unsigned long data) 4466 { 4467 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4468 struct qeth_ipacmd_setadpparms *adp_cmd; 4469 4470 QETH_CARD_TEXT(card, 4, "chgmaccb"); 4471 if (qeth_setadpparms_inspect_rc(cmd)) 4472 return -EIO; 4473 4474 adp_cmd = &cmd->data.setadapterparms; 4475 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr)) 4476 return -EADDRNOTAVAIL; 4477 4478 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) && 4479 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) 4480 return -EADDRNOTAVAIL; 4481 4482 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr); 4483 return 0; 4484 } 4485 4486 int qeth_setadpparms_change_macaddr(struct qeth_card *card) 4487 { 4488 int rc; 4489 struct qeth_cmd_buffer *iob; 4490 struct qeth_ipa_cmd *cmd; 4491 4492 QETH_CARD_TEXT(card, 4, "chgmac"); 4493 4494 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, 4495 SETADP_DATA_SIZEOF(change_addr)); 4496 if (!iob) 4497 return -ENOMEM; 4498 cmd = __ipa_cmd(iob); 4499 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; 4500 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN; 4501 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr, 4502 card->dev->dev_addr); 4503 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, 4504 NULL); 4505 return rc; 4506 } 4507 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 4508 4509 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, 4510 struct qeth_reply *reply, unsigned long data) 4511 { 4512 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4513 struct qeth_set_access_ctrl *access_ctrl_req; 4514 4515 QETH_CARD_TEXT(card, 4, "setaccb"); 4516 4517 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4518 QETH_CARD_TEXT_(card, 2, "rc=%d", 4519 cmd->data.setadapterparms.hdr.return_code); 4520 if (cmd->data.setadapterparms.hdr.return_code != 4521 SET_ACCESS_CTRL_RC_SUCCESS) 4522 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", 4523 access_ctrl_req->subcmd_code, CARD_DEVID(card), 4524 cmd->data.setadapterparms.hdr.return_code); 4525 switch (qeth_setadpparms_inspect_rc(cmd)) { 4526 case SET_ACCESS_CTRL_RC_SUCCESS: 4527 if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE) 4528 dev_info(&card->gdev->dev, 4529 "QDIO data connection isolation is deactivated\n"); 4530 else 4531 dev_info(&card->gdev->dev, 4532 "QDIO data connection isolation is activated\n"); 4533 return 0; 4534 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: 4535 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n", 4536 CARD_DEVID(card)); 4537 return 0; 4538 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: 4539 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n", 4540 CARD_DEVID(card)); 4541 return 0; 4542 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: 4543 dev_err(&card->gdev->dev, "Adapter does not " 4544 "support QDIO data connection isolation\n"); 4545 return -EOPNOTSUPP; 4546 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: 4547 dev_err(&card->gdev->dev, 4548 "Adapter is dedicated. " 4549 "QDIO data connection isolation not supported\n"); 4550 return -EOPNOTSUPP; 4551 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: 4552 dev_err(&card->gdev->dev, 4553 "TSO does not permit QDIO data connection isolation\n"); 4554 return -EPERM; 4555 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: 4556 dev_err(&card->gdev->dev, "The adjacent switch port does not " 4557 "support reflective relay mode\n"); 4558 return -EOPNOTSUPP; 4559 case SET_ACCESS_CTRL_RC_REFLREL_FAILED: 4560 dev_err(&card->gdev->dev, "The reflective relay mode cannot be " 4561 "enabled at the adjacent switch port"); 4562 return -EREMOTEIO; 4563 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: 4564 dev_warn(&card->gdev->dev, "Turning off reflective relay mode " 4565 "at the adjacent switch failed\n"); 4566 /* benign error while disabling ISOLATION_MODE_FWD */ 4567 return 0; 4568 default: 4569 return -EIO; 4570 } 4571 } 4572 4573 int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, 4574 enum qeth_ipa_isolation_modes mode) 4575 { 4576 int rc; 4577 struct qeth_cmd_buffer *iob; 4578 struct qeth_ipa_cmd *cmd; 4579 struct qeth_set_access_ctrl *access_ctrl_req; 4580 4581 QETH_CARD_TEXT(card, 4, "setacctl"); 4582 4583 if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { 4584 dev_err(&card->gdev->dev, 4585 "Adapter does not support QDIO data connection isolation\n"); 4586 return -EOPNOTSUPP; 4587 } 4588 4589 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, 4590 SETADP_DATA_SIZEOF(set_access_ctrl)); 4591 if (!iob) 4592 return -ENOMEM; 4593 cmd = __ipa_cmd(iob); 4594 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4595 access_ctrl_req->subcmd_code = mode; 4596 4597 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, 4598 NULL); 4599 if (rc) { 4600 QETH_CARD_TEXT_(card, 2, "rc=%d", rc); 4601 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", 4602 rc, CARD_DEVID(card)); 4603 } 4604 4605 return rc; 4606 } 4607 4608 void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue) 4609 { 4610 struct qeth_card *card; 4611 4612 card = dev->ml_priv; 4613 QETH_CARD_TEXT(card, 4, "txtimeo"); 4614 qeth_schedule_recovery(card); 4615 } 4616 EXPORT_SYMBOL_GPL(qeth_tx_timeout); 4617 4618 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) 4619 { 4620 struct qeth_card *card = dev->ml_priv; 4621 int rc = 0; 4622 4623 switch (regnum) { 4624 case MII_BMCR: /* Basic mode control register */ 4625 rc = BMCR_FULLDPLX; 4626 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && 4627 (card->info.link_type != QETH_LINK_TYPE_OSN) && 4628 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) && 4629 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH)) 4630 rc |= BMCR_SPEED100; 4631 break; 4632 case MII_BMSR: /* Basic mode status register */ 4633 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS | 4634 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | 4635 BMSR_100BASE4; 4636 break; 4637 case MII_PHYSID1: /* PHYS ID 1 */ 4638 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | 4639 dev->dev_addr[2]; 4640 rc = (rc >> 5) & 0xFFFF; 4641 break; 4642 case MII_PHYSID2: /* PHYS ID 2 */ 4643 rc = (dev->dev_addr[2] << 10) & 0xFFFF; 4644 break; 4645 case MII_ADVERTISE: /* Advertisement control reg */ 4646 rc = ADVERTISE_ALL; 4647 break; 4648 case MII_LPA: /* Link partner ability reg */ 4649 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | 4650 LPA_100BASE4 | LPA_LPACK; 4651 break; 4652 case MII_EXPANSION: /* Expansion register */ 4653 break; 4654 case MII_DCOUNTER: /* disconnect counter */ 4655 break; 4656 case MII_FCSCOUNTER: /* false carrier counter */ 4657 break; 4658 case MII_NWAYTEST: /* N-way auto-neg test register */ 4659 break; 4660 case MII_RERRCOUNTER: /* rx error counter */ 4661 rc = card->stats.rx_length_errors + 4662 card->stats.rx_frame_errors + 4663 card->stats.rx_fifo_errors; 4664 break; 4665 case MII_SREVISION: /* silicon revision */ 4666 break; 4667 case MII_RESV1: /* reserved 1 */ 4668 break; 4669 case MII_LBRERROR: /* loopback, rx, bypass error */ 4670 break; 4671 case MII_PHYADDR: /* physical address */ 4672 break; 4673 case MII_RESV2: /* reserved 2 */ 4674 break; 4675 case MII_TPISTATUS: /* TPI status for 10mbps */ 4676 break; 4677 case MII_NCONFIG: /* network interface config */ 4678 break; 4679 default: 4680 break; 4681 } 4682 return rc; 4683 } 4684 4685 static int qeth_snmp_command_cb(struct qeth_card *card, 4686 struct qeth_reply *reply, unsigned long data) 4687 { 4688 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4689 struct qeth_arp_query_info *qinfo = reply->param; 4690 struct qeth_ipacmd_setadpparms *adp_cmd; 4691 unsigned int data_len; 4692 void *snmp_data; 4693 4694 QETH_CARD_TEXT(card, 3, "snpcmdcb"); 4695 4696 if (cmd->hdr.return_code) { 4697 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); 4698 return -EIO; 4699 } 4700 if (cmd->data.setadapterparms.hdr.return_code) { 4701 cmd->hdr.return_code = 4702 cmd->data.setadapterparms.hdr.return_code; 4703 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); 4704 return -EIO; 4705 } 4706 4707 adp_cmd = &cmd->data.setadapterparms; 4708 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr); 4709 if (adp_cmd->hdr.seq_no == 1) { 4710 snmp_data = &adp_cmd->data.snmp; 4711 } else { 4712 snmp_data = &adp_cmd->data.snmp.request; 4713 data_len -= offsetof(struct qeth_snmp_cmd, request); 4714 } 4715 4716 /* check if there is enough room in userspace */ 4717 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { 4718 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC); 4719 return -ENOSPC; 4720 } 4721 QETH_CARD_TEXT_(card, 4, "snore%i", 4722 cmd->data.setadapterparms.hdr.used_total); 4723 QETH_CARD_TEXT_(card, 4, "sseqn%i", 4724 cmd->data.setadapterparms.hdr.seq_no); 4725 /*copy entries to user buffer*/ 4726 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); 4727 qinfo->udata_offset += data_len; 4728 4729 if (cmd->data.setadapterparms.hdr.seq_no < 4730 cmd->data.setadapterparms.hdr.used_total) 4731 return 1; 4732 return 0; 4733 } 4734 4735 static int qeth_snmp_command(struct qeth_card *card, char __user *udata) 4736 { 4737 struct qeth_snmp_ureq __user *ureq; 4738 struct qeth_cmd_buffer *iob; 4739 unsigned int req_len; 4740 struct qeth_arp_query_info qinfo = {0, }; 4741 int rc = 0; 4742 4743 QETH_CARD_TEXT(card, 3, "snmpcmd"); 4744 4745 if (IS_VM_NIC(card)) 4746 return -EOPNOTSUPP; 4747 4748 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && 4749 IS_LAYER3(card)) 4750 return -EOPNOTSUPP; 4751 4752 ureq = (struct qeth_snmp_ureq __user *) udata; 4753 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) || 4754 get_user(req_len, &ureq->hdr.req_len)) 4755 return -EFAULT; 4756 4757 /* Sanitize user input, to avoid overflows in iob size calculation: */ 4758 if (req_len > QETH_BUFSIZE) 4759 return -EINVAL; 4760 4761 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); 4762 if (!iob) 4763 return -ENOMEM; 4764 4765 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp, 4766 &ureq->cmd, req_len)) { 4767 qeth_put_cmd(iob); 4768 return -EFAULT; 4769 } 4770 4771 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 4772 if (!qinfo.udata) { 4773 qeth_put_cmd(iob); 4774 return -ENOMEM; 4775 } 4776 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); 4777 4778 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo); 4779 if (rc) 4780 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", 4781 CARD_DEVID(card), rc); 4782 else { 4783 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 4784 rc = -EFAULT; 4785 } 4786 4787 kfree(qinfo.udata); 4788 return rc; 4789 } 4790 4791 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, 4792 struct qeth_reply *reply, 4793 unsigned long data) 4794 { 4795 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4796 struct qeth_qoat_priv *priv = reply->param; 4797 int resdatalen; 4798 4799 QETH_CARD_TEXT(card, 3, "qoatcb"); 4800 if (qeth_setadpparms_inspect_rc(cmd)) 4801 return -EIO; 4802 4803 resdatalen = cmd->data.setadapterparms.hdr.cmdlength; 4804 4805 if (resdatalen > (priv->buffer_len - priv->response_len)) 4806 return -ENOSPC; 4807 4808 memcpy(priv->buffer + priv->response_len, 4809 &cmd->data.setadapterparms.hdr, resdatalen); 4810 priv->response_len += resdatalen; 4811 4812 if (cmd->data.setadapterparms.hdr.seq_no < 4813 cmd->data.setadapterparms.hdr.used_total) 4814 return 1; 4815 return 0; 4816 } 4817 4818 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) 4819 { 4820 int rc = 0; 4821 struct qeth_cmd_buffer *iob; 4822 struct qeth_ipa_cmd *cmd; 4823 struct qeth_query_oat *oat_req; 4824 struct qeth_query_oat_data oat_data; 4825 struct qeth_qoat_priv priv; 4826 void __user *tmp; 4827 4828 QETH_CARD_TEXT(card, 3, "qoatcmd"); 4829 4830 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) 4831 return -EOPNOTSUPP; 4832 4833 if (copy_from_user(&oat_data, udata, sizeof(oat_data))) 4834 return -EFAULT; 4835 4836 priv.buffer_len = oat_data.buffer_len; 4837 priv.response_len = 0; 4838 priv.buffer = vzalloc(oat_data.buffer_len); 4839 if (!priv.buffer) 4840 return -ENOMEM; 4841 4842 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4843 SETADP_DATA_SIZEOF(query_oat)); 4844 if (!iob) { 4845 rc = -ENOMEM; 4846 goto out_free; 4847 } 4848 cmd = __ipa_cmd(iob); 4849 oat_req = &cmd->data.setadapterparms.data.query_oat; 4850 oat_req->subcmd_code = oat_data.command; 4851 4852 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv); 4853 if (!rc) { 4854 tmp = is_compat_task() ? compat_ptr(oat_data.ptr) : 4855 u64_to_user_ptr(oat_data.ptr); 4856 oat_data.response_len = priv.response_len; 4857 4858 if (copy_to_user(tmp, priv.buffer, priv.response_len) || 4859 copy_to_user(udata, &oat_data, sizeof(oat_data))) 4860 rc = -EFAULT; 4861 } 4862 4863 out_free: 4864 vfree(priv.buffer); 4865 return rc; 4866 } 4867 4868 static int qeth_query_card_info_cb(struct qeth_card *card, 4869 struct qeth_reply *reply, unsigned long data) 4870 { 4871 struct carrier_info *carrier_info = (struct carrier_info *)reply->param; 4872 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4873 struct qeth_query_card_info *card_info; 4874 4875 QETH_CARD_TEXT(card, 2, "qcrdincb"); 4876 if (qeth_setadpparms_inspect_rc(cmd)) 4877 return -EIO; 4878 4879 card_info = &cmd->data.setadapterparms.data.card_info; 4880 carrier_info->card_type = card_info->card_type; 4881 carrier_info->port_mode = card_info->port_mode; 4882 carrier_info->port_speed = card_info->port_speed; 4883 return 0; 4884 } 4885 4886 int qeth_query_card_info(struct qeth_card *card, 4887 struct carrier_info *carrier_info) 4888 { 4889 struct qeth_cmd_buffer *iob; 4890 4891 QETH_CARD_TEXT(card, 2, "qcrdinfo"); 4892 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO)) 4893 return -EOPNOTSUPP; 4894 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0); 4895 if (!iob) 4896 return -ENOMEM; 4897 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, 4898 (void *)carrier_info); 4899 } 4900 4901 /** 4902 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address 4903 * @card: pointer to a qeth_card 4904 * 4905 * Returns 4906 * 0, if a MAC address has been set for the card's netdevice 4907 * a return code, for various error conditions 4908 */ 4909 int qeth_vm_request_mac(struct qeth_card *card) 4910 { 4911 struct diag26c_mac_resp *response; 4912 struct diag26c_mac_req *request; 4913 int rc; 4914 4915 QETH_CARD_TEXT(card, 2, "vmreqmac"); 4916 4917 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 4918 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 4919 if (!request || !response) { 4920 rc = -ENOMEM; 4921 goto out; 4922 } 4923 4924 request->resp_buf_len = sizeof(*response); 4925 request->resp_version = DIAG26C_VERSION2; 4926 request->op_code = DIAG26C_GET_MAC; 4927 request->devno = card->info.ddev_devno; 4928 4929 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 4930 rc = diag26c(request, response, DIAG26C_MAC_SERVICES); 4931 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 4932 if (rc) 4933 goto out; 4934 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 4935 4936 if (request->resp_buf_len < sizeof(*response) || 4937 response->version != request->resp_version) { 4938 rc = -EIO; 4939 QETH_CARD_TEXT(card, 2, "badresp"); 4940 QETH_CARD_HEX(card, 2, &request->resp_buf_len, 4941 sizeof(request->resp_buf_len)); 4942 } else if (!is_valid_ether_addr(response->mac)) { 4943 rc = -EINVAL; 4944 QETH_CARD_TEXT(card, 2, "badmac"); 4945 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN); 4946 } else { 4947 ether_addr_copy(card->dev->dev_addr, response->mac); 4948 } 4949 4950 out: 4951 kfree(response); 4952 kfree(request); 4953 return rc; 4954 } 4955 EXPORT_SYMBOL_GPL(qeth_vm_request_mac); 4956 4957 static void qeth_determine_capabilities(struct qeth_card *card) 4958 { 4959 struct qeth_channel *channel = &card->data; 4960 struct ccw_device *ddev = channel->ccwdev; 4961 int rc; 4962 int ddev_offline = 0; 4963 4964 QETH_CARD_TEXT(card, 2, "detcapab"); 4965 if (!ddev->online) { 4966 ddev_offline = 1; 4967 rc = qeth_start_channel(channel); 4968 if (rc) { 4969 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 4970 goto out; 4971 } 4972 } 4973 4974 rc = qeth_read_conf_data(card); 4975 if (rc) { 4976 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n", 4977 CARD_DEVID(card), rc); 4978 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 4979 goto out_offline; 4980 } 4981 4982 rc = qdio_get_ssqd_desc(ddev, &card->ssqd); 4983 if (rc) 4984 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 4985 4986 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt); 4987 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1); 4988 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2); 4989 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3); 4990 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt); 4991 if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) || 4992 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || 4993 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { 4994 dev_info(&card->gdev->dev, 4995 "Completion Queueing supported\n"); 4996 } else { 4997 card->options.cq = QETH_CQ_NOTAVAILABLE; 4998 } 4999 5000 out_offline: 5001 if (ddev_offline == 1) 5002 qeth_stop_channel(channel); 5003 out: 5004 return; 5005 } 5006 5007 static void qeth_read_ccw_conf_data(struct qeth_card *card) 5008 { 5009 struct qeth_card_info *info = &card->info; 5010 struct ccw_device *cdev = CARD_DDEV(card); 5011 struct ccw_dev_id dev_id; 5012 5013 QETH_CARD_TEXT(card, 2, "ccwconfd"); 5014 ccw_device_get_id(cdev, &dev_id); 5015 5016 info->ddev_devno = dev_id.devno; 5017 info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) && 5018 !ccw_device_get_iid(cdev, &info->iid) && 5019 !ccw_device_get_chid(cdev, 0, &info->chid); 5020 info->ssid = dev_id.ssid; 5021 5022 dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n", 5023 info->chid, info->chpid); 5024 5025 QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno); 5026 QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid); 5027 QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid); 5028 QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid); 5029 QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid); 5030 QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid); 5031 QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid); 5032 } 5033 5034 static int qeth_qdio_establish(struct qeth_card *card) 5035 { 5036 struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES]; 5037 struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES]; 5038 struct qeth_qib_parms *qib_parms = NULL; 5039 struct qdio_initialize init_data; 5040 unsigned int i; 5041 int rc = 0; 5042 5043 QETH_CARD_TEXT(card, 2, "qdioest"); 5044 5045 if (!IS_IQD(card) && !IS_VM_NIC(card)) { 5046 qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL); 5047 if (!qib_parms) 5048 return -ENOMEM; 5049 5050 qeth_fill_qib_parms(card, qib_parms); 5051 } 5052 5053 in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs; 5054 if (card->options.cq == QETH_CQ_ENABLED) 5055 in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs; 5056 5057 for (i = 0; i < card->qdio.no_out_queues; i++) 5058 out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs; 5059 5060 memset(&init_data, 0, sizeof(struct qdio_initialize)); 5061 init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT : 5062 QDIO_QETH_QFMT; 5063 init_data.qib_param_field_format = 0; 5064 init_data.qib_param_field = (void *)qib_parms; 5065 init_data.no_input_qs = card->qdio.no_in_queues; 5066 init_data.no_output_qs = card->qdio.no_out_queues; 5067 init_data.input_handler = qeth_qdio_input_handler; 5068 init_data.output_handler = qeth_qdio_output_handler; 5069 init_data.irq_poll = qeth_qdio_poll; 5070 init_data.int_parm = (unsigned long) card; 5071 init_data.input_sbal_addr_array = in_sbal_ptrs; 5072 init_data.output_sbal_addr_array = out_sbal_ptrs; 5073 init_data.output_sbal_state_array = card->qdio.out_bufstates; 5074 init_data.scan_threshold = IS_IQD(card) ? 0 : 32; 5075 5076 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, 5077 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { 5078 rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs, 5079 init_data.no_output_qs); 5080 if (rc) { 5081 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 5082 goto out; 5083 } 5084 rc = qdio_establish(CARD_DDEV(card), &init_data); 5085 if (rc) { 5086 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 5087 qdio_free(CARD_DDEV(card)); 5088 } 5089 } 5090 5091 switch (card->options.cq) { 5092 case QETH_CQ_ENABLED: 5093 dev_info(&card->gdev->dev, "Completion Queue support enabled"); 5094 break; 5095 case QETH_CQ_DISABLED: 5096 dev_info(&card->gdev->dev, "Completion Queue support disabled"); 5097 break; 5098 default: 5099 break; 5100 } 5101 5102 out: 5103 kfree(qib_parms); 5104 return rc; 5105 } 5106 5107 static void qeth_core_free_card(struct qeth_card *card) 5108 { 5109 QETH_CARD_TEXT(card, 2, "freecrd"); 5110 5111 unregister_service_level(&card->qeth_service_level); 5112 debugfs_remove_recursive(card->debugfs); 5113 qeth_put_cmd(card->read_cmd); 5114 destroy_workqueue(card->event_wq); 5115 dev_set_drvdata(&card->gdev->dev, NULL); 5116 kfree(card); 5117 } 5118 5119 static void qeth_trace_features(struct qeth_card *card) 5120 { 5121 QETH_CARD_TEXT(card, 2, "features"); 5122 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4)); 5123 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6)); 5124 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp)); 5125 QETH_CARD_HEX(card, 2, &card->info.diagass_support, 5126 sizeof(card->info.diagass_support)); 5127 } 5128 5129 static struct ccw_device_id qeth_ids[] = { 5130 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), 5131 .driver_info = QETH_CARD_TYPE_OSD}, 5132 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), 5133 .driver_info = QETH_CARD_TYPE_IQD}, 5134 #ifdef CONFIG_QETH_OSN 5135 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06), 5136 .driver_info = QETH_CARD_TYPE_OSN}, 5137 #endif 5138 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), 5139 .driver_info = QETH_CARD_TYPE_OSM}, 5140 #ifdef CONFIG_QETH_OSX 5141 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), 5142 .driver_info = QETH_CARD_TYPE_OSX}, 5143 #endif 5144 {}, 5145 }; 5146 MODULE_DEVICE_TABLE(ccw, qeth_ids); 5147 5148 static struct ccw_driver qeth_ccw_driver = { 5149 .driver = { 5150 .owner = THIS_MODULE, 5151 .name = "qeth", 5152 }, 5153 .ids = qeth_ids, 5154 .probe = ccwgroup_probe_ccwdev, 5155 .remove = ccwgroup_remove_ccwdev, 5156 }; 5157 5158 static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok) 5159 { 5160 int retries = 3; 5161 int rc; 5162 5163 QETH_CARD_TEXT(card, 2, "hrdsetup"); 5164 atomic_set(&card->force_alloc_skb, 0); 5165 rc = qeth_update_from_chp_desc(card); 5166 if (rc) 5167 return rc; 5168 retry: 5169 if (retries < 3) 5170 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", 5171 CARD_DEVID(card)); 5172 rc = qeth_qdio_clear_card(card, !IS_IQD(card)); 5173 qeth_stop_channel(&card->data); 5174 qeth_stop_channel(&card->write); 5175 qeth_stop_channel(&card->read); 5176 qdio_free(CARD_DDEV(card)); 5177 5178 rc = qeth_start_channel(&card->read); 5179 if (rc) 5180 goto retriable; 5181 rc = qeth_start_channel(&card->write); 5182 if (rc) 5183 goto retriable; 5184 rc = qeth_start_channel(&card->data); 5185 if (rc) 5186 goto retriable; 5187 retriable: 5188 if (rc == -ERESTARTSYS) { 5189 QETH_CARD_TEXT(card, 2, "break1"); 5190 return rc; 5191 } else if (rc) { 5192 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 5193 if (--retries < 0) 5194 goto out; 5195 else 5196 goto retry; 5197 } 5198 5199 qeth_determine_capabilities(card); 5200 qeth_read_ccw_conf_data(card); 5201 qeth_idx_init(card); 5202 5203 rc = qeth_idx_activate_read_channel(card); 5204 if (rc == -EINTR) { 5205 QETH_CARD_TEXT(card, 2, "break2"); 5206 return rc; 5207 } else if (rc) { 5208 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 5209 if (--retries < 0) 5210 goto out; 5211 else 5212 goto retry; 5213 } 5214 5215 rc = qeth_idx_activate_write_channel(card); 5216 if (rc == -EINTR) { 5217 QETH_CARD_TEXT(card, 2, "break3"); 5218 return rc; 5219 } else if (rc) { 5220 QETH_CARD_TEXT_(card, 2, "4err%d", rc); 5221 if (--retries < 0) 5222 goto out; 5223 else 5224 goto retry; 5225 } 5226 card->read_or_write_problem = 0; 5227 rc = qeth_mpc_initialize(card); 5228 if (rc) { 5229 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 5230 goto out; 5231 } 5232 5233 rc = qeth_send_startlan(card); 5234 if (rc) { 5235 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 5236 if (rc == -ENETDOWN) { 5237 dev_warn(&card->gdev->dev, "The LAN is offline\n"); 5238 *carrier_ok = false; 5239 } else { 5240 goto out; 5241 } 5242 } else { 5243 *carrier_ok = true; 5244 } 5245 5246 card->options.ipa4.supported = 0; 5247 card->options.ipa6.supported = 0; 5248 card->options.adp.supported = 0; 5249 card->options.sbp.supported_funcs = 0; 5250 card->info.diagass_support = 0; 5251 rc = qeth_query_ipassists(card, QETH_PROT_IPV4); 5252 if (rc == -ENOMEM) 5253 goto out; 5254 if (qeth_is_supported(card, IPA_IPV6)) { 5255 rc = qeth_query_ipassists(card, QETH_PROT_IPV6); 5256 if (rc == -ENOMEM) 5257 goto out; 5258 } 5259 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { 5260 rc = qeth_query_setadapterparms(card); 5261 if (rc < 0) { 5262 QETH_CARD_TEXT_(card, 2, "7err%d", rc); 5263 goto out; 5264 } 5265 } 5266 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { 5267 rc = qeth_query_setdiagass(card); 5268 if (rc) 5269 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 5270 } 5271 5272 qeth_trace_features(card); 5273 5274 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) || 5275 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))) 5276 card->info.hwtrap = 0; 5277 5278 if (card->options.isolation != ISOLATION_MODE_NONE) { 5279 rc = qeth_setadpparms_set_access_ctrl(card, 5280 card->options.isolation); 5281 if (rc) 5282 goto out; 5283 } 5284 5285 rc = qeth_init_qdio_queues(card); 5286 if (rc) { 5287 QETH_CARD_TEXT_(card, 2, "9err%d", rc); 5288 goto out; 5289 } 5290 5291 return 0; 5292 out: 5293 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 5294 "an error on the device\n"); 5295 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n", 5296 CARD_DEVID(card), rc); 5297 return rc; 5298 } 5299 5300 static int qeth_set_online(struct qeth_card *card) 5301 { 5302 bool carrier_ok; 5303 int rc; 5304 5305 mutex_lock(&card->discipline_mutex); 5306 mutex_lock(&card->conf_mutex); 5307 QETH_CARD_TEXT(card, 2, "setonlin"); 5308 5309 rc = qeth_hardsetup_card(card, &carrier_ok); 5310 if (rc) { 5311 QETH_CARD_TEXT_(card, 2, "2err%04x", rc); 5312 rc = -ENODEV; 5313 goto err_hardsetup; 5314 } 5315 5316 qeth_print_status_message(card); 5317 5318 if (card->dev->reg_state != NETREG_REGISTERED) 5319 /* no need for locking / error handling at this early stage: */ 5320 qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card)); 5321 5322 rc = card->discipline->set_online(card, carrier_ok); 5323 if (rc) 5324 goto err_online; 5325 5326 /* let user_space know that device is online */ 5327 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5328 5329 mutex_unlock(&card->conf_mutex); 5330 mutex_unlock(&card->discipline_mutex); 5331 return 0; 5332 5333 err_online: 5334 err_hardsetup: 5335 qeth_qdio_clear_card(card, 0); 5336 qeth_clear_working_pool_list(card); 5337 qeth_flush_local_addrs(card); 5338 5339 qeth_stop_channel(&card->data); 5340 qeth_stop_channel(&card->write); 5341 qeth_stop_channel(&card->read); 5342 qdio_free(CARD_DDEV(card)); 5343 5344 mutex_unlock(&card->conf_mutex); 5345 mutex_unlock(&card->discipline_mutex); 5346 return rc; 5347 } 5348 5349 int qeth_set_offline(struct qeth_card *card, bool resetting) 5350 { 5351 int rc, rc2, rc3; 5352 5353 mutex_lock(&card->discipline_mutex); 5354 mutex_lock(&card->conf_mutex); 5355 QETH_CARD_TEXT(card, 3, "setoffl"); 5356 5357 if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) { 5358 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 5359 card->info.hwtrap = 1; 5360 } 5361 5362 /* cancel any stalled cmd that might block the rtnl: */ 5363 qeth_clear_ipacmd_list(card); 5364 5365 rtnl_lock(); 5366 card->info.open_when_online = card->dev->flags & IFF_UP; 5367 dev_close(card->dev); 5368 netif_device_detach(card->dev); 5369 netif_carrier_off(card->dev); 5370 rtnl_unlock(); 5371 5372 cancel_work_sync(&card->rx_mode_work); 5373 5374 card->discipline->set_offline(card); 5375 5376 qeth_qdio_clear_card(card, 0); 5377 qeth_drain_output_queues(card); 5378 qeth_clear_working_pool_list(card); 5379 qeth_flush_local_addrs(card); 5380 card->info.promisc_mode = 0; 5381 5382 rc = qeth_stop_channel(&card->data); 5383 rc2 = qeth_stop_channel(&card->write); 5384 rc3 = qeth_stop_channel(&card->read); 5385 if (!rc) 5386 rc = (rc2) ? rc2 : rc3; 5387 if (rc) 5388 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 5389 qdio_free(CARD_DDEV(card)); 5390 5391 /* let user_space know that device is offline */ 5392 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5393 5394 mutex_unlock(&card->conf_mutex); 5395 mutex_unlock(&card->discipline_mutex); 5396 return 0; 5397 } 5398 EXPORT_SYMBOL_GPL(qeth_set_offline); 5399 5400 static int qeth_do_reset(void *data) 5401 { 5402 struct qeth_card *card = data; 5403 int rc; 5404 5405 QETH_CARD_TEXT(card, 2, "recover1"); 5406 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) 5407 return 0; 5408 QETH_CARD_TEXT(card, 2, "recover2"); 5409 dev_warn(&card->gdev->dev, 5410 "A recovery process has been started for the device\n"); 5411 5412 qeth_set_offline(card, true); 5413 rc = qeth_set_online(card); 5414 if (!rc) { 5415 dev_info(&card->gdev->dev, 5416 "Device successfully recovered!\n"); 5417 } else { 5418 ccwgroup_set_offline(card->gdev); 5419 dev_warn(&card->gdev->dev, 5420 "The qeth device driver failed to recover an error on the device\n"); 5421 } 5422 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 5423 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 5424 return 0; 5425 } 5426 5427 #if IS_ENABLED(CONFIG_QETH_L3) 5428 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 5429 struct qeth_hdr *hdr) 5430 { 5431 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data; 5432 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; 5433 struct net_device *dev = skb->dev; 5434 5435 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) { 5436 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, 5437 "FAKELL", skb->len); 5438 return; 5439 } 5440 5441 if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) { 5442 u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 : 5443 ETH_P_IP; 5444 unsigned char tg_addr[ETH_ALEN]; 5445 5446 skb_reset_network_header(skb); 5447 switch (l3_hdr->flags & QETH_HDR_CAST_MASK) { 5448 case QETH_CAST_MULTICAST: 5449 if (prot == ETH_P_IP) 5450 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); 5451 else 5452 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); 5453 QETH_CARD_STAT_INC(card, rx_multicast); 5454 break; 5455 case QETH_CAST_BROADCAST: 5456 ether_addr_copy(tg_addr, dev->broadcast); 5457 QETH_CARD_STAT_INC(card, rx_multicast); 5458 break; 5459 default: 5460 if (card->options.sniffer) 5461 skb->pkt_type = PACKET_OTHERHOST; 5462 ether_addr_copy(tg_addr, dev->dev_addr); 5463 } 5464 5465 if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) 5466 dev_hard_header(skb, dev, prot, tg_addr, 5467 &l3_hdr->next_hop.rx.src_mac, skb->len); 5468 else 5469 dev_hard_header(skb, dev, prot, tg_addr, "FAKELL", 5470 skb->len); 5471 } 5472 5473 /* copy VLAN tag from hdr into skb */ 5474 if (!card->options.sniffer && 5475 (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME | 5476 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { 5477 u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 5478 l3_hdr->vlan_id : 5479 l3_hdr->next_hop.rx.vlan_id; 5480 5481 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 5482 } 5483 } 5484 #endif 5485 5486 static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb, 5487 struct qeth_hdr *hdr, bool uses_frags) 5488 { 5489 struct napi_struct *napi = &card->napi; 5490 bool is_cso; 5491 5492 switch (hdr->hdr.l2.id) { 5493 case QETH_HEADER_TYPE_OSN: 5494 skb_push(skb, sizeof(*hdr)); 5495 skb_copy_to_linear_data(skb, hdr, sizeof(*hdr)); 5496 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); 5497 QETH_CARD_STAT_INC(card, rx_packets); 5498 5499 card->osn_info.data_cb(skb); 5500 return; 5501 #if IS_ENABLED(CONFIG_QETH_L3) 5502 case QETH_HEADER_TYPE_LAYER3: 5503 qeth_l3_rebuild_skb(card, skb, hdr); 5504 is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5505 break; 5506 #endif 5507 case QETH_HEADER_TYPE_LAYER2: 5508 is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5509 break; 5510 default: 5511 /* never happens */ 5512 if (uses_frags) 5513 napi_free_frags(napi); 5514 else 5515 dev_kfree_skb_any(skb); 5516 return; 5517 } 5518 5519 if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) { 5520 skb->ip_summed = CHECKSUM_UNNECESSARY; 5521 QETH_CARD_STAT_INC(card, rx_skb_csum); 5522 } else { 5523 skb->ip_summed = CHECKSUM_NONE; 5524 } 5525 5526 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); 5527 QETH_CARD_STAT_INC(card, rx_packets); 5528 if (skb_is_nonlinear(skb)) { 5529 QETH_CARD_STAT_INC(card, rx_sg_skbs); 5530 QETH_CARD_STAT_ADD(card, rx_sg_frags, 5531 skb_shinfo(skb)->nr_frags); 5532 } 5533 5534 if (uses_frags) { 5535 napi_gro_frags(napi); 5536 } else { 5537 skb->protocol = eth_type_trans(skb, skb->dev); 5538 napi_gro_receive(napi, skb); 5539 } 5540 } 5541 5542 static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len) 5543 { 5544 struct page *page = virt_to_page(data); 5545 unsigned int next_frag; 5546 5547 next_frag = skb_shinfo(skb)->nr_frags; 5548 get_page(page); 5549 skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len, 5550 data_len); 5551 } 5552 5553 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 5554 { 5555 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); 5556 } 5557 5558 static int qeth_extract_skb(struct qeth_card *card, 5559 struct qeth_qdio_buffer *qethbuffer, u8 *element_no, 5560 int *__offset) 5561 { 5562 struct qeth_priv *priv = netdev_priv(card->dev); 5563 struct qdio_buffer *buffer = qethbuffer->buffer; 5564 struct napi_struct *napi = &card->napi; 5565 struct qdio_buffer_element *element; 5566 unsigned int linear_len = 0; 5567 bool uses_frags = false; 5568 int offset = *__offset; 5569 bool use_rx_sg = false; 5570 unsigned int headroom; 5571 struct qeth_hdr *hdr; 5572 struct sk_buff *skb; 5573 int skb_len = 0; 5574 5575 element = &buffer->element[*element_no]; 5576 5577 next_packet: 5578 /* qeth_hdr must not cross element boundaries */ 5579 while (element->length < offset + sizeof(struct qeth_hdr)) { 5580 if (qeth_is_last_sbale(element)) 5581 return -ENODATA; 5582 element++; 5583 offset = 0; 5584 } 5585 5586 hdr = phys_to_virt(element->addr) + offset; 5587 offset += sizeof(*hdr); 5588 skb = NULL; 5589 5590 switch (hdr->hdr.l2.id) { 5591 case QETH_HEADER_TYPE_LAYER2: 5592 skb_len = hdr->hdr.l2.pkt_length; 5593 linear_len = ETH_HLEN; 5594 headroom = 0; 5595 break; 5596 case QETH_HEADER_TYPE_LAYER3: 5597 skb_len = hdr->hdr.l3.length; 5598 if (!IS_LAYER3(card)) { 5599 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5600 goto walk_packet; 5601 } 5602 5603 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) { 5604 linear_len = ETH_HLEN; 5605 headroom = 0; 5606 break; 5607 } 5608 5609 if (hdr->hdr.l3.flags & QETH_HDR_IPV6) 5610 linear_len = sizeof(struct ipv6hdr); 5611 else 5612 linear_len = sizeof(struct iphdr); 5613 headroom = ETH_HLEN; 5614 break; 5615 case QETH_HEADER_TYPE_OSN: 5616 skb_len = hdr->hdr.osn.pdu_length; 5617 if (!IS_OSN(card)) { 5618 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5619 goto walk_packet; 5620 } 5621 5622 linear_len = skb_len; 5623 headroom = sizeof(struct qeth_hdr); 5624 break; 5625 default: 5626 if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL) 5627 QETH_CARD_STAT_INC(card, rx_frame_errors); 5628 else 5629 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5630 5631 /* Can't determine packet length, drop the whole buffer. */ 5632 return -EPROTONOSUPPORT; 5633 } 5634 5635 if (skb_len < linear_len) { 5636 QETH_CARD_STAT_INC(card, rx_dropped_runt); 5637 goto walk_packet; 5638 } 5639 5640 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || 5641 (skb_len > READ_ONCE(priv->rx_copybreak) && 5642 !atomic_read(&card->force_alloc_skb) && 5643 !IS_OSN(card)); 5644 5645 if (use_rx_sg) { 5646 /* QETH_CQ_ENABLED only: */ 5647 if (qethbuffer->rx_skb && 5648 skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) { 5649 skb = qethbuffer->rx_skb; 5650 qethbuffer->rx_skb = NULL; 5651 goto use_skb; 5652 } 5653 5654 skb = napi_get_frags(napi); 5655 if (!skb) { 5656 /* -ENOMEM, no point in falling back further. */ 5657 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5658 goto walk_packet; 5659 } 5660 5661 if (skb_tailroom(skb) >= linear_len + headroom) { 5662 uses_frags = true; 5663 goto use_skb; 5664 } 5665 5666 netdev_info_once(card->dev, 5667 "Insufficient linear space in NAPI frags skb, need %u but have %u\n", 5668 linear_len + headroom, skb_tailroom(skb)); 5669 /* Shouldn't happen. Don't optimize, fall back to linear skb. */ 5670 } 5671 5672 linear_len = skb_len; 5673 skb = napi_alloc_skb(napi, linear_len + headroom); 5674 if (!skb) { 5675 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5676 goto walk_packet; 5677 } 5678 5679 use_skb: 5680 if (headroom) 5681 skb_reserve(skb, headroom); 5682 walk_packet: 5683 while (skb_len) { 5684 int data_len = min(skb_len, (int)(element->length - offset)); 5685 char *data = phys_to_virt(element->addr) + offset; 5686 5687 skb_len -= data_len; 5688 offset += data_len; 5689 5690 /* Extract data from current element: */ 5691 if (skb && data_len) { 5692 if (linear_len) { 5693 unsigned int copy_len; 5694 5695 copy_len = min_t(unsigned int, linear_len, 5696 data_len); 5697 5698 skb_put_data(skb, data, copy_len); 5699 linear_len -= copy_len; 5700 data_len -= copy_len; 5701 data += copy_len; 5702 } 5703 5704 if (data_len) 5705 qeth_create_skb_frag(skb, data, data_len); 5706 } 5707 5708 /* Step forward to next element: */ 5709 if (skb_len) { 5710 if (qeth_is_last_sbale(element)) { 5711 QETH_CARD_TEXT(card, 4, "unexeob"); 5712 QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); 5713 if (skb) { 5714 if (uses_frags) 5715 napi_free_frags(napi); 5716 else 5717 dev_kfree_skb_any(skb); 5718 QETH_CARD_STAT_INC(card, 5719 rx_length_errors); 5720 } 5721 return -EMSGSIZE; 5722 } 5723 element++; 5724 offset = 0; 5725 } 5726 } 5727 5728 /* This packet was skipped, go get another one: */ 5729 if (!skb) 5730 goto next_packet; 5731 5732 *element_no = element - &buffer->element[0]; 5733 *__offset = offset; 5734 5735 qeth_receive_skb(card, skb, hdr, uses_frags); 5736 return 0; 5737 } 5738 5739 static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget, 5740 struct qeth_qdio_buffer *buf, bool *done) 5741 { 5742 unsigned int work_done = 0; 5743 5744 while (budget) { 5745 if (qeth_extract_skb(card, buf, &card->rx.buf_element, 5746 &card->rx.e_offset)) { 5747 *done = true; 5748 break; 5749 } 5750 5751 work_done++; 5752 budget--; 5753 } 5754 5755 return work_done; 5756 } 5757 5758 static unsigned int qeth_rx_poll(struct qeth_card *card, int budget) 5759 { 5760 struct qeth_rx *ctx = &card->rx; 5761 unsigned int work_done = 0; 5762 5763 while (budget > 0) { 5764 struct qeth_qdio_buffer *buffer; 5765 unsigned int skbs_done = 0; 5766 bool done = false; 5767 5768 /* Fetch completed RX buffers: */ 5769 if (!card->rx.b_count) { 5770 card->rx.qdio_err = 0; 5771 card->rx.b_count = qdio_get_next_buffers( 5772 card->data.ccwdev, 0, &card->rx.b_index, 5773 &card->rx.qdio_err); 5774 if (card->rx.b_count <= 0) { 5775 card->rx.b_count = 0; 5776 break; 5777 } 5778 } 5779 5780 /* Process one completed RX buffer: */ 5781 buffer = &card->qdio.in_q->bufs[card->rx.b_index]; 5782 if (!(card->rx.qdio_err && 5783 qeth_check_qdio_errors(card, buffer->buffer, 5784 card->rx.qdio_err, "qinerr"))) 5785 skbs_done = qeth_extract_skbs(card, budget, buffer, 5786 &done); 5787 else 5788 done = true; 5789 5790 work_done += skbs_done; 5791 budget -= skbs_done; 5792 5793 if (done) { 5794 QETH_CARD_STAT_INC(card, rx_bufs); 5795 qeth_put_buffer_pool_entry(card, buffer->pool_entry); 5796 buffer->pool_entry = NULL; 5797 card->rx.b_count--; 5798 ctx->bufs_refill++; 5799 ctx->bufs_refill -= qeth_rx_refill_queue(card, 5800 ctx->bufs_refill); 5801 5802 /* Step forward to next buffer: */ 5803 card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1); 5804 card->rx.buf_element = 0; 5805 card->rx.e_offset = 0; 5806 } 5807 } 5808 5809 return work_done; 5810 } 5811 5812 static void qeth_cq_poll(struct qeth_card *card) 5813 { 5814 unsigned int work_done = 0; 5815 5816 while (work_done < QDIO_MAX_BUFFERS_PER_Q) { 5817 unsigned int start, error; 5818 int completed; 5819 5820 completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start, 5821 &error); 5822 if (completed <= 0) 5823 return; 5824 5825 qeth_qdio_cq_handler(card, error, 1, start, completed); 5826 work_done += completed; 5827 } 5828 } 5829 5830 int qeth_poll(struct napi_struct *napi, int budget) 5831 { 5832 struct qeth_card *card = container_of(napi, struct qeth_card, napi); 5833 unsigned int work_done; 5834 5835 work_done = qeth_rx_poll(card, budget); 5836 5837 if (card->options.cq == QETH_CQ_ENABLED) 5838 qeth_cq_poll(card); 5839 5840 if (budget) { 5841 struct qeth_rx *ctx = &card->rx; 5842 5843 /* Process any substantial refill backlog: */ 5844 ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill); 5845 5846 /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */ 5847 if (work_done >= budget) 5848 return work_done; 5849 } 5850 5851 if (napi_complete_done(napi, work_done) && 5852 qdio_start_irq(CARD_DDEV(card))) 5853 napi_schedule(napi); 5854 5855 return work_done; 5856 } 5857 EXPORT_SYMBOL_GPL(qeth_poll); 5858 5859 static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, 5860 unsigned int bidx, bool error, int budget) 5861 { 5862 struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx]; 5863 u8 sflags = buffer->buffer->element[15].sflags; 5864 struct qeth_card *card = queue->card; 5865 5866 if (queue->bufstates && (queue->bufstates[bidx].flags & 5867 QDIO_OUTBUF_STATE_FLAG_PENDING)) { 5868 WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); 5869 5870 if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, 5871 QETH_QDIO_BUF_PENDING) == 5872 QETH_QDIO_BUF_PRIMED) 5873 qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); 5874 5875 QETH_CARD_TEXT_(card, 5, "pel%u", bidx); 5876 5877 /* prepare the queue slot for re-use: */ 5878 qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); 5879 if (qeth_init_qdio_out_buf(queue, bidx)) { 5880 QETH_CARD_TEXT(card, 2, "outofbuf"); 5881 qeth_schedule_recovery(card); 5882 } 5883 5884 return; 5885 } 5886 5887 if (card->options.cq == QETH_CQ_ENABLED) 5888 qeth_notify_skbs(queue, buffer, 5889 qeth_compute_cq_notification(sflags, 0)); 5890 qeth_clear_output_buffer(queue, buffer, error, budget); 5891 } 5892 5893 static int qeth_tx_poll(struct napi_struct *napi, int budget) 5894 { 5895 struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi); 5896 unsigned int queue_no = queue->queue_no; 5897 struct qeth_card *card = queue->card; 5898 struct net_device *dev = card->dev; 5899 unsigned int work_done = 0; 5900 struct netdev_queue *txq; 5901 5902 txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no)); 5903 5904 while (1) { 5905 unsigned int start, error, i; 5906 unsigned int packets = 0; 5907 unsigned int bytes = 0; 5908 int completed; 5909 5910 if (qeth_out_queue_is_empty(queue)) { 5911 napi_complete(napi); 5912 return 0; 5913 } 5914 5915 /* Give the CPU a breather: */ 5916 if (work_done >= QDIO_MAX_BUFFERS_PER_Q) { 5917 QETH_TXQ_STAT_INC(queue, completion_yield); 5918 if (napi_complete_done(napi, 0)) 5919 napi_schedule(napi); 5920 return 0; 5921 } 5922 5923 completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false, 5924 &start, &error); 5925 if (completed <= 0) { 5926 /* Ensure we see TX completion for pending work: */ 5927 if (napi_complete_done(napi, 0)) 5928 qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS); 5929 return 0; 5930 } 5931 5932 for (i = start; i < start + completed; i++) { 5933 struct qeth_qdio_out_buffer *buffer; 5934 unsigned int bidx = QDIO_BUFNR(i); 5935 5936 buffer = queue->bufs[bidx]; 5937 packets += buffer->frames; 5938 bytes += buffer->bytes; 5939 5940 qeth_handle_send_error(card, buffer, error); 5941 qeth_iqd_tx_complete(queue, bidx, error, budget); 5942 qeth_cleanup_handled_pending(queue, bidx, false); 5943 } 5944 5945 netdev_tx_completed_queue(txq, packets, bytes); 5946 atomic_sub(completed, &queue->used_buffers); 5947 work_done += completed; 5948 5949 /* xmit may have observed the full-condition, but not yet 5950 * stopped the txq. In which case the code below won't trigger. 5951 * So before returning, xmit will re-check the txq's fill level 5952 * and wake it up if needed. 5953 */ 5954 if (netif_tx_queue_stopped(txq) && 5955 !qeth_out_queue_is_full(queue)) 5956 netif_tx_wake_queue(txq); 5957 } 5958 } 5959 5960 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) 5961 { 5962 if (!cmd->hdr.return_code) 5963 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 5964 return cmd->hdr.return_code; 5965 } 5966 5967 static int qeth_setassparms_get_caps_cb(struct qeth_card *card, 5968 struct qeth_reply *reply, 5969 unsigned long data) 5970 { 5971 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 5972 struct qeth_ipa_caps *caps = reply->param; 5973 5974 if (qeth_setassparms_inspect_rc(cmd)) 5975 return -EIO; 5976 5977 caps->supported = cmd->data.setassparms.data.caps.supported; 5978 caps->enabled = cmd->data.setassparms.data.caps.enabled; 5979 return 0; 5980 } 5981 5982 int qeth_setassparms_cb(struct qeth_card *card, 5983 struct qeth_reply *reply, unsigned long data) 5984 { 5985 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 5986 5987 QETH_CARD_TEXT(card, 4, "defadpcb"); 5988 5989 if (cmd->hdr.return_code) 5990 return -EIO; 5991 5992 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 5993 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 5994 card->options.ipa4.enabled = cmd->hdr.assists.enabled; 5995 if (cmd->hdr.prot_version == QETH_PROT_IPV6) 5996 card->options.ipa6.enabled = cmd->hdr.assists.enabled; 5997 return 0; 5998 } 5999 EXPORT_SYMBOL_GPL(qeth_setassparms_cb); 6000 6001 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, 6002 enum qeth_ipa_funcs ipa_func, 6003 u16 cmd_code, 6004 unsigned int data_length, 6005 enum qeth_prot_versions prot) 6006 { 6007 struct qeth_ipacmd_setassparms *setassparms; 6008 struct qeth_ipacmd_setassparms_hdr *hdr; 6009 struct qeth_cmd_buffer *iob; 6010 6011 QETH_CARD_TEXT(card, 4, "getasscm"); 6012 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot, 6013 data_length + 6014 offsetof(struct qeth_ipacmd_setassparms, 6015 data)); 6016 if (!iob) 6017 return NULL; 6018 6019 setassparms = &__ipa_cmd(iob)->data.setassparms; 6020 setassparms->assist_no = ipa_func; 6021 6022 hdr = &setassparms->hdr; 6023 hdr->length = sizeof(*hdr) + data_length; 6024 hdr->command_code = cmd_code; 6025 return iob; 6026 } 6027 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd); 6028 6029 int qeth_send_simple_setassparms_prot(struct qeth_card *card, 6030 enum qeth_ipa_funcs ipa_func, 6031 u16 cmd_code, u32 *data, 6032 enum qeth_prot_versions prot) 6033 { 6034 unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0; 6035 struct qeth_cmd_buffer *iob; 6036 6037 QETH_CARD_TEXT_(card, 4, "simassp%i", prot); 6038 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot); 6039 if (!iob) 6040 return -ENOMEM; 6041 6042 if (data) 6043 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data; 6044 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL); 6045 } 6046 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot); 6047 6048 static void qeth_unregister_dbf_views(void) 6049 { 6050 int x; 6051 6052 for (x = 0; x < QETH_DBF_INFOS; x++) { 6053 debug_unregister(qeth_dbf[x].id); 6054 qeth_dbf[x].id = NULL; 6055 } 6056 } 6057 6058 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) 6059 { 6060 char dbf_txt_buf[32]; 6061 va_list args; 6062 6063 if (!debug_level_enabled(id, level)) 6064 return; 6065 va_start(args, fmt); 6066 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); 6067 va_end(args); 6068 debug_text_event(id, level, dbf_txt_buf); 6069 } 6070 EXPORT_SYMBOL_GPL(qeth_dbf_longtext); 6071 6072 static int qeth_register_dbf_views(void) 6073 { 6074 int ret; 6075 int x; 6076 6077 for (x = 0; x < QETH_DBF_INFOS; x++) { 6078 /* register the areas */ 6079 qeth_dbf[x].id = debug_register(qeth_dbf[x].name, 6080 qeth_dbf[x].pages, 6081 qeth_dbf[x].areas, 6082 qeth_dbf[x].len); 6083 if (qeth_dbf[x].id == NULL) { 6084 qeth_unregister_dbf_views(); 6085 return -ENOMEM; 6086 } 6087 6088 /* register a view */ 6089 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view); 6090 if (ret) { 6091 qeth_unregister_dbf_views(); 6092 return ret; 6093 } 6094 6095 /* set a passing level */ 6096 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level); 6097 } 6098 6099 return 0; 6100 } 6101 6102 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */ 6103 6104 int qeth_core_load_discipline(struct qeth_card *card, 6105 enum qeth_discipline_id discipline) 6106 { 6107 mutex_lock(&qeth_mod_mutex); 6108 switch (discipline) { 6109 case QETH_DISCIPLINE_LAYER3: 6110 card->discipline = try_then_request_module( 6111 symbol_get(qeth_l3_discipline), "qeth_l3"); 6112 break; 6113 case QETH_DISCIPLINE_LAYER2: 6114 card->discipline = try_then_request_module( 6115 symbol_get(qeth_l2_discipline), "qeth_l2"); 6116 break; 6117 default: 6118 break; 6119 } 6120 mutex_unlock(&qeth_mod_mutex); 6121 6122 if (!card->discipline) { 6123 dev_err(&card->gdev->dev, "There is no kernel module to " 6124 "support discipline %d\n", discipline); 6125 return -EINVAL; 6126 } 6127 6128 card->options.layer = discipline; 6129 return 0; 6130 } 6131 6132 void qeth_core_free_discipline(struct qeth_card *card) 6133 { 6134 if (IS_LAYER2(card)) 6135 symbol_put(qeth_l2_discipline); 6136 else 6137 symbol_put(qeth_l3_discipline); 6138 card->options.layer = QETH_DISCIPLINE_UNDETERMINED; 6139 card->discipline = NULL; 6140 } 6141 6142 const struct device_type qeth_generic_devtype = { 6143 .name = "qeth_generic", 6144 .groups = qeth_generic_attr_groups, 6145 }; 6146 EXPORT_SYMBOL_GPL(qeth_generic_devtype); 6147 6148 static const struct device_type qeth_osn_devtype = { 6149 .name = "qeth_osn", 6150 .groups = qeth_osn_attr_groups, 6151 }; 6152 6153 #define DBF_NAME_LEN 20 6154 6155 struct qeth_dbf_entry { 6156 char dbf_name[DBF_NAME_LEN]; 6157 debug_info_t *dbf_info; 6158 struct list_head dbf_list; 6159 }; 6160 6161 static LIST_HEAD(qeth_dbf_list); 6162 static DEFINE_MUTEX(qeth_dbf_list_mutex); 6163 6164 static debug_info_t *qeth_get_dbf_entry(char *name) 6165 { 6166 struct qeth_dbf_entry *entry; 6167 debug_info_t *rc = NULL; 6168 6169 mutex_lock(&qeth_dbf_list_mutex); 6170 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) { 6171 if (strcmp(entry->dbf_name, name) == 0) { 6172 rc = entry->dbf_info; 6173 break; 6174 } 6175 } 6176 mutex_unlock(&qeth_dbf_list_mutex); 6177 return rc; 6178 } 6179 6180 static int qeth_add_dbf_entry(struct qeth_card *card, char *name) 6181 { 6182 struct qeth_dbf_entry *new_entry; 6183 6184 card->debug = debug_register(name, 2, 1, 8); 6185 if (!card->debug) { 6186 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); 6187 goto err; 6188 } 6189 if (debug_register_view(card->debug, &debug_hex_ascii_view)) 6190 goto err_dbg; 6191 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL); 6192 if (!new_entry) 6193 goto err_dbg; 6194 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN); 6195 new_entry->dbf_info = card->debug; 6196 mutex_lock(&qeth_dbf_list_mutex); 6197 list_add(&new_entry->dbf_list, &qeth_dbf_list); 6198 mutex_unlock(&qeth_dbf_list_mutex); 6199 6200 return 0; 6201 6202 err_dbg: 6203 debug_unregister(card->debug); 6204 err: 6205 return -ENOMEM; 6206 } 6207 6208 static void qeth_clear_dbf_list(void) 6209 { 6210 struct qeth_dbf_entry *entry, *tmp; 6211 6212 mutex_lock(&qeth_dbf_list_mutex); 6213 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) { 6214 list_del(&entry->dbf_list); 6215 debug_unregister(entry->dbf_info); 6216 kfree(entry); 6217 } 6218 mutex_unlock(&qeth_dbf_list_mutex); 6219 } 6220 6221 static struct net_device *qeth_alloc_netdev(struct qeth_card *card) 6222 { 6223 struct net_device *dev; 6224 struct qeth_priv *priv; 6225 6226 switch (card->info.type) { 6227 case QETH_CARD_TYPE_IQD: 6228 dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN, 6229 ether_setup, QETH_MAX_OUT_QUEUES, 1); 6230 break; 6231 case QETH_CARD_TYPE_OSM: 6232 dev = alloc_etherdev(sizeof(*priv)); 6233 break; 6234 case QETH_CARD_TYPE_OSN: 6235 dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN, 6236 ether_setup); 6237 break; 6238 default: 6239 dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1); 6240 } 6241 6242 if (!dev) 6243 return NULL; 6244 6245 priv = netdev_priv(dev); 6246 priv->rx_copybreak = QETH_RX_COPYBREAK; 6247 priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1; 6248 6249 dev->ml_priv = card; 6250 dev->watchdog_timeo = QETH_TX_TIMEOUT; 6251 dev->min_mtu = IS_OSN(card) ? 64 : 576; 6252 /* initialized when device first goes online: */ 6253 dev->max_mtu = 0; 6254 dev->mtu = 0; 6255 SET_NETDEV_DEV(dev, &card->gdev->dev); 6256 netif_carrier_off(dev); 6257 6258 if (IS_OSN(card)) { 6259 dev->ethtool_ops = &qeth_osn_ethtool_ops; 6260 } else { 6261 dev->ethtool_ops = &qeth_ethtool_ops; 6262 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 6263 dev->hw_features |= NETIF_F_SG; 6264 dev->vlan_features |= NETIF_F_SG; 6265 if (IS_IQD(card)) 6266 dev->features |= NETIF_F_SG; 6267 } 6268 6269 return dev; 6270 } 6271 6272 struct net_device *qeth_clone_netdev(struct net_device *orig) 6273 { 6274 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv); 6275 6276 if (!clone) 6277 return NULL; 6278 6279 clone->dev_port = orig->dev_port; 6280 return clone; 6281 } 6282 6283 static int qeth_core_probe_device(struct ccwgroup_device *gdev) 6284 { 6285 struct qeth_card *card; 6286 struct device *dev; 6287 int rc; 6288 enum qeth_discipline_id enforced_disc; 6289 char dbf_name[DBF_NAME_LEN]; 6290 6291 QETH_DBF_TEXT(SETUP, 2, "probedev"); 6292 6293 dev = &gdev->dev; 6294 if (!get_device(dev)) 6295 return -ENODEV; 6296 6297 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); 6298 6299 card = qeth_alloc_card(gdev); 6300 if (!card) { 6301 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); 6302 rc = -ENOMEM; 6303 goto err_dev; 6304 } 6305 6306 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", 6307 dev_name(&gdev->dev)); 6308 card->debug = qeth_get_dbf_entry(dbf_name); 6309 if (!card->debug) { 6310 rc = qeth_add_dbf_entry(card, dbf_name); 6311 if (rc) 6312 goto err_card; 6313 } 6314 6315 qeth_setup_card(card); 6316 card->dev = qeth_alloc_netdev(card); 6317 if (!card->dev) { 6318 rc = -ENOMEM; 6319 goto err_card; 6320 } 6321 6322 qeth_determine_capabilities(card); 6323 qeth_set_blkt_defaults(card); 6324 6325 card->qdio.no_out_queues = card->dev->num_tx_queues; 6326 rc = qeth_update_from_chp_desc(card); 6327 if (rc) 6328 goto err_chp_desc; 6329 6330 enforced_disc = qeth_enforce_discipline(card); 6331 switch (enforced_disc) { 6332 case QETH_DISCIPLINE_UNDETERMINED: 6333 gdev->dev.type = &qeth_generic_devtype; 6334 break; 6335 default: 6336 card->info.layer_enforced = true; 6337 rc = qeth_core_load_discipline(card, enforced_disc); 6338 if (rc) 6339 goto err_load; 6340 6341 gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype : 6342 card->discipline->devtype; 6343 rc = card->discipline->setup(card->gdev); 6344 if (rc) 6345 goto err_disc; 6346 break; 6347 } 6348 6349 return 0; 6350 6351 err_disc: 6352 qeth_core_free_discipline(card); 6353 err_load: 6354 err_chp_desc: 6355 free_netdev(card->dev); 6356 err_card: 6357 qeth_core_free_card(card); 6358 err_dev: 6359 put_device(dev); 6360 return rc; 6361 } 6362 6363 static void qeth_core_remove_device(struct ccwgroup_device *gdev) 6364 { 6365 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6366 6367 QETH_CARD_TEXT(card, 2, "removedv"); 6368 6369 if (card->discipline) { 6370 card->discipline->remove(gdev); 6371 qeth_core_free_discipline(card); 6372 } 6373 6374 qeth_free_qdio_queues(card); 6375 6376 free_netdev(card->dev); 6377 qeth_core_free_card(card); 6378 put_device(&gdev->dev); 6379 } 6380 6381 static int qeth_core_set_online(struct ccwgroup_device *gdev) 6382 { 6383 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6384 int rc = 0; 6385 enum qeth_discipline_id def_discipline; 6386 6387 if (!card->discipline) { 6388 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : 6389 QETH_DISCIPLINE_LAYER2; 6390 rc = qeth_core_load_discipline(card, def_discipline); 6391 if (rc) 6392 goto err; 6393 rc = card->discipline->setup(card->gdev); 6394 if (rc) { 6395 qeth_core_free_discipline(card); 6396 goto err; 6397 } 6398 } 6399 6400 rc = qeth_set_online(card); 6401 err: 6402 return rc; 6403 } 6404 6405 static int qeth_core_set_offline(struct ccwgroup_device *gdev) 6406 { 6407 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6408 6409 return qeth_set_offline(card, false); 6410 } 6411 6412 static void qeth_core_shutdown(struct ccwgroup_device *gdev) 6413 { 6414 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6415 6416 qeth_set_allowed_threads(card, 0, 1); 6417 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) 6418 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 6419 qeth_qdio_clear_card(card, 0); 6420 qeth_drain_output_queues(card); 6421 qdio_free(CARD_DDEV(card)); 6422 } 6423 6424 static ssize_t group_store(struct device_driver *ddrv, const char *buf, 6425 size_t count) 6426 { 6427 int err; 6428 6429 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3, 6430 buf); 6431 6432 return err ? err : count; 6433 } 6434 static DRIVER_ATTR_WO(group); 6435 6436 static struct attribute *qeth_drv_attrs[] = { 6437 &driver_attr_group.attr, 6438 NULL, 6439 }; 6440 static struct attribute_group qeth_drv_attr_group = { 6441 .attrs = qeth_drv_attrs, 6442 }; 6443 static const struct attribute_group *qeth_drv_attr_groups[] = { 6444 &qeth_drv_attr_group, 6445 NULL, 6446 }; 6447 6448 static struct ccwgroup_driver qeth_core_ccwgroup_driver = { 6449 .driver = { 6450 .groups = qeth_drv_attr_groups, 6451 .owner = THIS_MODULE, 6452 .name = "qeth", 6453 }, 6454 .ccw_driver = &qeth_ccw_driver, 6455 .setup = qeth_core_probe_device, 6456 .remove = qeth_core_remove_device, 6457 .set_online = qeth_core_set_online, 6458 .set_offline = qeth_core_set_offline, 6459 .shutdown = qeth_core_shutdown, 6460 }; 6461 6462 struct qeth_card *qeth_get_card_by_busid(char *bus_id) 6463 { 6464 struct ccwgroup_device *gdev; 6465 struct qeth_card *card; 6466 6467 gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id); 6468 if (!gdev) 6469 return NULL; 6470 6471 card = dev_get_drvdata(&gdev->dev); 6472 put_device(&gdev->dev); 6473 return card; 6474 } 6475 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid); 6476 6477 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 6478 { 6479 struct qeth_card *card = dev->ml_priv; 6480 struct mii_ioctl_data *mii_data; 6481 int rc = 0; 6482 6483 switch (cmd) { 6484 case SIOC_QETH_ADP_SET_SNMP_CONTROL: 6485 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); 6486 break; 6487 case SIOC_QETH_GET_CARD_TYPE: 6488 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) && 6489 !IS_VM_NIC(card)) 6490 return 1; 6491 return 0; 6492 case SIOCGMIIPHY: 6493 mii_data = if_mii(rq); 6494 mii_data->phy_id = 0; 6495 break; 6496 case SIOCGMIIREG: 6497 mii_data = if_mii(rq); 6498 if (mii_data->phy_id != 0) 6499 rc = -EINVAL; 6500 else 6501 mii_data->val_out = qeth_mdio_read(dev, 6502 mii_data->phy_id, mii_data->reg_num); 6503 break; 6504 case SIOC_QETH_QUERY_OAT: 6505 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); 6506 break; 6507 default: 6508 if (card->discipline->do_ioctl) 6509 rc = card->discipline->do_ioctl(dev, rq, cmd); 6510 else 6511 rc = -EOPNOTSUPP; 6512 } 6513 if (rc) 6514 QETH_CARD_TEXT_(card, 2, "ioce%x", rc); 6515 return rc; 6516 } 6517 EXPORT_SYMBOL_GPL(qeth_do_ioctl); 6518 6519 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply, 6520 unsigned long data) 6521 { 6522 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6523 u32 *features = reply->param; 6524 6525 if (qeth_setassparms_inspect_rc(cmd)) 6526 return -EIO; 6527 6528 *features = cmd->data.setassparms.data.flags_32bit; 6529 return 0; 6530 } 6531 6532 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype, 6533 enum qeth_prot_versions prot) 6534 { 6535 return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP, 6536 NULL, prot); 6537 } 6538 6539 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype, 6540 enum qeth_prot_versions prot, u8 *lp2lp) 6541 { 6542 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP; 6543 struct qeth_cmd_buffer *iob; 6544 struct qeth_ipa_caps caps; 6545 u32 features; 6546 int rc; 6547 6548 /* some L3 HW requires combined L3+L4 csum offload: */ 6549 if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 && 6550 cstype == IPA_OUTBOUND_CHECKSUM) 6551 required_features |= QETH_IPA_CHECKSUM_IP_HDR; 6552 6553 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0, 6554 prot); 6555 if (!iob) 6556 return -ENOMEM; 6557 6558 rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features); 6559 if (rc) 6560 return rc; 6561 6562 if ((required_features & features) != required_features) { 6563 qeth_set_csum_off(card, cstype, prot); 6564 return -EOPNOTSUPP; 6565 } 6566 6567 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 6568 SETASS_DATA_SIZEOF(flags_32bit), 6569 prot); 6570 if (!iob) { 6571 qeth_set_csum_off(card, cstype, prot); 6572 return -ENOMEM; 6573 } 6574 6575 if (features & QETH_IPA_CHECKSUM_LP2LP) 6576 required_features |= QETH_IPA_CHECKSUM_LP2LP; 6577 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features; 6578 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); 6579 if (rc) { 6580 qeth_set_csum_off(card, cstype, prot); 6581 return rc; 6582 } 6583 6584 if (!qeth_ipa_caps_supported(&caps, required_features) || 6585 !qeth_ipa_caps_enabled(&caps, required_features)) { 6586 qeth_set_csum_off(card, cstype, prot); 6587 return -EOPNOTSUPP; 6588 } 6589 6590 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n", 6591 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot); 6592 6593 if (lp2lp) 6594 *lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP); 6595 6596 return 0; 6597 } 6598 6599 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype, 6600 enum qeth_prot_versions prot, u8 *lp2lp) 6601 { 6602 return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) : 6603 qeth_set_csum_off(card, cstype, prot); 6604 } 6605 6606 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply, 6607 unsigned long data) 6608 { 6609 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6610 struct qeth_tso_start_data *tso_data = reply->param; 6611 6612 if (qeth_setassparms_inspect_rc(cmd)) 6613 return -EIO; 6614 6615 tso_data->mss = cmd->data.setassparms.data.tso.mss; 6616 tso_data->supported = cmd->data.setassparms.data.tso.supported; 6617 return 0; 6618 } 6619 6620 static int qeth_set_tso_off(struct qeth_card *card, 6621 enum qeth_prot_versions prot) 6622 { 6623 return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO, 6624 IPA_CMD_ASS_STOP, NULL, prot); 6625 } 6626 6627 static int qeth_set_tso_on(struct qeth_card *card, 6628 enum qeth_prot_versions prot) 6629 { 6630 struct qeth_tso_start_data tso_data; 6631 struct qeth_cmd_buffer *iob; 6632 struct qeth_ipa_caps caps; 6633 int rc; 6634 6635 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, 6636 IPA_CMD_ASS_START, 0, prot); 6637 if (!iob) 6638 return -ENOMEM; 6639 6640 rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data); 6641 if (rc) 6642 return rc; 6643 6644 if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) { 6645 qeth_set_tso_off(card, prot); 6646 return -EOPNOTSUPP; 6647 } 6648 6649 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, 6650 IPA_CMD_ASS_ENABLE, 6651 SETASS_DATA_SIZEOF(caps), prot); 6652 if (!iob) { 6653 qeth_set_tso_off(card, prot); 6654 return -ENOMEM; 6655 } 6656 6657 /* enable TSO capability */ 6658 __ipa_cmd(iob)->data.setassparms.data.caps.enabled = 6659 QETH_IPA_LARGE_SEND_TCP; 6660 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); 6661 if (rc) { 6662 qeth_set_tso_off(card, prot); 6663 return rc; 6664 } 6665 6666 if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) || 6667 !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) { 6668 qeth_set_tso_off(card, prot); 6669 return -EOPNOTSUPP; 6670 } 6671 6672 dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot, 6673 tso_data.mss); 6674 return 0; 6675 } 6676 6677 static int qeth_set_ipa_tso(struct qeth_card *card, bool on, 6678 enum qeth_prot_versions prot) 6679 { 6680 return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot); 6681 } 6682 6683 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) 6684 { 6685 int rc_ipv4 = (on) ? -EOPNOTSUPP : 0; 6686 int rc_ipv6; 6687 6688 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) 6689 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, 6690 QETH_PROT_IPV4, NULL); 6691 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) 6692 /* no/one Offload Assist available, so the rc is trivial */ 6693 return rc_ipv4; 6694 6695 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, 6696 QETH_PROT_IPV6, NULL); 6697 6698 if (on) 6699 /* enable: success if any Assist is active */ 6700 return (rc_ipv6) ? rc_ipv4 : 0; 6701 6702 /* disable: failure if any Assist is still active */ 6703 return (rc_ipv6) ? rc_ipv6 : rc_ipv4; 6704 } 6705 6706 /** 6707 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features 6708 * @dev: a net_device 6709 */ 6710 void qeth_enable_hw_features(struct net_device *dev) 6711 { 6712 struct qeth_card *card = dev->ml_priv; 6713 netdev_features_t features; 6714 6715 features = dev->features; 6716 /* force-off any feature that might need an IPA sequence. 6717 * netdev_update_features() will restart them. 6718 */ 6719 dev->features &= ~dev->hw_features; 6720 /* toggle VLAN filter, so that VIDs are re-programmed: */ 6721 if (IS_LAYER2(card) && IS_VM_NIC(card)) { 6722 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 6723 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 6724 } 6725 netdev_update_features(dev); 6726 if (features != dev->features) 6727 dev_warn(&card->gdev->dev, 6728 "Device recovery failed to restore all offload features\n"); 6729 } 6730 EXPORT_SYMBOL_GPL(qeth_enable_hw_features); 6731 6732 static void qeth_check_restricted_features(struct qeth_card *card, 6733 netdev_features_t changed, 6734 netdev_features_t actual) 6735 { 6736 netdev_features_t ipv6_features = NETIF_F_TSO6; 6737 netdev_features_t ipv4_features = NETIF_F_TSO; 6738 6739 if (!card->info.has_lp2lp_cso_v6) 6740 ipv6_features |= NETIF_F_IPV6_CSUM; 6741 if (!card->info.has_lp2lp_cso_v4) 6742 ipv4_features |= NETIF_F_IP_CSUM; 6743 6744 if ((changed & ipv6_features) && !(actual & ipv6_features)) 6745 qeth_flush_local_addrs6(card); 6746 if ((changed & ipv4_features) && !(actual & ipv4_features)) 6747 qeth_flush_local_addrs4(card); 6748 } 6749 6750 int qeth_set_features(struct net_device *dev, netdev_features_t features) 6751 { 6752 struct qeth_card *card = dev->ml_priv; 6753 netdev_features_t changed = dev->features ^ features; 6754 int rc = 0; 6755 6756 QETH_CARD_TEXT(card, 2, "setfeat"); 6757 QETH_CARD_HEX(card, 2, &features, sizeof(features)); 6758 6759 if ((changed & NETIF_F_IP_CSUM)) { 6760 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM, 6761 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4, 6762 &card->info.has_lp2lp_cso_v4); 6763 if (rc) 6764 changed ^= NETIF_F_IP_CSUM; 6765 } 6766 if (changed & NETIF_F_IPV6_CSUM) { 6767 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM, 6768 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6, 6769 &card->info.has_lp2lp_cso_v6); 6770 if (rc) 6771 changed ^= NETIF_F_IPV6_CSUM; 6772 } 6773 if (changed & NETIF_F_RXCSUM) { 6774 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM); 6775 if (rc) 6776 changed ^= NETIF_F_RXCSUM; 6777 } 6778 if (changed & NETIF_F_TSO) { 6779 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO, 6780 QETH_PROT_IPV4); 6781 if (rc) 6782 changed ^= NETIF_F_TSO; 6783 } 6784 if (changed & NETIF_F_TSO6) { 6785 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6, 6786 QETH_PROT_IPV6); 6787 if (rc) 6788 changed ^= NETIF_F_TSO6; 6789 } 6790 6791 qeth_check_restricted_features(card, dev->features ^ features, 6792 dev->features ^ changed); 6793 6794 /* everything changed successfully? */ 6795 if ((dev->features ^ features) == changed) 6796 return 0; 6797 /* something went wrong. save changed features and return error */ 6798 dev->features ^= changed; 6799 return -EIO; 6800 } 6801 EXPORT_SYMBOL_GPL(qeth_set_features); 6802 6803 netdev_features_t qeth_fix_features(struct net_device *dev, 6804 netdev_features_t features) 6805 { 6806 struct qeth_card *card = dev->ml_priv; 6807 6808 QETH_CARD_TEXT(card, 2, "fixfeat"); 6809 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) 6810 features &= ~NETIF_F_IP_CSUM; 6811 if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) 6812 features &= ~NETIF_F_IPV6_CSUM; 6813 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) && 6814 !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) 6815 features &= ~NETIF_F_RXCSUM; 6816 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) 6817 features &= ~NETIF_F_TSO; 6818 if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO)) 6819 features &= ~NETIF_F_TSO6; 6820 6821 QETH_CARD_HEX(card, 2, &features, sizeof(features)); 6822 return features; 6823 } 6824 EXPORT_SYMBOL_GPL(qeth_fix_features); 6825 6826 netdev_features_t qeth_features_check(struct sk_buff *skb, 6827 struct net_device *dev, 6828 netdev_features_t features) 6829 { 6830 struct qeth_card *card = dev->ml_priv; 6831 6832 /* Traffic with local next-hop is not eligible for some offloads: */ 6833 if (skb->ip_summed == CHECKSUM_PARTIAL && 6834 READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) { 6835 netdev_features_t restricted = 0; 6836 6837 if (skb_is_gso(skb) && !netif_needs_gso(skb, features)) 6838 restricted |= NETIF_F_ALL_TSO; 6839 6840 switch (vlan_get_protocol(skb)) { 6841 case htons(ETH_P_IP): 6842 if (!card->info.has_lp2lp_cso_v4) 6843 restricted |= NETIF_F_IP_CSUM; 6844 6845 if (restricted && qeth_next_hop_is_local_v4(card, skb)) 6846 features &= ~restricted; 6847 break; 6848 case htons(ETH_P_IPV6): 6849 if (!card->info.has_lp2lp_cso_v6) 6850 restricted |= NETIF_F_IPV6_CSUM; 6851 6852 if (restricted && qeth_next_hop_is_local_v6(card, skb)) 6853 features &= ~restricted; 6854 break; 6855 default: 6856 break; 6857 } 6858 } 6859 6860 /* GSO segmentation builds skbs with 6861 * a (small) linear part for the headers, and 6862 * page frags for the data. 6863 * Compared to a linear skb, the header-only part consumes an 6864 * additional buffer element. This reduces buffer utilization, and 6865 * hurts throughput. So compress small segments into one element. 6866 */ 6867 if (netif_needs_gso(skb, features)) { 6868 /* match skb_segment(): */ 6869 unsigned int doffset = skb->data - skb_mac_header(skb); 6870 unsigned int hsize = skb_shinfo(skb)->gso_size; 6871 unsigned int hroom = skb_headroom(skb); 6872 6873 /* linearize only if resulting skb allocations are order-0: */ 6874 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0)) 6875 features &= ~NETIF_F_SG; 6876 } 6877 6878 return vlan_features_check(skb, features); 6879 } 6880 EXPORT_SYMBOL_GPL(qeth_features_check); 6881 6882 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 6883 { 6884 struct qeth_card *card = dev->ml_priv; 6885 struct qeth_qdio_out_q *queue; 6886 unsigned int i; 6887 6888 QETH_CARD_TEXT(card, 5, "getstat"); 6889 6890 stats->rx_packets = card->stats.rx_packets; 6891 stats->rx_bytes = card->stats.rx_bytes; 6892 stats->rx_errors = card->stats.rx_length_errors + 6893 card->stats.rx_frame_errors + 6894 card->stats.rx_fifo_errors; 6895 stats->rx_dropped = card->stats.rx_dropped_nomem + 6896 card->stats.rx_dropped_notsupp + 6897 card->stats.rx_dropped_runt; 6898 stats->multicast = card->stats.rx_multicast; 6899 stats->rx_length_errors = card->stats.rx_length_errors; 6900 stats->rx_frame_errors = card->stats.rx_frame_errors; 6901 stats->rx_fifo_errors = card->stats.rx_fifo_errors; 6902 6903 for (i = 0; i < card->qdio.no_out_queues; i++) { 6904 queue = card->qdio.out_qs[i]; 6905 6906 stats->tx_packets += queue->stats.tx_packets; 6907 stats->tx_bytes += queue->stats.tx_bytes; 6908 stats->tx_errors += queue->stats.tx_errors; 6909 stats->tx_dropped += queue->stats.tx_dropped; 6910 } 6911 } 6912 EXPORT_SYMBOL_GPL(qeth_get_stats64); 6913 6914 #define TC_IQD_UCAST 0 6915 static void qeth_iqd_set_prio_tc_map(struct net_device *dev, 6916 unsigned int ucast_txqs) 6917 { 6918 unsigned int prio; 6919 6920 /* IQD requires mcast traffic to be placed on a dedicated queue, and 6921 * qeth_iqd_select_queue() deals with this. 6922 * For unicast traffic, we defer the queue selection to the stack. 6923 * By installing a trivial prio map that spans over only the unicast 6924 * queues, we can encourage the stack to spread the ucast traffic evenly 6925 * without selecting the mcast queue. 6926 */ 6927 6928 /* One traffic class, spanning over all active ucast queues: */ 6929 netdev_set_num_tc(dev, 1); 6930 netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs, 6931 QETH_IQD_MIN_UCAST_TXQ); 6932 6933 /* Map all priorities to this traffic class: */ 6934 for (prio = 0; prio <= TC_BITMASK; prio++) 6935 netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST); 6936 } 6937 6938 int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count) 6939 { 6940 struct net_device *dev = card->dev; 6941 int rc; 6942 6943 /* Per netif_setup_tc(), adjust the mapping first: */ 6944 if (IS_IQD(card)) 6945 qeth_iqd_set_prio_tc_map(dev, count - 1); 6946 6947 rc = netif_set_real_num_tx_queues(dev, count); 6948 6949 if (rc && IS_IQD(card)) 6950 qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1); 6951 6952 return rc; 6953 } 6954 EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues); 6955 6956 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, 6957 u8 cast_type, struct net_device *sb_dev) 6958 { 6959 u16 txq; 6960 6961 if (cast_type != RTN_UNICAST) 6962 return QETH_IQD_MCAST_TXQ; 6963 if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ) 6964 return QETH_IQD_MIN_UCAST_TXQ; 6965 6966 txq = netdev_pick_tx(dev, skb, sb_dev); 6967 return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq; 6968 } 6969 EXPORT_SYMBOL_GPL(qeth_iqd_select_queue); 6970 6971 int qeth_open(struct net_device *dev) 6972 { 6973 struct qeth_card *card = dev->ml_priv; 6974 6975 QETH_CARD_TEXT(card, 4, "qethopen"); 6976 6977 card->data.state = CH_STATE_UP; 6978 netif_tx_start_all_queues(dev); 6979 6980 napi_enable(&card->napi); 6981 local_bh_disable(); 6982 napi_schedule(&card->napi); 6983 if (IS_IQD(card)) { 6984 struct qeth_qdio_out_q *queue; 6985 unsigned int i; 6986 6987 qeth_for_each_output_queue(card, queue, i) { 6988 netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll, 6989 QETH_NAPI_WEIGHT); 6990 napi_enable(&queue->napi); 6991 napi_schedule(&queue->napi); 6992 } 6993 } 6994 /* kick-start the NAPI softirq: */ 6995 local_bh_enable(); 6996 return 0; 6997 } 6998 EXPORT_SYMBOL_GPL(qeth_open); 6999 7000 int qeth_stop(struct net_device *dev) 7001 { 7002 struct qeth_card *card = dev->ml_priv; 7003 7004 QETH_CARD_TEXT(card, 4, "qethstop"); 7005 if (IS_IQD(card)) { 7006 struct qeth_qdio_out_q *queue; 7007 unsigned int i; 7008 7009 /* Quiesce the NAPI instances: */ 7010 qeth_for_each_output_queue(card, queue, i) 7011 napi_disable(&queue->napi); 7012 7013 /* Stop .ndo_start_xmit, might still access queue->napi. */ 7014 netif_tx_disable(dev); 7015 7016 qeth_for_each_output_queue(card, queue, i) { 7017 del_timer_sync(&queue->timer); 7018 /* Queues may get re-allocated, so remove the NAPIs. */ 7019 netif_napi_del(&queue->napi); 7020 } 7021 } else { 7022 netif_tx_disable(dev); 7023 } 7024 7025 napi_disable(&card->napi); 7026 cancel_delayed_work_sync(&card->buffer_reclaim_work); 7027 qdio_stop_irq(CARD_DDEV(card)); 7028 7029 return 0; 7030 } 7031 EXPORT_SYMBOL_GPL(qeth_stop); 7032 7033 static int __init qeth_core_init(void) 7034 { 7035 int rc; 7036 7037 pr_info("loading core functions\n"); 7038 7039 qeth_debugfs_root = debugfs_create_dir("qeth", NULL); 7040 7041 rc = qeth_register_dbf_views(); 7042 if (rc) 7043 goto dbf_err; 7044 qeth_core_root_dev = root_device_register("qeth"); 7045 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); 7046 if (rc) 7047 goto register_err; 7048 qeth_core_header_cache = 7049 kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE, 7050 roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE), 7051 0, NULL); 7052 if (!qeth_core_header_cache) { 7053 rc = -ENOMEM; 7054 goto slab_err; 7055 } 7056 qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf", 7057 sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL); 7058 if (!qeth_qdio_outbuf_cache) { 7059 rc = -ENOMEM; 7060 goto cqslab_err; 7061 } 7062 rc = ccw_driver_register(&qeth_ccw_driver); 7063 if (rc) 7064 goto ccw_err; 7065 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); 7066 if (rc) 7067 goto ccwgroup_err; 7068 7069 return 0; 7070 7071 ccwgroup_err: 7072 ccw_driver_unregister(&qeth_ccw_driver); 7073 ccw_err: 7074 kmem_cache_destroy(qeth_qdio_outbuf_cache); 7075 cqslab_err: 7076 kmem_cache_destroy(qeth_core_header_cache); 7077 slab_err: 7078 root_device_unregister(qeth_core_root_dev); 7079 register_err: 7080 qeth_unregister_dbf_views(); 7081 dbf_err: 7082 debugfs_remove_recursive(qeth_debugfs_root); 7083 pr_err("Initializing the qeth device driver failed\n"); 7084 return rc; 7085 } 7086 7087 static void __exit qeth_core_exit(void) 7088 { 7089 qeth_clear_dbf_list(); 7090 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); 7091 ccw_driver_unregister(&qeth_ccw_driver); 7092 kmem_cache_destroy(qeth_qdio_outbuf_cache); 7093 kmem_cache_destroy(qeth_core_header_cache); 7094 root_device_unregister(qeth_core_root_dev); 7095 qeth_unregister_dbf_views(); 7096 debugfs_remove_recursive(qeth_debugfs_root); 7097 pr_info("core functions removed\n"); 7098 } 7099 7100 module_init(qeth_core_init); 7101 module_exit(qeth_core_exit); 7102 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); 7103 MODULE_DESCRIPTION("qeth core functions"); 7104 MODULE_LICENSE("GPL"); 7105