1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright IBM Corp. 2007, 2009 4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, 5 * Frank Pavlic <fpavlic@de.ibm.com>, 6 * Thomas Spatzier <tspat@de.ibm.com>, 7 * Frank Blaschka <frank.blaschka@de.ibm.com> 8 */ 9 10 #define KMSG_COMPONENT "qeth" 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 13 #include <linux/compat.h> 14 #include <linux/module.h> 15 #include <linux/moduleparam.h> 16 #include <linux/string.h> 17 #include <linux/errno.h> 18 #include <linux/kernel.h> 19 #include <linux/log2.h> 20 #include <linux/ip.h> 21 #include <linux/tcp.h> 22 #include <linux/mii.h> 23 #include <linux/mm.h> 24 #include <linux/kthread.h> 25 #include <linux/slab.h> 26 #include <linux/if_vlan.h> 27 #include <linux/netdevice.h> 28 #include <linux/netdev_features.h> 29 #include <linux/skbuff.h> 30 #include <linux/vmalloc.h> 31 32 #include <net/iucv/af_iucv.h> 33 #include <net/dsfield.h> 34 35 #include <asm/ebcdic.h> 36 #include <asm/chpid.h> 37 #include <asm/io.h> 38 #include <asm/sysinfo.h> 39 #include <asm/diag.h> 40 #include <asm/cio.h> 41 #include <asm/ccwdev.h> 42 #include <asm/cpcmd.h> 43 44 #include "qeth_core.h" 45 46 struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { 47 /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ 48 /* N P A M L V H */ 49 [QETH_DBF_SETUP] = {"qeth_setup", 50 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, 51 [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3, 52 &debug_sprintf_view, NULL}, 53 [QETH_DBF_CTRL] = {"qeth_control", 54 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, 55 }; 56 EXPORT_SYMBOL_GPL(qeth_dbf); 57 58 struct kmem_cache *qeth_core_header_cache; 59 EXPORT_SYMBOL_GPL(qeth_core_header_cache); 60 static struct kmem_cache *qeth_qdio_outbuf_cache; 61 62 static struct device *qeth_core_root_dev; 63 static struct lock_class_key qdio_out_skb_queue_key; 64 65 static void qeth_issue_next_read_cb(struct qeth_card *card, 66 struct qeth_cmd_buffer *iob, 67 unsigned int data_length); 68 static void qeth_free_buffer_pool(struct qeth_card *); 69 static int qeth_qdio_establish(struct qeth_card *); 70 static void qeth_free_qdio_queues(struct qeth_card *card); 71 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, 72 struct qeth_qdio_out_buffer *buf, 73 enum iucv_tx_notify notification); 74 static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error, 75 int budget); 76 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); 77 78 static void qeth_close_dev_handler(struct work_struct *work) 79 { 80 struct qeth_card *card; 81 82 card = container_of(work, struct qeth_card, close_dev_work); 83 QETH_CARD_TEXT(card, 2, "cldevhdl"); 84 ccwgroup_set_offline(card->gdev); 85 } 86 87 static const char *qeth_get_cardname(struct qeth_card *card) 88 { 89 if (IS_VM_NIC(card)) { 90 switch (card->info.type) { 91 case QETH_CARD_TYPE_OSD: 92 return " Virtual NIC QDIO"; 93 case QETH_CARD_TYPE_IQD: 94 return " Virtual NIC Hiper"; 95 case QETH_CARD_TYPE_OSM: 96 return " Virtual NIC QDIO - OSM"; 97 case QETH_CARD_TYPE_OSX: 98 return " Virtual NIC QDIO - OSX"; 99 default: 100 return " unknown"; 101 } 102 } else { 103 switch (card->info.type) { 104 case QETH_CARD_TYPE_OSD: 105 return " OSD Express"; 106 case QETH_CARD_TYPE_IQD: 107 return " HiperSockets"; 108 case QETH_CARD_TYPE_OSN: 109 return " OSN QDIO"; 110 case QETH_CARD_TYPE_OSM: 111 return " OSM QDIO"; 112 case QETH_CARD_TYPE_OSX: 113 return " OSX QDIO"; 114 default: 115 return " unknown"; 116 } 117 } 118 return " n/a"; 119 } 120 121 /* max length to be returned: 14 */ 122 const char *qeth_get_cardname_short(struct qeth_card *card) 123 { 124 if (IS_VM_NIC(card)) { 125 switch (card->info.type) { 126 case QETH_CARD_TYPE_OSD: 127 return "Virt.NIC QDIO"; 128 case QETH_CARD_TYPE_IQD: 129 return "Virt.NIC Hiper"; 130 case QETH_CARD_TYPE_OSM: 131 return "Virt.NIC OSM"; 132 case QETH_CARD_TYPE_OSX: 133 return "Virt.NIC OSX"; 134 default: 135 return "unknown"; 136 } 137 } else { 138 switch (card->info.type) { 139 case QETH_CARD_TYPE_OSD: 140 switch (card->info.link_type) { 141 case QETH_LINK_TYPE_FAST_ETH: 142 return "OSD_100"; 143 case QETH_LINK_TYPE_HSTR: 144 return "HSTR"; 145 case QETH_LINK_TYPE_GBIT_ETH: 146 return "OSD_1000"; 147 case QETH_LINK_TYPE_10GBIT_ETH: 148 return "OSD_10GIG"; 149 case QETH_LINK_TYPE_25GBIT_ETH: 150 return "OSD_25GIG"; 151 case QETH_LINK_TYPE_LANE_ETH100: 152 return "OSD_FE_LANE"; 153 case QETH_LINK_TYPE_LANE_TR: 154 return "OSD_TR_LANE"; 155 case QETH_LINK_TYPE_LANE_ETH1000: 156 return "OSD_GbE_LANE"; 157 case QETH_LINK_TYPE_LANE: 158 return "OSD_ATM_LANE"; 159 default: 160 return "OSD_Express"; 161 } 162 case QETH_CARD_TYPE_IQD: 163 return "HiperSockets"; 164 case QETH_CARD_TYPE_OSN: 165 return "OSN"; 166 case QETH_CARD_TYPE_OSM: 167 return "OSM_1000"; 168 case QETH_CARD_TYPE_OSX: 169 return "OSX_10GIG"; 170 default: 171 return "unknown"; 172 } 173 } 174 return "n/a"; 175 } 176 177 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, 178 int clear_start_mask) 179 { 180 unsigned long flags; 181 182 spin_lock_irqsave(&card->thread_mask_lock, flags); 183 card->thread_allowed_mask = threads; 184 if (clear_start_mask) 185 card->thread_start_mask &= threads; 186 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 187 wake_up(&card->wait_q); 188 } 189 EXPORT_SYMBOL_GPL(qeth_set_allowed_threads); 190 191 int qeth_threads_running(struct qeth_card *card, unsigned long threads) 192 { 193 unsigned long flags; 194 int rc = 0; 195 196 spin_lock_irqsave(&card->thread_mask_lock, flags); 197 rc = (card->thread_running_mask & threads); 198 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 199 return rc; 200 } 201 EXPORT_SYMBOL_GPL(qeth_threads_running); 202 203 void qeth_clear_working_pool_list(struct qeth_card *card) 204 { 205 struct qeth_buffer_pool_entry *pool_entry, *tmp; 206 207 QETH_CARD_TEXT(card, 5, "clwrklst"); 208 list_for_each_entry_safe(pool_entry, tmp, 209 &card->qdio.in_buf_pool.entry_list, list){ 210 list_del(&pool_entry->list); 211 } 212 } 213 EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list); 214 215 static int qeth_alloc_buffer_pool(struct qeth_card *card) 216 { 217 struct qeth_buffer_pool_entry *pool_entry; 218 void *ptr; 219 int i, j; 220 221 QETH_CARD_TEXT(card, 5, "alocpool"); 222 for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { 223 pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL); 224 if (!pool_entry) { 225 qeth_free_buffer_pool(card); 226 return -ENOMEM; 227 } 228 for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) { 229 ptr = (void *) __get_free_page(GFP_KERNEL); 230 if (!ptr) { 231 while (j > 0) 232 free_page((unsigned long) 233 pool_entry->elements[--j]); 234 kfree(pool_entry); 235 qeth_free_buffer_pool(card); 236 return -ENOMEM; 237 } 238 pool_entry->elements[j] = ptr; 239 } 240 list_add(&pool_entry->init_list, 241 &card->qdio.init_pool.entry_list); 242 } 243 return 0; 244 } 245 246 int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) 247 { 248 QETH_CARD_TEXT(card, 2, "realcbp"); 249 250 /* TODO: steel/add buffers from/to a running card's buffer pool (?) */ 251 qeth_clear_working_pool_list(card); 252 qeth_free_buffer_pool(card); 253 card->qdio.in_buf_pool.buf_count = bufcnt; 254 card->qdio.init_pool.buf_count = bufcnt; 255 return qeth_alloc_buffer_pool(card); 256 } 257 EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool); 258 259 static void qeth_free_qdio_queue(struct qeth_qdio_q *q) 260 { 261 if (!q) 262 return; 263 264 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 265 kfree(q); 266 } 267 268 static struct qeth_qdio_q *qeth_alloc_qdio_queue(void) 269 { 270 struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL); 271 int i; 272 273 if (!q) 274 return NULL; 275 276 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { 277 kfree(q); 278 return NULL; 279 } 280 281 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) 282 q->bufs[i].buffer = q->qdio_bufs[i]; 283 284 QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *)); 285 return q; 286 } 287 288 static int qeth_cq_init(struct qeth_card *card) 289 { 290 int rc; 291 292 if (card->options.cq == QETH_CQ_ENABLED) { 293 QETH_CARD_TEXT(card, 2, "cqinit"); 294 qdio_reset_buffers(card->qdio.c_q->qdio_bufs, 295 QDIO_MAX_BUFFERS_PER_Q); 296 card->qdio.c_q->next_buf_to_init = 127; 297 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 298 card->qdio.no_in_queues - 1, 0, 299 127); 300 if (rc) { 301 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 302 goto out; 303 } 304 } 305 rc = 0; 306 out: 307 return rc; 308 } 309 310 static int qeth_alloc_cq(struct qeth_card *card) 311 { 312 int rc; 313 314 if (card->options.cq == QETH_CQ_ENABLED) { 315 int i; 316 struct qdio_outbuf_state *outbuf_states; 317 318 QETH_CARD_TEXT(card, 2, "cqon"); 319 card->qdio.c_q = qeth_alloc_qdio_queue(); 320 if (!card->qdio.c_q) { 321 rc = -1; 322 goto kmsg_out; 323 } 324 card->qdio.no_in_queues = 2; 325 card->qdio.out_bufstates = 326 kcalloc(card->qdio.no_out_queues * 327 QDIO_MAX_BUFFERS_PER_Q, 328 sizeof(struct qdio_outbuf_state), 329 GFP_KERNEL); 330 outbuf_states = card->qdio.out_bufstates; 331 if (outbuf_states == NULL) { 332 rc = -1; 333 goto free_cq_out; 334 } 335 for (i = 0; i < card->qdio.no_out_queues; ++i) { 336 card->qdio.out_qs[i]->bufstates = outbuf_states; 337 outbuf_states += QDIO_MAX_BUFFERS_PER_Q; 338 } 339 } else { 340 QETH_CARD_TEXT(card, 2, "nocq"); 341 card->qdio.c_q = NULL; 342 card->qdio.no_in_queues = 1; 343 } 344 QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues); 345 rc = 0; 346 out: 347 return rc; 348 free_cq_out: 349 qeth_free_qdio_queue(card->qdio.c_q); 350 card->qdio.c_q = NULL; 351 kmsg_out: 352 dev_err(&card->gdev->dev, "Failed to create completion queue\n"); 353 goto out; 354 } 355 356 static void qeth_free_cq(struct qeth_card *card) 357 { 358 if (card->qdio.c_q) { 359 --card->qdio.no_in_queues; 360 qeth_free_qdio_queue(card->qdio.c_q); 361 card->qdio.c_q = NULL; 362 } 363 kfree(card->qdio.out_bufstates); 364 card->qdio.out_bufstates = NULL; 365 } 366 367 static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, 368 int delayed) 369 { 370 enum iucv_tx_notify n; 371 372 switch (sbalf15) { 373 case 0: 374 n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK; 375 break; 376 case 4: 377 case 16: 378 case 17: 379 case 18: 380 n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE : 381 TX_NOTIFY_UNREACHABLE; 382 break; 383 default: 384 n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR : 385 TX_NOTIFY_GENERALERROR; 386 break; 387 } 388 389 return n; 390 } 391 392 static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, 393 int forced_cleanup) 394 { 395 if (q->card->options.cq != QETH_CQ_ENABLED) 396 return; 397 398 if (q->bufs[bidx]->next_pending != NULL) { 399 struct qeth_qdio_out_buffer *head = q->bufs[bidx]; 400 struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending; 401 402 while (c) { 403 if (forced_cleanup || 404 atomic_read(&c->state) == 405 QETH_QDIO_BUF_HANDLED_DELAYED) { 406 struct qeth_qdio_out_buffer *f = c; 407 QETH_CARD_TEXT(f->q->card, 5, "fp"); 408 QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f); 409 /* release here to avoid interleaving between 410 outbound tasklet and inbound tasklet 411 regarding notifications and lifecycle */ 412 qeth_tx_complete_buf(c, forced_cleanup, 0); 413 414 c = f->next_pending; 415 WARN_ON_ONCE(head->next_pending != f); 416 head->next_pending = c; 417 kmem_cache_free(qeth_qdio_outbuf_cache, f); 418 } else { 419 head = c; 420 c = c->next_pending; 421 } 422 423 } 424 } 425 if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) == 426 QETH_QDIO_BUF_HANDLED_DELAYED)) { 427 /* for recovery situations */ 428 qeth_init_qdio_out_buf(q, bidx); 429 QETH_CARD_TEXT(q->card, 2, "clprecov"); 430 } 431 } 432 433 434 static void qeth_qdio_handle_aob(struct qeth_card *card, 435 unsigned long phys_aob_addr) 436 { 437 struct qaob *aob; 438 struct qeth_qdio_out_buffer *buffer; 439 enum iucv_tx_notify notification; 440 unsigned int i; 441 442 aob = (struct qaob *) phys_to_virt(phys_aob_addr); 443 QETH_CARD_TEXT(card, 5, "haob"); 444 QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr); 445 buffer = (struct qeth_qdio_out_buffer *) aob->user1; 446 QETH_CARD_TEXT_(card, 5, "%lx", aob->user1); 447 448 if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, 449 QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) { 450 notification = TX_NOTIFY_OK; 451 } else { 452 WARN_ON_ONCE(atomic_read(&buffer->state) != 453 QETH_QDIO_BUF_PENDING); 454 atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ); 455 notification = TX_NOTIFY_DELAYED_OK; 456 } 457 458 if (aob->aorc != 0) { 459 QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc); 460 notification = qeth_compute_cq_notification(aob->aorc, 1); 461 } 462 qeth_notify_skbs(buffer->q, buffer, notification); 463 464 /* Free dangling allocations. The attached skbs are handled by 465 * qeth_cleanup_handled_pending(). 466 */ 467 for (i = 0; 468 i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); 469 i++) { 470 if (aob->sba[i] && buffer->is_header[i]) 471 kmem_cache_free(qeth_core_header_cache, 472 (void *) aob->sba[i]); 473 } 474 atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); 475 476 qdio_release_aob(aob); 477 } 478 479 static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) 480 { 481 return card->options.cq == QETH_CQ_ENABLED && 482 card->qdio.c_q != NULL && 483 queue != 0 && 484 queue == card->qdio.no_in_queues - 1; 485 } 486 487 static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len, 488 void *data) 489 { 490 ccw->cmd_code = cmd_code; 491 ccw->flags = flags | CCW_FLAG_SLI; 492 ccw->count = len; 493 ccw->cda = (__u32) __pa(data); 494 } 495 496 static int __qeth_issue_next_read(struct qeth_card *card) 497 { 498 struct qeth_cmd_buffer *iob = card->read_cmd; 499 struct qeth_channel *channel = iob->channel; 500 struct ccw1 *ccw = __ccw_from_cmd(iob); 501 int rc; 502 503 QETH_CARD_TEXT(card, 5, "issnxrd"); 504 if (channel->state != CH_STATE_UP) 505 return -EIO; 506 507 memset(iob->data, 0, iob->length); 508 qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data); 509 iob->callback = qeth_issue_next_read_cb; 510 /* keep the cmd alive after completion: */ 511 qeth_get_cmd(iob); 512 513 QETH_CARD_TEXT(card, 6, "noirqpnd"); 514 rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0); 515 if (!rc) { 516 channel->active_cmd = iob; 517 } else { 518 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", 519 rc, CARD_DEVID(card)); 520 qeth_unlock_channel(card, channel); 521 qeth_put_cmd(iob); 522 card->read_or_write_problem = 1; 523 qeth_schedule_recovery(card); 524 } 525 return rc; 526 } 527 528 static int qeth_issue_next_read(struct qeth_card *card) 529 { 530 int ret; 531 532 spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); 533 ret = __qeth_issue_next_read(card); 534 spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); 535 536 return ret; 537 } 538 539 static void qeth_enqueue_cmd(struct qeth_card *card, 540 struct qeth_cmd_buffer *iob) 541 { 542 spin_lock_irq(&card->lock); 543 list_add_tail(&iob->list, &card->cmd_waiter_list); 544 spin_unlock_irq(&card->lock); 545 } 546 547 static void qeth_dequeue_cmd(struct qeth_card *card, 548 struct qeth_cmd_buffer *iob) 549 { 550 spin_lock_irq(&card->lock); 551 list_del(&iob->list); 552 spin_unlock_irq(&card->lock); 553 } 554 555 void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason) 556 { 557 iob->rc = reason; 558 complete(&iob->done); 559 } 560 EXPORT_SYMBOL_GPL(qeth_notify_cmd); 561 562 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, 563 struct qeth_card *card) 564 { 565 const char *ipa_name; 566 int com = cmd->hdr.command; 567 ipa_name = qeth_get_ipa_cmd_name(com); 568 569 if (rc) 570 QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n", 571 ipa_name, com, CARD_DEVID(card), rc, 572 qeth_get_ipa_msg(rc)); 573 else 574 QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n", 575 ipa_name, com, CARD_DEVID(card)); 576 } 577 578 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, 579 struct qeth_ipa_cmd *cmd) 580 { 581 QETH_CARD_TEXT(card, 5, "chkipad"); 582 583 if (IS_IPA_REPLY(cmd)) { 584 if (cmd->hdr.command != IPA_CMD_SETCCID && 585 cmd->hdr.command != IPA_CMD_DELCCID && 586 cmd->hdr.command != IPA_CMD_MODCCID && 587 cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) 588 qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); 589 return cmd; 590 } 591 592 /* handle unsolicited event: */ 593 switch (cmd->hdr.command) { 594 case IPA_CMD_STOPLAN: 595 if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) { 596 dev_err(&card->gdev->dev, 597 "Interface %s is down because the adjacent port is no longer in reflective relay mode\n", 598 QETH_CARD_IFNAME(card)); 599 schedule_work(&card->close_dev_work); 600 } else { 601 dev_warn(&card->gdev->dev, 602 "The link for interface %s on CHPID 0x%X failed\n", 603 QETH_CARD_IFNAME(card), card->info.chpid); 604 qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); 605 netif_carrier_off(card->dev); 606 } 607 return NULL; 608 case IPA_CMD_STARTLAN: 609 dev_info(&card->gdev->dev, 610 "The link for %s on CHPID 0x%X has been restored\n", 611 QETH_CARD_IFNAME(card), card->info.chpid); 612 if (card->info.hwtrap) 613 card->info.hwtrap = 2; 614 qeth_schedule_recovery(card); 615 return NULL; 616 case IPA_CMD_SETBRIDGEPORT_IQD: 617 case IPA_CMD_SETBRIDGEPORT_OSA: 618 case IPA_CMD_ADDRESS_CHANGE_NOTIF: 619 if (card->discipline->control_event_handler(card, cmd)) 620 return cmd; 621 return NULL; 622 case IPA_CMD_MODCCID: 623 return cmd; 624 case IPA_CMD_REGISTER_LOCAL_ADDR: 625 QETH_CARD_TEXT(card, 3, "irla"); 626 return NULL; 627 case IPA_CMD_UNREGISTER_LOCAL_ADDR: 628 QETH_CARD_TEXT(card, 3, "urla"); 629 return NULL; 630 default: 631 QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n"); 632 return cmd; 633 } 634 } 635 636 void qeth_clear_ipacmd_list(struct qeth_card *card) 637 { 638 struct qeth_cmd_buffer *iob; 639 unsigned long flags; 640 641 QETH_CARD_TEXT(card, 4, "clipalst"); 642 643 spin_lock_irqsave(&card->lock, flags); 644 list_for_each_entry(iob, &card->cmd_waiter_list, list) 645 qeth_notify_cmd(iob, -EIO); 646 spin_unlock_irqrestore(&card->lock, flags); 647 } 648 EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); 649 650 static int qeth_check_idx_response(struct qeth_card *card, 651 unsigned char *buffer) 652 { 653 QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); 654 if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) { 655 QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n", 656 buffer[4]); 657 QETH_CARD_TEXT(card, 2, "ckidxres"); 658 QETH_CARD_TEXT(card, 2, " idxterm"); 659 QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]); 660 if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT || 661 buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) { 662 dev_err(&card->gdev->dev, 663 "The device does not support the configured transport mode\n"); 664 return -EPROTONOSUPPORT; 665 } 666 return -EIO; 667 } 668 return 0; 669 } 670 671 void qeth_put_cmd(struct qeth_cmd_buffer *iob) 672 { 673 if (refcount_dec_and_test(&iob->ref_count)) { 674 kfree(iob->data); 675 kfree(iob); 676 } 677 } 678 EXPORT_SYMBOL_GPL(qeth_put_cmd); 679 680 static void qeth_release_buffer_cb(struct qeth_card *card, 681 struct qeth_cmd_buffer *iob, 682 unsigned int data_length) 683 { 684 qeth_put_cmd(iob); 685 } 686 687 static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc) 688 { 689 qeth_notify_cmd(iob, rc); 690 qeth_put_cmd(iob); 691 } 692 693 struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel, 694 unsigned int length, unsigned int ccws, 695 long timeout) 696 { 697 struct qeth_cmd_buffer *iob; 698 699 if (length > QETH_BUFSIZE) 700 return NULL; 701 702 iob = kzalloc(sizeof(*iob), GFP_KERNEL); 703 if (!iob) 704 return NULL; 705 706 iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1), 707 GFP_KERNEL | GFP_DMA); 708 if (!iob->data) { 709 kfree(iob); 710 return NULL; 711 } 712 713 init_completion(&iob->done); 714 spin_lock_init(&iob->lock); 715 INIT_LIST_HEAD(&iob->list); 716 refcount_set(&iob->ref_count, 1); 717 iob->channel = channel; 718 iob->timeout = timeout; 719 iob->length = length; 720 return iob; 721 } 722 EXPORT_SYMBOL_GPL(qeth_alloc_cmd); 723 724 static void qeth_issue_next_read_cb(struct qeth_card *card, 725 struct qeth_cmd_buffer *iob, 726 unsigned int data_length) 727 { 728 struct qeth_cmd_buffer *request = NULL; 729 struct qeth_ipa_cmd *cmd = NULL; 730 struct qeth_reply *reply = NULL; 731 struct qeth_cmd_buffer *tmp; 732 unsigned long flags; 733 int rc = 0; 734 735 QETH_CARD_TEXT(card, 4, "sndctlcb"); 736 rc = qeth_check_idx_response(card, iob->data); 737 switch (rc) { 738 case 0: 739 break; 740 case -EIO: 741 qeth_schedule_recovery(card); 742 /* fall through */ 743 default: 744 qeth_clear_ipacmd_list(card); 745 goto out; 746 } 747 748 cmd = __ipa_reply(iob); 749 if (cmd) { 750 cmd = qeth_check_ipa_data(card, cmd); 751 if (!cmd) 752 goto out; 753 if (IS_OSN(card) && card->osn_info.assist_cb && 754 cmd->hdr.command != IPA_CMD_STARTLAN) { 755 card->osn_info.assist_cb(card->dev, cmd); 756 goto out; 757 } 758 } 759 760 /* match against pending cmd requests */ 761 spin_lock_irqsave(&card->lock, flags); 762 list_for_each_entry(tmp, &card->cmd_waiter_list, list) { 763 if (tmp->match && tmp->match(tmp, iob)) { 764 request = tmp; 765 /* take the object outside the lock */ 766 qeth_get_cmd(request); 767 break; 768 } 769 } 770 spin_unlock_irqrestore(&card->lock, flags); 771 772 if (!request) 773 goto out; 774 775 reply = &request->reply; 776 if (!reply->callback) { 777 rc = 0; 778 goto no_callback; 779 } 780 781 spin_lock_irqsave(&request->lock, flags); 782 if (request->rc) 783 /* Bail out when the requestor has already left: */ 784 rc = request->rc; 785 else 786 rc = reply->callback(card, reply, cmd ? (unsigned long)cmd : 787 (unsigned long)iob); 788 spin_unlock_irqrestore(&request->lock, flags); 789 790 no_callback: 791 if (rc <= 0) 792 qeth_notify_cmd(request, rc); 793 qeth_put_cmd(request); 794 out: 795 memcpy(&card->seqno.pdu_hdr_ack, 796 QETH_PDU_HEADER_SEQ_NO(iob->data), 797 QETH_SEQ_NO_LENGTH); 798 qeth_put_cmd(iob); 799 __qeth_issue_next_read(card); 800 } 801 802 static int qeth_set_thread_start_bit(struct qeth_card *card, 803 unsigned long thread) 804 { 805 unsigned long flags; 806 807 spin_lock_irqsave(&card->thread_mask_lock, flags); 808 if (!(card->thread_allowed_mask & thread) || 809 (card->thread_start_mask & thread)) { 810 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 811 return -EPERM; 812 } 813 card->thread_start_mask |= thread; 814 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 815 return 0; 816 } 817 818 static void qeth_clear_thread_start_bit(struct qeth_card *card, 819 unsigned long thread) 820 { 821 unsigned long flags; 822 823 spin_lock_irqsave(&card->thread_mask_lock, flags); 824 card->thread_start_mask &= ~thread; 825 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 826 wake_up(&card->wait_q); 827 } 828 829 static void qeth_clear_thread_running_bit(struct qeth_card *card, 830 unsigned long thread) 831 { 832 unsigned long flags; 833 834 spin_lock_irqsave(&card->thread_mask_lock, flags); 835 card->thread_running_mask &= ~thread; 836 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 837 wake_up_all(&card->wait_q); 838 } 839 840 static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) 841 { 842 unsigned long flags; 843 int rc = 0; 844 845 spin_lock_irqsave(&card->thread_mask_lock, flags); 846 if (card->thread_start_mask & thread) { 847 if ((card->thread_allowed_mask & thread) && 848 !(card->thread_running_mask & thread)) { 849 rc = 1; 850 card->thread_start_mask &= ~thread; 851 card->thread_running_mask |= thread; 852 } else 853 rc = -EPERM; 854 } 855 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 856 return rc; 857 } 858 859 static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread) 860 { 861 int rc = 0; 862 863 wait_event(card->wait_q, 864 (rc = __qeth_do_run_thread(card, thread)) >= 0); 865 return rc; 866 } 867 868 void qeth_schedule_recovery(struct qeth_card *card) 869 { 870 QETH_CARD_TEXT(card, 2, "startrec"); 871 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) 872 schedule_work(&card->kernel_thread_starter); 873 } 874 875 static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev, 876 struct irb *irb) 877 { 878 int dstat, cstat; 879 char *sense; 880 881 sense = (char *) irb->ecw; 882 cstat = irb->scsw.cmd.cstat; 883 dstat = irb->scsw.cmd.dstat; 884 885 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | 886 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | 887 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { 888 QETH_CARD_TEXT(card, 2, "CGENCHK"); 889 dev_warn(&cdev->dev, "The qeth device driver " 890 "failed to recover an error on the device\n"); 891 QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n", 892 CCW_DEVID(cdev), dstat, cstat); 893 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 894 16, 1, irb, 64, 1); 895 return -EIO; 896 } 897 898 if (dstat & DEV_STAT_UNIT_CHECK) { 899 if (sense[SENSE_RESETTING_EVENT_BYTE] & 900 SENSE_RESETTING_EVENT_FLAG) { 901 QETH_CARD_TEXT(card, 2, "REVIND"); 902 return -EIO; 903 } 904 if (sense[SENSE_COMMAND_REJECT_BYTE] & 905 SENSE_COMMAND_REJECT_FLAG) { 906 QETH_CARD_TEXT(card, 2, "CMDREJi"); 907 return -EIO; 908 } 909 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { 910 QETH_CARD_TEXT(card, 2, "AFFE"); 911 return -EIO; 912 } 913 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { 914 QETH_CARD_TEXT(card, 2, "ZEROSEN"); 915 return 0; 916 } 917 QETH_CARD_TEXT(card, 2, "DGENCHK"); 918 return -EIO; 919 } 920 return 0; 921 } 922 923 static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev, 924 struct irb *irb) 925 { 926 if (!IS_ERR(irb)) 927 return 0; 928 929 switch (PTR_ERR(irb)) { 930 case -EIO: 931 QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n", 932 CCW_DEVID(cdev)); 933 QETH_CARD_TEXT(card, 2, "ckirberr"); 934 QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); 935 return -EIO; 936 case -ETIMEDOUT: 937 dev_warn(&cdev->dev, "A hardware operation timed out" 938 " on the device\n"); 939 QETH_CARD_TEXT(card, 2, "ckirberr"); 940 QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); 941 return -ETIMEDOUT; 942 default: 943 QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n", 944 PTR_ERR(irb), CCW_DEVID(cdev)); 945 QETH_CARD_TEXT(card, 2, "ckirberr"); 946 QETH_CARD_TEXT(card, 2, " rc???"); 947 return PTR_ERR(irb); 948 } 949 } 950 951 static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, 952 struct irb *irb) 953 { 954 int rc; 955 int cstat, dstat; 956 struct qeth_cmd_buffer *iob = NULL; 957 struct ccwgroup_device *gdev; 958 struct qeth_channel *channel; 959 struct qeth_card *card; 960 961 /* while we hold the ccwdev lock, this stays valid: */ 962 gdev = dev_get_drvdata(&cdev->dev); 963 card = dev_get_drvdata(&gdev->dev); 964 965 QETH_CARD_TEXT(card, 5, "irq"); 966 967 if (card->read.ccwdev == cdev) { 968 channel = &card->read; 969 QETH_CARD_TEXT(card, 5, "read"); 970 } else if (card->write.ccwdev == cdev) { 971 channel = &card->write; 972 QETH_CARD_TEXT(card, 5, "write"); 973 } else { 974 channel = &card->data; 975 QETH_CARD_TEXT(card, 5, "data"); 976 } 977 978 if (intparm == 0) { 979 QETH_CARD_TEXT(card, 5, "irqunsol"); 980 } else if ((addr_t)intparm != (addr_t)channel->active_cmd) { 981 QETH_CARD_TEXT(card, 5, "irqunexp"); 982 983 dev_err(&cdev->dev, 984 "Received IRQ with intparm %lx, expected %px\n", 985 intparm, channel->active_cmd); 986 if (channel->active_cmd) 987 qeth_cancel_cmd(channel->active_cmd, -EIO); 988 } else { 989 iob = (struct qeth_cmd_buffer *) (addr_t)intparm; 990 } 991 992 channel->active_cmd = NULL; 993 qeth_unlock_channel(card, channel); 994 995 rc = qeth_check_irb_error(card, cdev, irb); 996 if (rc) { 997 /* IO was terminated, free its resources. */ 998 if (iob) 999 qeth_cancel_cmd(iob, rc); 1000 return; 1001 } 1002 1003 if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) { 1004 channel->state = CH_STATE_STOPPED; 1005 wake_up(&card->wait_q); 1006 } 1007 1008 if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { 1009 channel->state = CH_STATE_HALTED; 1010 wake_up(&card->wait_q); 1011 } 1012 1013 if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC | 1014 SCSW_FCTL_HALT_FUNC))) { 1015 qeth_cancel_cmd(iob, -ECANCELED); 1016 iob = NULL; 1017 } 1018 1019 cstat = irb->scsw.cmd.cstat; 1020 dstat = irb->scsw.cmd.dstat; 1021 1022 if ((dstat & DEV_STAT_UNIT_EXCEP) || 1023 (dstat & DEV_STAT_UNIT_CHECK) || 1024 (cstat)) { 1025 if (irb->esw.esw0.erw.cons) { 1026 dev_warn(&channel->ccwdev->dev, 1027 "The qeth device driver failed to recover " 1028 "an error on the device\n"); 1029 QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n", 1030 CCW_DEVID(channel->ccwdev), cstat, 1031 dstat); 1032 print_hex_dump(KERN_WARNING, "qeth: irb ", 1033 DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); 1034 print_hex_dump(KERN_WARNING, "qeth: sense data ", 1035 DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1); 1036 } 1037 1038 rc = qeth_get_problem(card, cdev, irb); 1039 if (rc) { 1040 card->read_or_write_problem = 1; 1041 if (iob) 1042 qeth_cancel_cmd(iob, rc); 1043 qeth_clear_ipacmd_list(card); 1044 qeth_schedule_recovery(card); 1045 return; 1046 } 1047 } 1048 1049 if (iob) { 1050 /* sanity check: */ 1051 if (irb->scsw.cmd.count > iob->length) { 1052 qeth_cancel_cmd(iob, -EIO); 1053 return; 1054 } 1055 if (iob->callback) 1056 iob->callback(card, iob, 1057 iob->length - irb->scsw.cmd.count); 1058 } 1059 } 1060 1061 static void qeth_notify_skbs(struct qeth_qdio_out_q *q, 1062 struct qeth_qdio_out_buffer *buf, 1063 enum iucv_tx_notify notification) 1064 { 1065 struct sk_buff *skb; 1066 1067 skb_queue_walk(&buf->skb_list, skb) { 1068 QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); 1069 QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); 1070 if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk) 1071 iucv_sk(skb->sk)->sk_txnotify(skb, notification); 1072 } 1073 } 1074 1075 static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error, 1076 int budget) 1077 { 1078 struct qeth_qdio_out_q *queue = buf->q; 1079 struct sk_buff *skb; 1080 1081 /* release may never happen from within CQ tasklet scope */ 1082 WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); 1083 1084 if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) 1085 qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR); 1086 1087 /* Empty buffer? */ 1088 if (buf->next_element_to_fill == 0) 1089 return; 1090 1091 QETH_TXQ_STAT_INC(queue, bufs); 1092 QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill); 1093 while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) { 1094 unsigned int bytes = qdisc_pkt_len(skb); 1095 bool is_tso = skb_is_gso(skb); 1096 unsigned int packets; 1097 1098 packets = is_tso ? skb_shinfo(skb)->gso_segs : 1; 1099 if (error) { 1100 QETH_TXQ_STAT_ADD(queue, tx_errors, packets); 1101 } else { 1102 QETH_TXQ_STAT_ADD(queue, tx_packets, packets); 1103 QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes); 1104 if (skb->ip_summed == CHECKSUM_PARTIAL) 1105 QETH_TXQ_STAT_ADD(queue, skbs_csum, packets); 1106 if (skb_is_nonlinear(skb)) 1107 QETH_TXQ_STAT_INC(queue, skbs_sg); 1108 if (is_tso) { 1109 QETH_TXQ_STAT_INC(queue, skbs_tso); 1110 QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes); 1111 } 1112 } 1113 1114 napi_consume_skb(skb, budget); 1115 } 1116 } 1117 1118 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 1119 struct qeth_qdio_out_buffer *buf, 1120 bool error, int budget) 1121 { 1122 int i; 1123 1124 /* is PCI flag set on buffer? */ 1125 if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) 1126 atomic_dec(&queue->set_pci_flags_count); 1127 1128 qeth_tx_complete_buf(buf, error, budget); 1129 1130 for (i = 0; i < queue->max_elements; ++i) { 1131 if (buf->buffer->element[i].addr && buf->is_header[i]) 1132 kmem_cache_free(qeth_core_header_cache, 1133 buf->buffer->element[i].addr); 1134 buf->is_header[i] = 0; 1135 } 1136 1137 qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements); 1138 buf->next_element_to_fill = 0; 1139 buf->bytes = 0; 1140 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 1141 } 1142 1143 static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free) 1144 { 1145 int j; 1146 1147 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 1148 if (!q->bufs[j]) 1149 continue; 1150 qeth_cleanup_handled_pending(q, j, 1); 1151 qeth_clear_output_buffer(q, q->bufs[j], true, 0); 1152 if (free) { 1153 kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); 1154 q->bufs[j] = NULL; 1155 } 1156 } 1157 } 1158 1159 void qeth_drain_output_queues(struct qeth_card *card) 1160 { 1161 int i; 1162 1163 QETH_CARD_TEXT(card, 2, "clearqdbf"); 1164 /* clear outbound buffers to free skbs */ 1165 for (i = 0; i < card->qdio.no_out_queues; ++i) { 1166 if (card->qdio.out_qs[i]) 1167 qeth_drain_output_queue(card->qdio.out_qs[i], false); 1168 } 1169 } 1170 EXPORT_SYMBOL_GPL(qeth_drain_output_queues); 1171 1172 static void qeth_free_buffer_pool(struct qeth_card *card) 1173 { 1174 struct qeth_buffer_pool_entry *pool_entry, *tmp; 1175 int i = 0; 1176 list_for_each_entry_safe(pool_entry, tmp, 1177 &card->qdio.init_pool.entry_list, init_list){ 1178 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) 1179 free_page((unsigned long)pool_entry->elements[i]); 1180 list_del(&pool_entry->init_list); 1181 kfree(pool_entry); 1182 } 1183 } 1184 1185 static int qeth_osa_set_output_queues(struct qeth_card *card, bool single) 1186 { 1187 unsigned int count = single ? 1 : card->dev->num_tx_queues; 1188 int rc; 1189 1190 rtnl_lock(); 1191 rc = netif_set_real_num_tx_queues(card->dev, count); 1192 rtnl_unlock(); 1193 1194 if (rc) 1195 return rc; 1196 1197 if (card->qdio.no_out_queues == count) 1198 return 0; 1199 1200 if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) 1201 qeth_free_qdio_queues(card); 1202 1203 if (count == 1) 1204 dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); 1205 1206 card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE; 1207 card->qdio.no_out_queues = count; 1208 return 0; 1209 } 1210 1211 static int qeth_update_from_chp_desc(struct qeth_card *card) 1212 { 1213 struct ccw_device *ccwdev; 1214 struct channel_path_desc_fmt0 *chp_dsc; 1215 int rc = 0; 1216 1217 QETH_CARD_TEXT(card, 2, "chp_desc"); 1218 1219 ccwdev = card->data.ccwdev; 1220 chp_dsc = ccw_device_get_chp_desc(ccwdev, 0); 1221 if (!chp_dsc) 1222 return -ENOMEM; 1223 1224 card->info.func_level = 0x4100 + chp_dsc->desc; 1225 1226 if (IS_OSD(card) || IS_OSX(card)) 1227 /* CHPP field bit 6 == 1 -> single queue */ 1228 rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02); 1229 1230 kfree(chp_dsc); 1231 QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues); 1232 QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level); 1233 return rc; 1234 } 1235 1236 static void qeth_init_qdio_info(struct qeth_card *card) 1237 { 1238 QETH_CARD_TEXT(card, 4, "intqdinf"); 1239 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 1240 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; 1241 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; 1242 1243 /* inbound */ 1244 card->qdio.no_in_queues = 1; 1245 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; 1246 if (IS_IQD(card)) 1247 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT; 1248 else 1249 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; 1250 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count; 1251 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); 1252 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); 1253 } 1254 1255 static void qeth_set_initial_options(struct qeth_card *card) 1256 { 1257 card->options.route4.type = NO_ROUTER; 1258 card->options.route6.type = NO_ROUTER; 1259 card->options.rx_sg_cb = QETH_RX_SG_CB; 1260 card->options.isolation = ISOLATION_MODE_NONE; 1261 card->options.cq = QETH_CQ_DISABLED; 1262 card->options.layer = QETH_DISCIPLINE_UNDETERMINED; 1263 } 1264 1265 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) 1266 { 1267 unsigned long flags; 1268 int rc = 0; 1269 1270 spin_lock_irqsave(&card->thread_mask_lock, flags); 1271 QETH_CARD_TEXT_(card, 4, " %02x%02x%02x", 1272 (u8) card->thread_start_mask, 1273 (u8) card->thread_allowed_mask, 1274 (u8) card->thread_running_mask); 1275 rc = (card->thread_start_mask & thread); 1276 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 1277 return rc; 1278 } 1279 1280 static int qeth_do_reset(void *data); 1281 static void qeth_start_kernel_thread(struct work_struct *work) 1282 { 1283 struct task_struct *ts; 1284 struct qeth_card *card = container_of(work, struct qeth_card, 1285 kernel_thread_starter); 1286 QETH_CARD_TEXT(card , 2, "strthrd"); 1287 1288 if (card->read.state != CH_STATE_UP && 1289 card->write.state != CH_STATE_UP) 1290 return; 1291 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) { 1292 ts = kthread_run(qeth_do_reset, card, "qeth_recover"); 1293 if (IS_ERR(ts)) { 1294 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 1295 qeth_clear_thread_running_bit(card, 1296 QETH_RECOVER_THREAD); 1297 } 1298 } 1299 } 1300 1301 static void qeth_buffer_reclaim_work(struct work_struct *); 1302 static void qeth_setup_card(struct qeth_card *card) 1303 { 1304 QETH_CARD_TEXT(card, 2, "setupcrd"); 1305 1306 card->info.type = CARD_RDEV(card)->id.driver_info; 1307 card->state = CARD_STATE_DOWN; 1308 spin_lock_init(&card->lock); 1309 spin_lock_init(&card->thread_mask_lock); 1310 mutex_init(&card->conf_mutex); 1311 mutex_init(&card->discipline_mutex); 1312 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); 1313 INIT_LIST_HEAD(&card->cmd_waiter_list); 1314 init_waitqueue_head(&card->wait_q); 1315 qeth_set_initial_options(card); 1316 /* IP address takeover */ 1317 INIT_LIST_HEAD(&card->ipato.entries); 1318 qeth_init_qdio_info(card); 1319 INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); 1320 INIT_WORK(&card->close_dev_work, qeth_close_dev_handler); 1321 } 1322 1323 static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) 1324 { 1325 struct qeth_card *card = container_of(slr, struct qeth_card, 1326 qeth_service_level); 1327 if (card->info.mcl_level[0]) 1328 seq_printf(m, "qeth: %s firmware level %s\n", 1329 CARD_BUS_ID(card), card->info.mcl_level); 1330 } 1331 1332 static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) 1333 { 1334 struct qeth_card *card; 1335 1336 QETH_DBF_TEXT(SETUP, 2, "alloccrd"); 1337 card = kzalloc(sizeof(*card), GFP_KERNEL); 1338 if (!card) 1339 goto out; 1340 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 1341 1342 card->gdev = gdev; 1343 dev_set_drvdata(&gdev->dev, card); 1344 CARD_RDEV(card) = gdev->cdev[0]; 1345 CARD_WDEV(card) = gdev->cdev[1]; 1346 CARD_DDEV(card) = gdev->cdev[2]; 1347 1348 card->event_wq = alloc_ordered_workqueue("%s_event", 0, 1349 dev_name(&gdev->dev)); 1350 if (!card->event_wq) 1351 goto out_wq; 1352 1353 card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0); 1354 if (!card->read_cmd) 1355 goto out_read_cmd; 1356 1357 card->qeth_service_level.seq_print = qeth_core_sl_print; 1358 register_service_level(&card->qeth_service_level); 1359 return card; 1360 1361 out_read_cmd: 1362 destroy_workqueue(card->event_wq); 1363 out_wq: 1364 dev_set_drvdata(&gdev->dev, NULL); 1365 kfree(card); 1366 out: 1367 return NULL; 1368 } 1369 1370 static int qeth_clear_channel(struct qeth_card *card, 1371 struct qeth_channel *channel) 1372 { 1373 int rc; 1374 1375 QETH_CARD_TEXT(card, 3, "clearch"); 1376 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1377 rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd); 1378 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1379 1380 if (rc) 1381 return rc; 1382 rc = wait_event_interruptible_timeout(card->wait_q, 1383 channel->state == CH_STATE_STOPPED, QETH_TIMEOUT); 1384 if (rc == -ERESTARTSYS) 1385 return rc; 1386 if (channel->state != CH_STATE_STOPPED) 1387 return -ETIME; 1388 channel->state = CH_STATE_DOWN; 1389 return 0; 1390 } 1391 1392 static int qeth_halt_channel(struct qeth_card *card, 1393 struct qeth_channel *channel) 1394 { 1395 int rc; 1396 1397 QETH_CARD_TEXT(card, 3, "haltch"); 1398 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1399 rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd); 1400 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1401 1402 if (rc) 1403 return rc; 1404 rc = wait_event_interruptible_timeout(card->wait_q, 1405 channel->state == CH_STATE_HALTED, QETH_TIMEOUT); 1406 if (rc == -ERESTARTSYS) 1407 return rc; 1408 if (channel->state != CH_STATE_HALTED) 1409 return -ETIME; 1410 return 0; 1411 } 1412 1413 int qeth_stop_channel(struct qeth_channel *channel) 1414 { 1415 struct ccw_device *cdev = channel->ccwdev; 1416 int rc; 1417 1418 rc = ccw_device_set_offline(cdev); 1419 1420 spin_lock_irq(get_ccwdev_lock(cdev)); 1421 if (channel->active_cmd) { 1422 dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n", 1423 channel->active_cmd); 1424 channel->active_cmd = NULL; 1425 } 1426 cdev->handler = NULL; 1427 spin_unlock_irq(get_ccwdev_lock(cdev)); 1428 1429 return rc; 1430 } 1431 EXPORT_SYMBOL_GPL(qeth_stop_channel); 1432 1433 static int qeth_start_channel(struct qeth_channel *channel) 1434 { 1435 struct ccw_device *cdev = channel->ccwdev; 1436 int rc; 1437 1438 channel->state = CH_STATE_DOWN; 1439 atomic_set(&channel->irq_pending, 0); 1440 1441 spin_lock_irq(get_ccwdev_lock(cdev)); 1442 cdev->handler = qeth_irq; 1443 spin_unlock_irq(get_ccwdev_lock(cdev)); 1444 1445 rc = ccw_device_set_online(cdev); 1446 if (rc) 1447 goto err; 1448 1449 return 0; 1450 1451 err: 1452 spin_lock_irq(get_ccwdev_lock(cdev)); 1453 cdev->handler = NULL; 1454 spin_unlock_irq(get_ccwdev_lock(cdev)); 1455 return rc; 1456 } 1457 1458 static int qeth_halt_channels(struct qeth_card *card) 1459 { 1460 int rc1 = 0, rc2 = 0, rc3 = 0; 1461 1462 QETH_CARD_TEXT(card, 3, "haltchs"); 1463 rc1 = qeth_halt_channel(card, &card->read); 1464 rc2 = qeth_halt_channel(card, &card->write); 1465 rc3 = qeth_halt_channel(card, &card->data); 1466 if (rc1) 1467 return rc1; 1468 if (rc2) 1469 return rc2; 1470 return rc3; 1471 } 1472 1473 static int qeth_clear_channels(struct qeth_card *card) 1474 { 1475 int rc1 = 0, rc2 = 0, rc3 = 0; 1476 1477 QETH_CARD_TEXT(card, 3, "clearchs"); 1478 rc1 = qeth_clear_channel(card, &card->read); 1479 rc2 = qeth_clear_channel(card, &card->write); 1480 rc3 = qeth_clear_channel(card, &card->data); 1481 if (rc1) 1482 return rc1; 1483 if (rc2) 1484 return rc2; 1485 return rc3; 1486 } 1487 1488 static int qeth_clear_halt_card(struct qeth_card *card, int halt) 1489 { 1490 int rc = 0; 1491 1492 QETH_CARD_TEXT(card, 3, "clhacrd"); 1493 1494 if (halt) 1495 rc = qeth_halt_channels(card); 1496 if (rc) 1497 return rc; 1498 return qeth_clear_channels(card); 1499 } 1500 1501 int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) 1502 { 1503 int rc = 0; 1504 1505 QETH_CARD_TEXT(card, 3, "qdioclr"); 1506 switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, 1507 QETH_QDIO_CLEANING)) { 1508 case QETH_QDIO_ESTABLISHED: 1509 if (IS_IQD(card)) 1510 rc = qdio_shutdown(CARD_DDEV(card), 1511 QDIO_FLAG_CLEANUP_USING_HALT); 1512 else 1513 rc = qdio_shutdown(CARD_DDEV(card), 1514 QDIO_FLAG_CLEANUP_USING_CLEAR); 1515 if (rc) 1516 QETH_CARD_TEXT_(card, 3, "1err%d", rc); 1517 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 1518 break; 1519 case QETH_QDIO_CLEANING: 1520 return rc; 1521 default: 1522 break; 1523 } 1524 rc = qeth_clear_halt_card(card, use_halt); 1525 if (rc) 1526 QETH_CARD_TEXT_(card, 3, "2err%d", rc); 1527 return rc; 1528 } 1529 EXPORT_SYMBOL_GPL(qeth_qdio_clear_card); 1530 1531 static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card) 1532 { 1533 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; 1534 struct diag26c_vnic_resp *response = NULL; 1535 struct diag26c_vnic_req *request = NULL; 1536 struct ccw_dev_id id; 1537 char userid[80]; 1538 int rc = 0; 1539 1540 QETH_CARD_TEXT(card, 2, "vmlayer"); 1541 1542 cpcmd("QUERY USERID", userid, sizeof(userid), &rc); 1543 if (rc) 1544 goto out; 1545 1546 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 1547 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 1548 if (!request || !response) { 1549 rc = -ENOMEM; 1550 goto out; 1551 } 1552 1553 ccw_device_get_id(CARD_RDEV(card), &id); 1554 request->resp_buf_len = sizeof(*response); 1555 request->resp_version = DIAG26C_VERSION6_VM65918; 1556 request->req_format = DIAG26C_VNIC_INFO; 1557 ASCEBC(userid, 8); 1558 memcpy(&request->sys_name, userid, 8); 1559 request->devno = id.devno; 1560 1561 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 1562 rc = diag26c(request, response, DIAG26C_PORT_VNIC); 1563 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 1564 if (rc) 1565 goto out; 1566 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 1567 1568 if (request->resp_buf_len < sizeof(*response) || 1569 response->version != request->resp_version) { 1570 rc = -EIO; 1571 goto out; 1572 } 1573 1574 if (response->protocol == VNIC_INFO_PROT_L2) 1575 disc = QETH_DISCIPLINE_LAYER2; 1576 else if (response->protocol == VNIC_INFO_PROT_L3) 1577 disc = QETH_DISCIPLINE_LAYER3; 1578 1579 out: 1580 kfree(response); 1581 kfree(request); 1582 if (rc) 1583 QETH_CARD_TEXT_(card, 2, "err%x", rc); 1584 return disc; 1585 } 1586 1587 /* Determine whether the device requires a specific layer discipline */ 1588 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card) 1589 { 1590 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; 1591 1592 if (IS_OSM(card) || IS_OSN(card)) 1593 disc = QETH_DISCIPLINE_LAYER2; 1594 else if (IS_VM_NIC(card)) 1595 disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : 1596 qeth_vm_detect_layer(card); 1597 1598 switch (disc) { 1599 case QETH_DISCIPLINE_LAYER2: 1600 QETH_CARD_TEXT(card, 3, "force l2"); 1601 break; 1602 case QETH_DISCIPLINE_LAYER3: 1603 QETH_CARD_TEXT(card, 3, "force l3"); 1604 break; 1605 default: 1606 QETH_CARD_TEXT(card, 3, "force no"); 1607 } 1608 1609 return disc; 1610 } 1611 1612 static void qeth_set_blkt_defaults(struct qeth_card *card) 1613 { 1614 QETH_CARD_TEXT(card, 2, "cfgblkt"); 1615 1616 if (card->info.use_v1_blkt) { 1617 card->info.blkt.time_total = 0; 1618 card->info.blkt.inter_packet = 0; 1619 card->info.blkt.inter_packet_jumbo = 0; 1620 } else { 1621 card->info.blkt.time_total = 250; 1622 card->info.blkt.inter_packet = 5; 1623 card->info.blkt.inter_packet_jumbo = 15; 1624 } 1625 } 1626 1627 static void qeth_init_tokens(struct qeth_card *card) 1628 { 1629 card->token.issuer_rm_w = 0x00010103UL; 1630 card->token.cm_filter_w = 0x00010108UL; 1631 card->token.cm_connection_w = 0x0001010aUL; 1632 card->token.ulp_filter_w = 0x0001010bUL; 1633 card->token.ulp_connection_w = 0x0001010dUL; 1634 } 1635 1636 static void qeth_init_func_level(struct qeth_card *card) 1637 { 1638 switch (card->info.type) { 1639 case QETH_CARD_TYPE_IQD: 1640 card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; 1641 break; 1642 case QETH_CARD_TYPE_OSD: 1643 case QETH_CARD_TYPE_OSN: 1644 card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD; 1645 break; 1646 default: 1647 break; 1648 } 1649 } 1650 1651 static void qeth_idx_finalize_cmd(struct qeth_card *card, 1652 struct qeth_cmd_buffer *iob) 1653 { 1654 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, 1655 QETH_SEQ_NO_LENGTH); 1656 if (iob->channel == &card->write) 1657 card->seqno.trans_hdr++; 1658 } 1659 1660 static int qeth_peer_func_level(int level) 1661 { 1662 if ((level & 0xff) == 8) 1663 return (level & 0xff) + 0x400; 1664 if (((level >> 8) & 3) == 1) 1665 return (level & 0xff) + 0x200; 1666 return level; 1667 } 1668 1669 static void qeth_mpc_finalize_cmd(struct qeth_card *card, 1670 struct qeth_cmd_buffer *iob) 1671 { 1672 qeth_idx_finalize_cmd(card, iob); 1673 1674 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data), 1675 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH); 1676 card->seqno.pdu_hdr++; 1677 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), 1678 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); 1679 1680 iob->callback = qeth_release_buffer_cb; 1681 } 1682 1683 static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob, 1684 struct qeth_cmd_buffer *reply) 1685 { 1686 /* MPC cmds are issued strictly in sequence. */ 1687 return !IS_IPA(reply->data); 1688 } 1689 1690 static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card, 1691 void *data, 1692 unsigned int data_length) 1693 { 1694 struct qeth_cmd_buffer *iob; 1695 1696 iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT); 1697 if (!iob) 1698 return NULL; 1699 1700 memcpy(iob->data, data, data_length); 1701 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length, 1702 iob->data); 1703 iob->finalize = qeth_mpc_finalize_cmd; 1704 iob->match = qeth_mpc_match_reply; 1705 return iob; 1706 } 1707 1708 /** 1709 * qeth_send_control_data() - send control command to the card 1710 * @card: qeth_card structure pointer 1711 * @iob: qeth_cmd_buffer pointer 1712 * @reply_cb: callback function pointer 1713 * @cb_card: pointer to the qeth_card structure 1714 * @cb_reply: pointer to the qeth_reply structure 1715 * @cb_cmd: pointer to the original iob for non-IPA 1716 * commands, or to the qeth_ipa_cmd structure 1717 * for the IPA commands. 1718 * @reply_param: private pointer passed to the callback 1719 * 1720 * Callback function gets called one or more times, with cb_cmd 1721 * pointing to the response returned by the hardware. Callback 1722 * function must return 1723 * > 0 if more reply blocks are expected, 1724 * 0 if the last or only reply block is received, and 1725 * < 0 on error. 1726 * Callback function can get the value of the reply_param pointer from the 1727 * field 'param' of the structure qeth_reply. 1728 */ 1729 1730 static int qeth_send_control_data(struct qeth_card *card, 1731 struct qeth_cmd_buffer *iob, 1732 int (*reply_cb)(struct qeth_card *cb_card, 1733 struct qeth_reply *cb_reply, 1734 unsigned long cb_cmd), 1735 void *reply_param) 1736 { 1737 struct qeth_channel *channel = iob->channel; 1738 struct qeth_reply *reply = &iob->reply; 1739 long timeout = iob->timeout; 1740 int rc; 1741 1742 QETH_CARD_TEXT(card, 2, "sendctl"); 1743 1744 reply->callback = reply_cb; 1745 reply->param = reply_param; 1746 1747 timeout = wait_event_interruptible_timeout(card->wait_q, 1748 qeth_trylock_channel(channel), 1749 timeout); 1750 if (timeout <= 0) { 1751 qeth_put_cmd(iob); 1752 return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; 1753 } 1754 1755 if (iob->finalize) 1756 iob->finalize(card, iob); 1757 QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN)); 1758 1759 qeth_enqueue_cmd(card, iob); 1760 1761 /* This pairs with iob->callback, and keeps the iob alive after IO: */ 1762 qeth_get_cmd(iob); 1763 1764 QETH_CARD_TEXT(card, 6, "noirqpnd"); 1765 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1766 rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob), 1767 (addr_t) iob, 0, 0, timeout); 1768 if (!rc) 1769 channel->active_cmd = iob; 1770 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1771 if (rc) { 1772 QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", 1773 CARD_DEVID(card), rc); 1774 QETH_CARD_TEXT_(card, 2, " err%d", rc); 1775 qeth_dequeue_cmd(card, iob); 1776 qeth_put_cmd(iob); 1777 qeth_unlock_channel(card, channel); 1778 goto out; 1779 } 1780 1781 timeout = wait_for_completion_interruptible_timeout(&iob->done, 1782 timeout); 1783 if (timeout <= 0) 1784 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; 1785 1786 qeth_dequeue_cmd(card, iob); 1787 1788 if (reply_cb) { 1789 /* Wait until the callback for a late reply has completed: */ 1790 spin_lock_irq(&iob->lock); 1791 if (rc) 1792 /* Zap any callback that's still pending: */ 1793 iob->rc = rc; 1794 spin_unlock_irq(&iob->lock); 1795 } 1796 1797 if (!rc) 1798 rc = iob->rc; 1799 1800 out: 1801 qeth_put_cmd(iob); 1802 return rc; 1803 } 1804 1805 struct qeth_node_desc { 1806 struct node_descriptor nd1; 1807 struct node_descriptor nd2; 1808 struct node_descriptor nd3; 1809 }; 1810 1811 static void qeth_read_conf_data_cb(struct qeth_card *card, 1812 struct qeth_cmd_buffer *iob, 1813 unsigned int data_length) 1814 { 1815 struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data; 1816 int rc = 0; 1817 u8 *tag; 1818 1819 QETH_CARD_TEXT(card, 2, "cfgunit"); 1820 1821 if (data_length < sizeof(*nd)) { 1822 rc = -EINVAL; 1823 goto out; 1824 } 1825 1826 card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] && 1827 nd->nd1.plant[1] == _ascebc['M']; 1828 tag = (u8 *)&nd->nd1.tag; 1829 card->info.chpid = tag[0]; 1830 card->info.unit_addr2 = tag[1]; 1831 1832 tag = (u8 *)&nd->nd2.tag; 1833 card->info.cula = tag[1]; 1834 1835 card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 && 1836 nd->nd3.model[1] == 0xF0 && 1837 nd->nd3.model[2] >= 0xF1 && 1838 nd->nd3.model[2] <= 0xF4; 1839 1840 out: 1841 qeth_notify_cmd(iob, rc); 1842 qeth_put_cmd(iob); 1843 } 1844 1845 static int qeth_read_conf_data(struct qeth_card *card) 1846 { 1847 struct qeth_channel *channel = &card->data; 1848 struct qeth_cmd_buffer *iob; 1849 struct ciw *ciw; 1850 1851 /* scan for RCD command in extended SenseID data */ 1852 ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD); 1853 if (!ciw || ciw->cmd == 0) 1854 return -EOPNOTSUPP; 1855 if (ciw->count < sizeof(struct qeth_node_desc)) 1856 return -EINVAL; 1857 1858 iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT); 1859 if (!iob) 1860 return -ENOMEM; 1861 1862 iob->callback = qeth_read_conf_data_cb; 1863 qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length, 1864 iob->data); 1865 1866 return qeth_send_control_data(card, iob, NULL, NULL); 1867 } 1868 1869 static int qeth_idx_check_activate_response(struct qeth_card *card, 1870 struct qeth_channel *channel, 1871 struct qeth_cmd_buffer *iob) 1872 { 1873 int rc; 1874 1875 rc = qeth_check_idx_response(card, iob->data); 1876 if (rc) 1877 return rc; 1878 1879 if (QETH_IS_IDX_ACT_POS_REPLY(iob->data)) 1880 return 0; 1881 1882 /* negative reply: */ 1883 QETH_CARD_TEXT_(card, 2, "idxneg%c", 1884 QETH_IDX_ACT_CAUSE_CODE(iob->data)); 1885 1886 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { 1887 case QETH_IDX_ACT_ERR_EXCL: 1888 dev_err(&channel->ccwdev->dev, 1889 "The adapter is used exclusively by another host\n"); 1890 return -EBUSY; 1891 case QETH_IDX_ACT_ERR_AUTH: 1892 case QETH_IDX_ACT_ERR_AUTH_USER: 1893 dev_err(&channel->ccwdev->dev, 1894 "Setting the device online failed because of insufficient authorization\n"); 1895 return -EPERM; 1896 default: 1897 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", 1898 CCW_DEVID(channel->ccwdev)); 1899 return -EIO; 1900 } 1901 } 1902 1903 static void qeth_idx_activate_read_channel_cb(struct qeth_card *card, 1904 struct qeth_cmd_buffer *iob, 1905 unsigned int data_length) 1906 { 1907 struct qeth_channel *channel = iob->channel; 1908 u16 peer_level; 1909 int rc; 1910 1911 QETH_CARD_TEXT(card, 2, "idxrdcb"); 1912 1913 rc = qeth_idx_check_activate_response(card, channel, iob); 1914 if (rc) 1915 goto out; 1916 1917 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 1918 if (peer_level != qeth_peer_func_level(card->info.func_level)) { 1919 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 1920 CCW_DEVID(channel->ccwdev), 1921 card->info.func_level, peer_level); 1922 rc = -EINVAL; 1923 goto out; 1924 } 1925 1926 memcpy(&card->token.issuer_rm_r, 1927 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 1928 QETH_MPC_TOKEN_LENGTH); 1929 memcpy(&card->info.mcl_level[0], 1930 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); 1931 1932 out: 1933 qeth_notify_cmd(iob, rc); 1934 qeth_put_cmd(iob); 1935 } 1936 1937 static void qeth_idx_activate_write_channel_cb(struct qeth_card *card, 1938 struct qeth_cmd_buffer *iob, 1939 unsigned int data_length) 1940 { 1941 struct qeth_channel *channel = iob->channel; 1942 u16 peer_level; 1943 int rc; 1944 1945 QETH_CARD_TEXT(card, 2, "idxwrcb"); 1946 1947 rc = qeth_idx_check_activate_response(card, channel, iob); 1948 if (rc) 1949 goto out; 1950 1951 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 1952 if ((peer_level & ~0x0100) != 1953 qeth_peer_func_level(card->info.func_level)) { 1954 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 1955 CCW_DEVID(channel->ccwdev), 1956 card->info.func_level, peer_level); 1957 rc = -EINVAL; 1958 } 1959 1960 out: 1961 qeth_notify_cmd(iob, rc); 1962 qeth_put_cmd(iob); 1963 } 1964 1965 static void qeth_idx_setup_activate_cmd(struct qeth_card *card, 1966 struct qeth_cmd_buffer *iob) 1967 { 1968 u16 addr = (card->info.cula << 8) + card->info.unit_addr2; 1969 u8 port = ((u8)card->dev->dev_port) | 0x80; 1970 struct ccw1 *ccw = __ccw_from_cmd(iob); 1971 struct ccw_dev_id dev_id; 1972 1973 qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE, 1974 iob->data); 1975 qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data); 1976 ccw_device_get_id(CARD_DDEV(card), &dev_id); 1977 iob->finalize = qeth_idx_finalize_cmd; 1978 1979 port |= QETH_IDX_ACT_INVAL_FRAME; 1980 memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1); 1981 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 1982 &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); 1983 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), 1984 &card->info.func_level, 2); 1985 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2); 1986 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2); 1987 } 1988 1989 static int qeth_idx_activate_read_channel(struct qeth_card *card) 1990 { 1991 struct qeth_channel *channel = &card->read; 1992 struct qeth_cmd_buffer *iob; 1993 int rc; 1994 1995 QETH_CARD_TEXT(card, 2, "idxread"); 1996 1997 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); 1998 if (!iob) 1999 return -ENOMEM; 2000 2001 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); 2002 qeth_idx_setup_activate_cmd(card, iob); 2003 iob->callback = qeth_idx_activate_read_channel_cb; 2004 2005 rc = qeth_send_control_data(card, iob, NULL, NULL); 2006 if (rc) 2007 return rc; 2008 2009 channel->state = CH_STATE_UP; 2010 return 0; 2011 } 2012 2013 static int qeth_idx_activate_write_channel(struct qeth_card *card) 2014 { 2015 struct qeth_channel *channel = &card->write; 2016 struct qeth_cmd_buffer *iob; 2017 int rc; 2018 2019 QETH_CARD_TEXT(card, 2, "idxwrite"); 2020 2021 iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); 2022 if (!iob) 2023 return -ENOMEM; 2024 2025 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); 2026 qeth_idx_setup_activate_cmd(card, iob); 2027 iob->callback = qeth_idx_activate_write_channel_cb; 2028 2029 rc = qeth_send_control_data(card, iob, NULL, NULL); 2030 if (rc) 2031 return rc; 2032 2033 channel->state = CH_STATE_UP; 2034 return 0; 2035 } 2036 2037 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2038 unsigned long data) 2039 { 2040 struct qeth_cmd_buffer *iob; 2041 2042 QETH_CARD_TEXT(card, 2, "cmenblcb"); 2043 2044 iob = (struct qeth_cmd_buffer *) data; 2045 memcpy(&card->token.cm_filter_r, 2046 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), 2047 QETH_MPC_TOKEN_LENGTH); 2048 return 0; 2049 } 2050 2051 static int qeth_cm_enable(struct qeth_card *card) 2052 { 2053 struct qeth_cmd_buffer *iob; 2054 2055 QETH_CARD_TEXT(card, 2, "cmenable"); 2056 2057 iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE); 2058 if (!iob) 2059 return -ENOMEM; 2060 2061 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), 2062 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2063 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), 2064 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); 2065 2066 return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL); 2067 } 2068 2069 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2070 unsigned long data) 2071 { 2072 struct qeth_cmd_buffer *iob; 2073 2074 QETH_CARD_TEXT(card, 2, "cmsetpcb"); 2075 2076 iob = (struct qeth_cmd_buffer *) data; 2077 memcpy(&card->token.cm_connection_r, 2078 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), 2079 QETH_MPC_TOKEN_LENGTH); 2080 return 0; 2081 } 2082 2083 static int qeth_cm_setup(struct qeth_card *card) 2084 { 2085 struct qeth_cmd_buffer *iob; 2086 2087 QETH_CARD_TEXT(card, 2, "cmsetup"); 2088 2089 iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE); 2090 if (!iob) 2091 return -ENOMEM; 2092 2093 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), 2094 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2095 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), 2096 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); 2097 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), 2098 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); 2099 return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL); 2100 } 2101 2102 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) 2103 { 2104 struct net_device *dev = card->dev; 2105 unsigned int new_mtu; 2106 2107 if (!max_mtu) { 2108 /* IQD needs accurate max MTU to set up its RX buffers: */ 2109 if (IS_IQD(card)) 2110 return -EINVAL; 2111 /* tolerate quirky HW: */ 2112 max_mtu = ETH_MAX_MTU; 2113 } 2114 2115 rtnl_lock(); 2116 if (IS_IQD(card)) { 2117 /* move any device with default MTU to new max MTU: */ 2118 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu; 2119 2120 /* adjust RX buffer size to new max MTU: */ 2121 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; 2122 if (dev->max_mtu && dev->max_mtu != max_mtu) 2123 qeth_free_qdio_queues(card); 2124 } else { 2125 if (dev->mtu) 2126 new_mtu = dev->mtu; 2127 /* default MTUs for first setup: */ 2128 else if (IS_LAYER2(card)) 2129 new_mtu = ETH_DATA_LEN; 2130 else 2131 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ 2132 } 2133 2134 dev->max_mtu = max_mtu; 2135 dev->mtu = min(new_mtu, max_mtu); 2136 rtnl_unlock(); 2137 return 0; 2138 } 2139 2140 static int qeth_get_mtu_outof_framesize(int framesize) 2141 { 2142 switch (framesize) { 2143 case 0x4000: 2144 return 8192; 2145 case 0x6000: 2146 return 16384; 2147 case 0xa000: 2148 return 32768; 2149 case 0xffff: 2150 return 57344; 2151 default: 2152 return 0; 2153 } 2154 } 2155 2156 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2157 unsigned long data) 2158 { 2159 __u16 mtu, framesize; 2160 __u16 len; 2161 __u8 link_type; 2162 struct qeth_cmd_buffer *iob; 2163 2164 QETH_CARD_TEXT(card, 2, "ulpenacb"); 2165 2166 iob = (struct qeth_cmd_buffer *) data; 2167 memcpy(&card->token.ulp_filter_r, 2168 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), 2169 QETH_MPC_TOKEN_LENGTH); 2170 if (IS_IQD(card)) { 2171 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); 2172 mtu = qeth_get_mtu_outof_framesize(framesize); 2173 } else { 2174 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data); 2175 } 2176 *(u16 *)reply->param = mtu; 2177 2178 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); 2179 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { 2180 memcpy(&link_type, 2181 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); 2182 card->info.link_type = link_type; 2183 } else 2184 card->info.link_type = 0; 2185 QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type); 2186 return 0; 2187 } 2188 2189 static u8 qeth_mpc_select_prot_type(struct qeth_card *card) 2190 { 2191 if (IS_OSN(card)) 2192 return QETH_PROT_OSN2; 2193 return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP; 2194 } 2195 2196 static int qeth_ulp_enable(struct qeth_card *card) 2197 { 2198 u8 prot_type = qeth_mpc_select_prot_type(card); 2199 struct qeth_cmd_buffer *iob; 2200 u16 max_mtu; 2201 int rc; 2202 2203 QETH_CARD_TEXT(card, 2, "ulpenabl"); 2204 2205 iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE); 2206 if (!iob) 2207 return -ENOMEM; 2208 2209 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; 2210 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); 2211 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), 2212 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2213 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), 2214 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); 2215 rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu); 2216 if (rc) 2217 return rc; 2218 return qeth_update_max_mtu(card, max_mtu); 2219 } 2220 2221 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2222 unsigned long data) 2223 { 2224 struct qeth_cmd_buffer *iob; 2225 2226 QETH_CARD_TEXT(card, 2, "ulpstpcb"); 2227 2228 iob = (struct qeth_cmd_buffer *) data; 2229 memcpy(&card->token.ulp_connection_r, 2230 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2231 QETH_MPC_TOKEN_LENGTH); 2232 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2233 3)) { 2234 QETH_CARD_TEXT(card, 2, "olmlimit"); 2235 dev_err(&card->gdev->dev, "A connection could not be " 2236 "established because of an OLM limit\n"); 2237 return -EMLINK; 2238 } 2239 return 0; 2240 } 2241 2242 static int qeth_ulp_setup(struct qeth_card *card) 2243 { 2244 __u16 temp; 2245 struct qeth_cmd_buffer *iob; 2246 struct ccw_dev_id dev_id; 2247 2248 QETH_CARD_TEXT(card, 2, "ulpsetup"); 2249 2250 iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE); 2251 if (!iob) 2252 return -ENOMEM; 2253 2254 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), 2255 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2256 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), 2257 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH); 2258 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), 2259 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); 2260 2261 ccw_device_get_id(CARD_DDEV(card), &dev_id); 2262 memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2); 2263 temp = (card->info.cula << 8) + card->info.unit_addr2; 2264 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); 2265 return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL); 2266 } 2267 2268 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) 2269 { 2270 struct qeth_qdio_out_buffer *newbuf; 2271 2272 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC); 2273 if (!newbuf) 2274 return -ENOMEM; 2275 2276 newbuf->buffer = q->qdio_bufs[bidx]; 2277 skb_queue_head_init(&newbuf->skb_list); 2278 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); 2279 newbuf->q = q; 2280 newbuf->next_pending = q->bufs[bidx]; 2281 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); 2282 q->bufs[bidx] = newbuf; 2283 return 0; 2284 } 2285 2286 static void qeth_free_output_queue(struct qeth_qdio_out_q *q) 2287 { 2288 if (!q) 2289 return; 2290 2291 qeth_drain_output_queue(q, true); 2292 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2293 kfree(q); 2294 } 2295 2296 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) 2297 { 2298 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); 2299 2300 if (!q) 2301 return NULL; 2302 2303 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { 2304 kfree(q); 2305 return NULL; 2306 } 2307 return q; 2308 } 2309 2310 static void qeth_tx_completion_timer(struct timer_list *timer) 2311 { 2312 struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer); 2313 2314 napi_schedule(&queue->napi); 2315 QETH_TXQ_STAT_INC(queue, completion_timer); 2316 } 2317 2318 static int qeth_alloc_qdio_queues(struct qeth_card *card) 2319 { 2320 int i, j; 2321 2322 QETH_CARD_TEXT(card, 2, "allcqdbf"); 2323 2324 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, 2325 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) 2326 return 0; 2327 2328 QETH_CARD_TEXT(card, 2, "inq"); 2329 card->qdio.in_q = qeth_alloc_qdio_queue(); 2330 if (!card->qdio.in_q) 2331 goto out_nomem; 2332 2333 /* inbound buffer pool */ 2334 if (qeth_alloc_buffer_pool(card)) 2335 goto out_freeinq; 2336 2337 /* outbound */ 2338 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2339 struct qeth_qdio_out_q *queue; 2340 2341 queue = qeth_alloc_output_queue(); 2342 if (!queue) 2343 goto out_freeoutq; 2344 QETH_CARD_TEXT_(card, 2, "outq %i", i); 2345 QETH_CARD_HEX(card, 2, &queue, sizeof(void *)); 2346 card->qdio.out_qs[i] = queue; 2347 queue->card = card; 2348 queue->queue_no = i; 2349 timer_setup(&queue->timer, qeth_tx_completion_timer, 0); 2350 2351 /* give outbound qeth_qdio_buffers their qdio_buffers */ 2352 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2353 WARN_ON(queue->bufs[j]); 2354 if (qeth_init_qdio_out_buf(queue, j)) 2355 goto out_freeoutqbufs; 2356 } 2357 } 2358 2359 /* completion */ 2360 if (qeth_alloc_cq(card)) 2361 goto out_freeoutq; 2362 2363 return 0; 2364 2365 out_freeoutqbufs: 2366 while (j > 0) { 2367 --j; 2368 kmem_cache_free(qeth_qdio_outbuf_cache, 2369 card->qdio.out_qs[i]->bufs[j]); 2370 card->qdio.out_qs[i]->bufs[j] = NULL; 2371 } 2372 out_freeoutq: 2373 while (i > 0) { 2374 qeth_free_output_queue(card->qdio.out_qs[--i]); 2375 card->qdio.out_qs[i] = NULL; 2376 } 2377 qeth_free_buffer_pool(card); 2378 out_freeinq: 2379 qeth_free_qdio_queue(card->qdio.in_q); 2380 card->qdio.in_q = NULL; 2381 out_nomem: 2382 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 2383 return -ENOMEM; 2384 } 2385 2386 static void qeth_free_qdio_queues(struct qeth_card *card) 2387 { 2388 int i, j; 2389 2390 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == 2391 QETH_QDIO_UNINITIALIZED) 2392 return; 2393 2394 qeth_free_cq(card); 2395 cancel_delayed_work_sync(&card->buffer_reclaim_work); 2396 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2397 if (card->qdio.in_q->bufs[j].rx_skb) 2398 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); 2399 } 2400 qeth_free_qdio_queue(card->qdio.in_q); 2401 card->qdio.in_q = NULL; 2402 /* inbound buffer pool */ 2403 qeth_free_buffer_pool(card); 2404 /* free outbound qdio_qs */ 2405 for (i = 0; i < card->qdio.no_out_queues; i++) { 2406 qeth_free_output_queue(card->qdio.out_qs[i]); 2407 card->qdio.out_qs[i] = NULL; 2408 } 2409 } 2410 2411 static void qeth_create_qib_param_field(struct qeth_card *card, 2412 char *param_field) 2413 { 2414 2415 param_field[0] = _ascebc['P']; 2416 param_field[1] = _ascebc['C']; 2417 param_field[2] = _ascebc['I']; 2418 param_field[3] = _ascebc['T']; 2419 *((unsigned int *) (¶m_field[4])) = QETH_PCI_THRESHOLD_A(card); 2420 *((unsigned int *) (¶m_field[8])) = QETH_PCI_THRESHOLD_B(card); 2421 *((unsigned int *) (¶m_field[12])) = QETH_PCI_TIMER_VALUE(card); 2422 } 2423 2424 static void qeth_create_qib_param_field_blkt(struct qeth_card *card, 2425 char *param_field) 2426 { 2427 param_field[16] = _ascebc['B']; 2428 param_field[17] = _ascebc['L']; 2429 param_field[18] = _ascebc['K']; 2430 param_field[19] = _ascebc['T']; 2431 *((unsigned int *) (¶m_field[20])) = card->info.blkt.time_total; 2432 *((unsigned int *) (¶m_field[24])) = card->info.blkt.inter_packet; 2433 *((unsigned int *) (¶m_field[28])) = 2434 card->info.blkt.inter_packet_jumbo; 2435 } 2436 2437 static int qeth_qdio_activate(struct qeth_card *card) 2438 { 2439 QETH_CARD_TEXT(card, 3, "qdioact"); 2440 return qdio_activate(CARD_DDEV(card)); 2441 } 2442 2443 static int qeth_dm_act(struct qeth_card *card) 2444 { 2445 struct qeth_cmd_buffer *iob; 2446 2447 QETH_CARD_TEXT(card, 2, "dmact"); 2448 2449 iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE); 2450 if (!iob) 2451 return -ENOMEM; 2452 2453 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), 2454 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2455 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data), 2456 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2457 return qeth_send_control_data(card, iob, NULL, NULL); 2458 } 2459 2460 static int qeth_mpc_initialize(struct qeth_card *card) 2461 { 2462 int rc; 2463 2464 QETH_CARD_TEXT(card, 2, "mpcinit"); 2465 2466 rc = qeth_issue_next_read(card); 2467 if (rc) { 2468 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 2469 return rc; 2470 } 2471 rc = qeth_cm_enable(card); 2472 if (rc) { 2473 QETH_CARD_TEXT_(card, 2, "2err%d", rc); 2474 return rc; 2475 } 2476 rc = qeth_cm_setup(card); 2477 if (rc) { 2478 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 2479 return rc; 2480 } 2481 rc = qeth_ulp_enable(card); 2482 if (rc) { 2483 QETH_CARD_TEXT_(card, 2, "4err%d", rc); 2484 return rc; 2485 } 2486 rc = qeth_ulp_setup(card); 2487 if (rc) { 2488 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 2489 return rc; 2490 } 2491 rc = qeth_alloc_qdio_queues(card); 2492 if (rc) { 2493 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 2494 return rc; 2495 } 2496 rc = qeth_qdio_establish(card); 2497 if (rc) { 2498 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 2499 qeth_free_qdio_queues(card); 2500 return rc; 2501 } 2502 rc = qeth_qdio_activate(card); 2503 if (rc) { 2504 QETH_CARD_TEXT_(card, 2, "7err%d", rc); 2505 return rc; 2506 } 2507 rc = qeth_dm_act(card); 2508 if (rc) { 2509 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 2510 return rc; 2511 } 2512 2513 return 0; 2514 } 2515 2516 void qeth_print_status_message(struct qeth_card *card) 2517 { 2518 switch (card->info.type) { 2519 case QETH_CARD_TYPE_OSD: 2520 case QETH_CARD_TYPE_OSM: 2521 case QETH_CARD_TYPE_OSX: 2522 /* VM will use a non-zero first character 2523 * to indicate a HiperSockets like reporting 2524 * of the level OSA sets the first character to zero 2525 * */ 2526 if (!card->info.mcl_level[0]) { 2527 sprintf(card->info.mcl_level, "%02x%02x", 2528 card->info.mcl_level[2], 2529 card->info.mcl_level[3]); 2530 break; 2531 } 2532 /* fallthrough */ 2533 case QETH_CARD_TYPE_IQD: 2534 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) { 2535 card->info.mcl_level[0] = (char) _ebcasc[(__u8) 2536 card->info.mcl_level[0]]; 2537 card->info.mcl_level[1] = (char) _ebcasc[(__u8) 2538 card->info.mcl_level[1]]; 2539 card->info.mcl_level[2] = (char) _ebcasc[(__u8) 2540 card->info.mcl_level[2]]; 2541 card->info.mcl_level[3] = (char) _ebcasc[(__u8) 2542 card->info.mcl_level[3]]; 2543 card->info.mcl_level[QETH_MCL_LENGTH] = 0; 2544 } 2545 break; 2546 default: 2547 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1); 2548 } 2549 dev_info(&card->gdev->dev, 2550 "Device is a%s card%s%s%s\nwith link type %s.\n", 2551 qeth_get_cardname(card), 2552 (card->info.mcl_level[0]) ? " (level: " : "", 2553 (card->info.mcl_level[0]) ? card->info.mcl_level : "", 2554 (card->info.mcl_level[0]) ? ")" : "", 2555 qeth_get_cardname_short(card)); 2556 } 2557 EXPORT_SYMBOL_GPL(qeth_print_status_message); 2558 2559 static void qeth_initialize_working_pool_list(struct qeth_card *card) 2560 { 2561 struct qeth_buffer_pool_entry *entry; 2562 2563 QETH_CARD_TEXT(card, 5, "inwrklst"); 2564 2565 list_for_each_entry(entry, 2566 &card->qdio.init_pool.entry_list, init_list) { 2567 qeth_put_buffer_pool_entry(card, entry); 2568 } 2569 } 2570 2571 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( 2572 struct qeth_card *card) 2573 { 2574 struct list_head *plh; 2575 struct qeth_buffer_pool_entry *entry; 2576 int i, free; 2577 struct page *page; 2578 2579 if (list_empty(&card->qdio.in_buf_pool.entry_list)) 2580 return NULL; 2581 2582 list_for_each(plh, &card->qdio.in_buf_pool.entry_list) { 2583 entry = list_entry(plh, struct qeth_buffer_pool_entry, list); 2584 free = 1; 2585 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2586 if (page_count(virt_to_page(entry->elements[i])) > 1) { 2587 free = 0; 2588 break; 2589 } 2590 } 2591 if (free) { 2592 list_del_init(&entry->list); 2593 return entry; 2594 } 2595 } 2596 2597 /* no free buffer in pool so take first one and swap pages */ 2598 entry = list_entry(card->qdio.in_buf_pool.entry_list.next, 2599 struct qeth_buffer_pool_entry, list); 2600 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2601 if (page_count(virt_to_page(entry->elements[i])) > 1) { 2602 page = alloc_page(GFP_ATOMIC); 2603 if (!page) { 2604 return NULL; 2605 } else { 2606 free_page((unsigned long)entry->elements[i]); 2607 entry->elements[i] = page_address(page); 2608 QETH_CARD_STAT_INC(card, rx_sg_alloc_page); 2609 } 2610 } 2611 } 2612 list_del_init(&entry->list); 2613 return entry; 2614 } 2615 2616 static int qeth_init_input_buffer(struct qeth_card *card, 2617 struct qeth_qdio_buffer *buf) 2618 { 2619 struct qeth_buffer_pool_entry *pool_entry; 2620 int i; 2621 2622 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { 2623 buf->rx_skb = netdev_alloc_skb(card->dev, 2624 ETH_HLEN + 2625 sizeof(struct ipv6hdr)); 2626 if (!buf->rx_skb) 2627 return 1; 2628 } 2629 2630 pool_entry = qeth_find_free_buffer_pool_entry(card); 2631 if (!pool_entry) 2632 return 1; 2633 2634 /* 2635 * since the buffer is accessed only from the input_tasklet 2636 * there shouldn't be a need to synchronize; also, since we use 2637 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off 2638 * buffers 2639 */ 2640 2641 buf->pool_entry = pool_entry; 2642 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2643 buf->buffer->element[i].length = PAGE_SIZE; 2644 buf->buffer->element[i].addr = pool_entry->elements[i]; 2645 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) 2646 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY; 2647 else 2648 buf->buffer->element[i].eflags = 0; 2649 buf->buffer->element[i].sflags = 0; 2650 } 2651 return 0; 2652 } 2653 2654 static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card, 2655 struct qeth_qdio_out_q *queue) 2656 { 2657 if (!IS_IQD(card) || 2658 qeth_iqd_is_mcast_queue(card, queue) || 2659 card->options.cq == QETH_CQ_ENABLED || 2660 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd)) 2661 return 1; 2662 2663 return card->ssqd.mmwc ? card->ssqd.mmwc : 1; 2664 } 2665 2666 static int qeth_init_qdio_queues(struct qeth_card *card) 2667 { 2668 unsigned int i; 2669 int rc; 2670 2671 QETH_CARD_TEXT(card, 2, "initqdqs"); 2672 2673 /* inbound queue */ 2674 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2675 memset(&card->rx, 0, sizeof(struct qeth_rx)); 2676 qeth_initialize_working_pool_list(card); 2677 /*give only as many buffers to hardware as we have buffer pool entries*/ 2678 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i) 2679 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); 2680 card->qdio.in_q->next_buf_to_init = 2681 card->qdio.in_buf_pool.buf_count - 1; 2682 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, 2683 card->qdio.in_buf_pool.buf_count - 1); 2684 if (rc) { 2685 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 2686 return rc; 2687 } 2688 2689 /* completion */ 2690 rc = qeth_cq_init(card); 2691 if (rc) { 2692 return rc; 2693 } 2694 2695 /* outbound queue */ 2696 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2697 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i]; 2698 2699 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2700 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card); 2701 queue->next_buf_to_fill = 0; 2702 queue->do_pack = 0; 2703 queue->prev_hdr = NULL; 2704 queue->bulk_start = 0; 2705 queue->bulk_count = 0; 2706 queue->bulk_max = qeth_tx_select_bulk_max(card, queue); 2707 atomic_set(&queue->used_buffers, 0); 2708 atomic_set(&queue->set_pci_flags_count, 0); 2709 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 2710 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i)); 2711 } 2712 return 0; 2713 } 2714 2715 static void qeth_ipa_finalize_cmd(struct qeth_card *card, 2716 struct qeth_cmd_buffer *iob) 2717 { 2718 qeth_mpc_finalize_cmd(card, iob); 2719 2720 /* override with IPA-specific values: */ 2721 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++; 2722 } 2723 2724 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 2725 u16 cmd_length, 2726 bool (*match)(struct qeth_cmd_buffer *iob, 2727 struct qeth_cmd_buffer *reply)) 2728 { 2729 u8 prot_type = qeth_mpc_select_prot_type(card); 2730 u16 total_length = iob->length; 2731 2732 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length, 2733 iob->data); 2734 iob->finalize = qeth_ipa_finalize_cmd; 2735 iob->match = match; 2736 2737 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); 2738 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); 2739 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); 2740 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2); 2741 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2); 2742 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), 2743 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2744 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2); 2745 } 2746 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); 2747 2748 static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob, 2749 struct qeth_cmd_buffer *reply) 2750 { 2751 struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply); 2752 2753 return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno); 2754 } 2755 2756 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, 2757 enum qeth_ipa_cmds cmd_code, 2758 enum qeth_prot_versions prot, 2759 unsigned int data_length) 2760 { 2761 enum qeth_link_types link_type = card->info.link_type; 2762 struct qeth_cmd_buffer *iob; 2763 struct qeth_ipacmd_hdr *hdr; 2764 2765 data_length += offsetof(struct qeth_ipa_cmd, data); 2766 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1, 2767 QETH_IPA_TIMEOUT); 2768 if (!iob) 2769 return NULL; 2770 2771 qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply); 2772 2773 hdr = &__ipa_cmd(iob)->hdr; 2774 hdr->command = cmd_code; 2775 hdr->initiator = IPA_CMD_INITIATOR_HOST; 2776 /* hdr->seqno is set by qeth_send_control_data() */ 2777 hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1; 2778 hdr->rel_adapter_no = (u8) card->dev->dev_port; 2779 hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1; 2780 hdr->param_count = 1; 2781 hdr->prot_version = prot; 2782 return iob; 2783 } 2784 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd); 2785 2786 static int qeth_send_ipa_cmd_cb(struct qeth_card *card, 2787 struct qeth_reply *reply, unsigned long data) 2788 { 2789 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2790 2791 return (cmd->hdr.return_code) ? -EIO : 0; 2792 } 2793 2794 /** 2795 * qeth_send_ipa_cmd() - send an IPA command 2796 * 2797 * See qeth_send_control_data() for explanation of the arguments. 2798 */ 2799 2800 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 2801 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, 2802 unsigned long), 2803 void *reply_param) 2804 { 2805 int rc; 2806 2807 QETH_CARD_TEXT(card, 4, "sendipa"); 2808 2809 if (card->read_or_write_problem) { 2810 qeth_put_cmd(iob); 2811 return -EIO; 2812 } 2813 2814 if (reply_cb == NULL) 2815 reply_cb = qeth_send_ipa_cmd_cb; 2816 rc = qeth_send_control_data(card, iob, reply_cb, reply_param); 2817 if (rc == -ETIME) { 2818 qeth_clear_ipacmd_list(card); 2819 qeth_schedule_recovery(card); 2820 } 2821 return rc; 2822 } 2823 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); 2824 2825 static int qeth_send_startlan_cb(struct qeth_card *card, 2826 struct qeth_reply *reply, unsigned long data) 2827 { 2828 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2829 2830 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE) 2831 return -ENETDOWN; 2832 2833 return (cmd->hdr.return_code) ? -EIO : 0; 2834 } 2835 2836 static int qeth_send_startlan(struct qeth_card *card) 2837 { 2838 struct qeth_cmd_buffer *iob; 2839 2840 QETH_CARD_TEXT(card, 2, "strtlan"); 2841 2842 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0); 2843 if (!iob) 2844 return -ENOMEM; 2845 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL); 2846 } 2847 2848 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) 2849 { 2850 if (!cmd->hdr.return_code) 2851 cmd->hdr.return_code = 2852 cmd->data.setadapterparms.hdr.return_code; 2853 return cmd->hdr.return_code; 2854 } 2855 2856 static int qeth_query_setadapterparms_cb(struct qeth_card *card, 2857 struct qeth_reply *reply, unsigned long data) 2858 { 2859 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2860 2861 QETH_CARD_TEXT(card, 3, "quyadpcb"); 2862 if (qeth_setadpparms_inspect_rc(cmd)) 2863 return -EIO; 2864 2865 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { 2866 card->info.link_type = 2867 cmd->data.setadapterparms.data.query_cmds_supp.lan_type; 2868 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type); 2869 } 2870 card->options.adp.supported = 2871 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; 2872 return 0; 2873 } 2874 2875 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, 2876 enum qeth_ipa_setadp_cmd adp_cmd, 2877 unsigned int data_length) 2878 { 2879 struct qeth_ipacmd_setadpparms_hdr *hdr; 2880 struct qeth_cmd_buffer *iob; 2881 2882 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4, 2883 data_length + 2884 offsetof(struct qeth_ipacmd_setadpparms, 2885 data)); 2886 if (!iob) 2887 return NULL; 2888 2889 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr; 2890 hdr->cmdlength = sizeof(*hdr) + data_length; 2891 hdr->command_code = adp_cmd; 2892 hdr->used_total = 1; 2893 hdr->seq_no = 1; 2894 return iob; 2895 } 2896 2897 static int qeth_query_setadapterparms(struct qeth_card *card) 2898 { 2899 int rc; 2900 struct qeth_cmd_buffer *iob; 2901 2902 QETH_CARD_TEXT(card, 3, "queryadp"); 2903 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, 2904 SETADP_DATA_SIZEOF(query_cmds_supp)); 2905 if (!iob) 2906 return -ENOMEM; 2907 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); 2908 return rc; 2909 } 2910 2911 static int qeth_query_ipassists_cb(struct qeth_card *card, 2912 struct qeth_reply *reply, unsigned long data) 2913 { 2914 struct qeth_ipa_cmd *cmd; 2915 2916 QETH_CARD_TEXT(card, 2, "qipasscb"); 2917 2918 cmd = (struct qeth_ipa_cmd *) data; 2919 2920 switch (cmd->hdr.return_code) { 2921 case IPA_RC_SUCCESS: 2922 break; 2923 case IPA_RC_NOTSUPP: 2924 case IPA_RC_L2_UNSUPPORTED_CMD: 2925 QETH_CARD_TEXT(card, 2, "ipaunsup"); 2926 card->options.ipa4.supported |= IPA_SETADAPTERPARMS; 2927 card->options.ipa6.supported |= IPA_SETADAPTERPARMS; 2928 return -EOPNOTSUPP; 2929 default: 2930 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", 2931 CARD_DEVID(card), cmd->hdr.return_code); 2932 return -EIO; 2933 } 2934 2935 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 2936 card->options.ipa4 = cmd->hdr.assists; 2937 else if (cmd->hdr.prot_version == QETH_PROT_IPV6) 2938 card->options.ipa6 = cmd->hdr.assists; 2939 else 2940 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n", 2941 CARD_DEVID(card)); 2942 return 0; 2943 } 2944 2945 static int qeth_query_ipassists(struct qeth_card *card, 2946 enum qeth_prot_versions prot) 2947 { 2948 int rc; 2949 struct qeth_cmd_buffer *iob; 2950 2951 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot); 2952 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0); 2953 if (!iob) 2954 return -ENOMEM; 2955 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); 2956 return rc; 2957 } 2958 2959 static int qeth_query_switch_attributes_cb(struct qeth_card *card, 2960 struct qeth_reply *reply, unsigned long data) 2961 { 2962 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2963 struct qeth_query_switch_attributes *attrs; 2964 struct qeth_switch_info *sw_info; 2965 2966 QETH_CARD_TEXT(card, 2, "qswiatcb"); 2967 if (qeth_setadpparms_inspect_rc(cmd)) 2968 return -EIO; 2969 2970 sw_info = (struct qeth_switch_info *)reply->param; 2971 attrs = &cmd->data.setadapterparms.data.query_switch_attributes; 2972 sw_info->capabilities = attrs->capabilities; 2973 sw_info->settings = attrs->settings; 2974 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, 2975 sw_info->settings); 2976 return 0; 2977 } 2978 2979 int qeth_query_switch_attributes(struct qeth_card *card, 2980 struct qeth_switch_info *sw_info) 2981 { 2982 struct qeth_cmd_buffer *iob; 2983 2984 QETH_CARD_TEXT(card, 2, "qswiattr"); 2985 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES)) 2986 return -EOPNOTSUPP; 2987 if (!netif_carrier_ok(card->dev)) 2988 return -ENOMEDIUM; 2989 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0); 2990 if (!iob) 2991 return -ENOMEM; 2992 return qeth_send_ipa_cmd(card, iob, 2993 qeth_query_switch_attributes_cb, sw_info); 2994 } 2995 2996 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card, 2997 enum qeth_diags_cmds sub_cmd, 2998 unsigned int data_length) 2999 { 3000 struct qeth_ipacmd_diagass *cmd; 3001 struct qeth_cmd_buffer *iob; 3002 3003 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE, 3004 DIAG_HDR_LEN + data_length); 3005 if (!iob) 3006 return NULL; 3007 3008 cmd = &__ipa_cmd(iob)->data.diagass; 3009 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length; 3010 cmd->subcmd = sub_cmd; 3011 return iob; 3012 } 3013 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd); 3014 3015 static int qeth_query_setdiagass_cb(struct qeth_card *card, 3016 struct qeth_reply *reply, unsigned long data) 3017 { 3018 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3019 u16 rc = cmd->hdr.return_code; 3020 3021 if (rc) { 3022 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); 3023 return -EIO; 3024 } 3025 3026 card->info.diagass_support = cmd->data.diagass.ext; 3027 return 0; 3028 } 3029 3030 static int qeth_query_setdiagass(struct qeth_card *card) 3031 { 3032 struct qeth_cmd_buffer *iob; 3033 3034 QETH_CARD_TEXT(card, 2, "qdiagass"); 3035 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0); 3036 if (!iob) 3037 return -ENOMEM; 3038 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); 3039 } 3040 3041 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) 3042 { 3043 unsigned long info = get_zeroed_page(GFP_KERNEL); 3044 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; 3045 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; 3046 struct ccw_dev_id ccwid; 3047 int level; 3048 3049 tid->chpid = card->info.chpid; 3050 ccw_device_get_id(CARD_RDEV(card), &ccwid); 3051 tid->ssid = ccwid.ssid; 3052 tid->devno = ccwid.devno; 3053 if (!info) 3054 return; 3055 level = stsi(NULL, 0, 0, 0); 3056 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0)) 3057 tid->lparnr = info222->lpar_number; 3058 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) { 3059 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name)); 3060 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname)); 3061 } 3062 free_page(info); 3063 return; 3064 } 3065 3066 static int qeth_hw_trap_cb(struct qeth_card *card, 3067 struct qeth_reply *reply, unsigned long data) 3068 { 3069 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3070 u16 rc = cmd->hdr.return_code; 3071 3072 if (rc) { 3073 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc); 3074 return -EIO; 3075 } 3076 return 0; 3077 } 3078 3079 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) 3080 { 3081 struct qeth_cmd_buffer *iob; 3082 struct qeth_ipa_cmd *cmd; 3083 3084 QETH_CARD_TEXT(card, 2, "diagtrap"); 3085 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64); 3086 if (!iob) 3087 return -ENOMEM; 3088 cmd = __ipa_cmd(iob); 3089 cmd->data.diagass.type = 1; 3090 cmd->data.diagass.action = action; 3091 switch (action) { 3092 case QETH_DIAGS_TRAP_ARM: 3093 cmd->data.diagass.options = 0x0003; 3094 cmd->data.diagass.ext = 0x00010000 + 3095 sizeof(struct qeth_trap_id); 3096 qeth_get_trap_id(card, 3097 (struct qeth_trap_id *)cmd->data.diagass.cdata); 3098 break; 3099 case QETH_DIAGS_TRAP_DISARM: 3100 cmd->data.diagass.options = 0x0001; 3101 break; 3102 case QETH_DIAGS_TRAP_CAPTURE: 3103 break; 3104 } 3105 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL); 3106 } 3107 3108 static int qeth_check_qdio_errors(struct qeth_card *card, 3109 struct qdio_buffer *buf, 3110 unsigned int qdio_error, 3111 const char *dbftext) 3112 { 3113 if (qdio_error) { 3114 QETH_CARD_TEXT(card, 2, dbftext); 3115 QETH_CARD_TEXT_(card, 2, " F15=%02X", 3116 buf->element[15].sflags); 3117 QETH_CARD_TEXT_(card, 2, " F14=%02X", 3118 buf->element[14].sflags); 3119 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); 3120 if ((buf->element[15].sflags) == 0x12) { 3121 QETH_CARD_STAT_INC(card, rx_fifo_errors); 3122 return 0; 3123 } else 3124 return 1; 3125 } 3126 return 0; 3127 } 3128 3129 static void qeth_queue_input_buffer(struct qeth_card *card, int index) 3130 { 3131 struct qeth_qdio_q *queue = card->qdio.in_q; 3132 struct list_head *lh; 3133 int count; 3134 int i; 3135 int rc; 3136 int newcount = 0; 3137 3138 count = (index < queue->next_buf_to_init)? 3139 card->qdio.in_buf_pool.buf_count - 3140 (queue->next_buf_to_init - index) : 3141 card->qdio.in_buf_pool.buf_count - 3142 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index); 3143 /* only requeue at a certain threshold to avoid SIGAs */ 3144 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) { 3145 for (i = queue->next_buf_to_init; 3146 i < queue->next_buf_to_init + count; ++i) { 3147 if (qeth_init_input_buffer(card, 3148 &queue->bufs[QDIO_BUFNR(i)])) { 3149 break; 3150 } else { 3151 newcount++; 3152 } 3153 } 3154 3155 if (newcount < count) { 3156 /* we are in memory shortage so we switch back to 3157 traditional skb allocation and drop packages */ 3158 atomic_set(&card->force_alloc_skb, 3); 3159 count = newcount; 3160 } else { 3161 atomic_add_unless(&card->force_alloc_skb, -1, 0); 3162 } 3163 3164 if (!count) { 3165 i = 0; 3166 list_for_each(lh, &card->qdio.in_buf_pool.entry_list) 3167 i++; 3168 if (i == card->qdio.in_buf_pool.buf_count) { 3169 QETH_CARD_TEXT(card, 2, "qsarbw"); 3170 card->reclaim_index = index; 3171 schedule_delayed_work( 3172 &card->buffer_reclaim_work, 3173 QETH_RECLAIM_WORK_TIME); 3174 } 3175 return; 3176 } 3177 3178 /* 3179 * according to old code it should be avoided to requeue all 3180 * 128 buffers in order to benefit from PCI avoidance. 3181 * this function keeps at least one buffer (the buffer at 3182 * 'index') un-requeued -> this buffer is the first buffer that 3183 * will be requeued the next time 3184 */ 3185 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 3186 queue->next_buf_to_init, count); 3187 if (rc) { 3188 QETH_CARD_TEXT(card, 2, "qinberr"); 3189 } 3190 queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init + 3191 count); 3192 } 3193 } 3194 3195 static void qeth_buffer_reclaim_work(struct work_struct *work) 3196 { 3197 struct qeth_card *card = container_of(work, struct qeth_card, 3198 buffer_reclaim_work.work); 3199 3200 QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index); 3201 qeth_queue_input_buffer(card, card->reclaim_index); 3202 } 3203 3204 static void qeth_handle_send_error(struct qeth_card *card, 3205 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) 3206 { 3207 int sbalf15 = buffer->buffer->element[15].sflags; 3208 3209 QETH_CARD_TEXT(card, 6, "hdsnderr"); 3210 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr"); 3211 3212 if (!qdio_err) 3213 return; 3214 3215 if ((sbalf15 >= 15) && (sbalf15 <= 31)) 3216 return; 3217 3218 QETH_CARD_TEXT(card, 1, "lnkfail"); 3219 QETH_CARD_TEXT_(card, 1, "%04x %02x", 3220 (u16)qdio_err, (u8)sbalf15); 3221 } 3222 3223 /** 3224 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer. 3225 * @queue: queue to check for packing buffer 3226 * 3227 * Returns number of buffers that were prepared for flush. 3228 */ 3229 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue) 3230 { 3231 struct qeth_qdio_out_buffer *buffer; 3232 3233 buffer = queue->bufs[queue->next_buf_to_fill]; 3234 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && 3235 (buffer->next_element_to_fill > 0)) { 3236 /* it's a packing buffer */ 3237 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3238 queue->next_buf_to_fill = 3239 QDIO_BUFNR(queue->next_buf_to_fill + 1); 3240 return 1; 3241 } 3242 return 0; 3243 } 3244 3245 /* 3246 * Switched to packing state if the number of used buffers on a queue 3247 * reaches a certain limit. 3248 */ 3249 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) 3250 { 3251 if (!queue->do_pack) { 3252 if (atomic_read(&queue->used_buffers) 3253 >= QETH_HIGH_WATERMARK_PACK){ 3254 /* switch non-PACKING -> PACKING */ 3255 QETH_CARD_TEXT(queue->card, 6, "np->pack"); 3256 QETH_TXQ_STAT_INC(queue, packing_mode_switch); 3257 queue->do_pack = 1; 3258 } 3259 } 3260 } 3261 3262 /* 3263 * Switches from packing to non-packing mode. If there is a packing 3264 * buffer on the queue this buffer will be prepared to be flushed. 3265 * In that case 1 is returned to inform the caller. If no buffer 3266 * has to be flushed, zero is returned. 3267 */ 3268 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) 3269 { 3270 if (queue->do_pack) { 3271 if (atomic_read(&queue->used_buffers) 3272 <= QETH_LOW_WATERMARK_PACK) { 3273 /* switch PACKING -> non-PACKING */ 3274 QETH_CARD_TEXT(queue->card, 6, "pack->np"); 3275 QETH_TXQ_STAT_INC(queue, packing_mode_switch); 3276 queue->do_pack = 0; 3277 return qeth_prep_flush_pack_buffer(queue); 3278 } 3279 } 3280 return 0; 3281 } 3282 3283 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, 3284 int count) 3285 { 3286 struct qeth_card *card = queue->card; 3287 struct qeth_qdio_out_buffer *buf; 3288 int rc; 3289 int i; 3290 unsigned int qdio_flags; 3291 3292 for (i = index; i < index + count; ++i) { 3293 unsigned int bidx = QDIO_BUFNR(i); 3294 3295 buf = queue->bufs[bidx]; 3296 buf->buffer->element[buf->next_element_to_fill - 1].eflags |= 3297 SBAL_EFLAGS_LAST_ENTRY; 3298 3299 if (queue->bufstates) 3300 queue->bufstates[bidx].user = buf; 3301 3302 if (IS_IQD(queue->card)) 3303 continue; 3304 3305 if (!queue->do_pack) { 3306 if ((atomic_read(&queue->used_buffers) >= 3307 (QETH_HIGH_WATERMARK_PACK - 3308 QETH_WATERMARK_PACK_FUZZ)) && 3309 !atomic_read(&queue->set_pci_flags_count)) { 3310 /* it's likely that we'll go to packing 3311 * mode soon */ 3312 atomic_inc(&queue->set_pci_flags_count); 3313 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; 3314 } 3315 } else { 3316 if (!atomic_read(&queue->set_pci_flags_count)) { 3317 /* 3318 * there's no outstanding PCI any more, so we 3319 * have to request a PCI to be sure the the PCI 3320 * will wake at some time in the future then we 3321 * can flush packed buffers that might still be 3322 * hanging around, which can happen if no 3323 * further send was requested by the stack 3324 */ 3325 atomic_inc(&queue->set_pci_flags_count); 3326 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; 3327 } 3328 } 3329 } 3330 3331 qdio_flags = QDIO_FLAG_SYNC_OUTPUT; 3332 if (atomic_read(&queue->set_pci_flags_count)) 3333 qdio_flags |= QDIO_FLAG_PCI_OUT; 3334 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, 3335 queue->queue_no, index, count); 3336 3337 /* Fake the TX completion interrupt: */ 3338 if (IS_IQD(card)) 3339 napi_schedule(&queue->napi); 3340 3341 if (rc) { 3342 /* ignore temporary SIGA errors without busy condition */ 3343 if (rc == -ENOBUFS) 3344 return; 3345 QETH_CARD_TEXT(queue->card, 2, "flushbuf"); 3346 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no); 3347 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index); 3348 QETH_CARD_TEXT_(queue->card, 2, " c%d", count); 3349 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc); 3350 3351 /* this must not happen under normal circumstances. if it 3352 * happens something is really wrong -> recover */ 3353 qeth_schedule_recovery(queue->card); 3354 return; 3355 } 3356 } 3357 3358 static void qeth_flush_queue(struct qeth_qdio_out_q *queue) 3359 { 3360 qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count); 3361 3362 queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count); 3363 queue->prev_hdr = NULL; 3364 queue->bulk_count = 0; 3365 } 3366 3367 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) 3368 { 3369 int index; 3370 int flush_cnt = 0; 3371 int q_was_packing = 0; 3372 3373 /* 3374 * check if weed have to switch to non-packing mode or if 3375 * we have to get a pci flag out on the queue 3376 */ 3377 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || 3378 !atomic_read(&queue->set_pci_flags_count)) { 3379 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) == 3380 QETH_OUT_Q_UNLOCKED) { 3381 /* 3382 * If we get in here, there was no action in 3383 * do_send_packet. So, we check if there is a 3384 * packing buffer to be flushed here. 3385 */ 3386 index = queue->next_buf_to_fill; 3387 q_was_packing = queue->do_pack; 3388 /* queue->do_pack may change */ 3389 barrier(); 3390 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue); 3391 if (!flush_cnt && 3392 !atomic_read(&queue->set_pci_flags_count)) 3393 flush_cnt += qeth_prep_flush_pack_buffer(queue); 3394 if (q_was_packing) 3395 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt); 3396 if (flush_cnt) 3397 qeth_flush_buffers(queue, index, flush_cnt); 3398 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3399 } 3400 } 3401 } 3402 3403 static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, 3404 unsigned long card_ptr) 3405 { 3406 struct qeth_card *card = (struct qeth_card *)card_ptr; 3407 3408 if (card->dev->flags & IFF_UP) 3409 napi_schedule_irqoff(&card->napi); 3410 } 3411 3412 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) 3413 { 3414 int rc; 3415 3416 if (card->options.cq == QETH_CQ_NOTAVAILABLE) { 3417 rc = -1; 3418 goto out; 3419 } else { 3420 if (card->options.cq == cq) { 3421 rc = 0; 3422 goto out; 3423 } 3424 3425 qeth_free_qdio_queues(card); 3426 card->options.cq = cq; 3427 rc = 0; 3428 } 3429 out: 3430 return rc; 3431 3432 } 3433 EXPORT_SYMBOL_GPL(qeth_configure_cq); 3434 3435 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, 3436 unsigned int queue, int first_element, 3437 int count) 3438 { 3439 struct qeth_qdio_q *cq = card->qdio.c_q; 3440 int i; 3441 int rc; 3442 3443 if (!qeth_is_cq(card, queue)) 3444 return; 3445 3446 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element); 3447 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count); 3448 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err); 3449 3450 if (qdio_err) { 3451 netif_tx_stop_all_queues(card->dev); 3452 qeth_schedule_recovery(card); 3453 return; 3454 } 3455 3456 for (i = first_element; i < first_element + count; ++i) { 3457 struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)]; 3458 int e = 0; 3459 3460 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && 3461 buffer->element[e].addr) { 3462 unsigned long phys_aob_addr; 3463 3464 phys_aob_addr = (unsigned long) buffer->element[e].addr; 3465 qeth_qdio_handle_aob(card, phys_aob_addr); 3466 ++e; 3467 } 3468 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); 3469 } 3470 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, 3471 card->qdio.c_q->next_buf_to_init, 3472 count); 3473 if (rc) { 3474 dev_warn(&card->gdev->dev, 3475 "QDIO reported an error, rc=%i\n", rc); 3476 QETH_CARD_TEXT(card, 2, "qcqherr"); 3477 } 3478 3479 cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count); 3480 } 3481 3482 static void qeth_qdio_input_handler(struct ccw_device *ccwdev, 3483 unsigned int qdio_err, int queue, 3484 int first_elem, int count, 3485 unsigned long card_ptr) 3486 { 3487 struct qeth_card *card = (struct qeth_card *)card_ptr; 3488 3489 QETH_CARD_TEXT_(card, 2, "qihq%d", queue); 3490 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err); 3491 3492 if (qeth_is_cq(card, queue)) 3493 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count); 3494 else if (qdio_err) 3495 qeth_schedule_recovery(card); 3496 } 3497 3498 static void qeth_qdio_output_handler(struct ccw_device *ccwdev, 3499 unsigned int qdio_error, int __queue, 3500 int first_element, int count, 3501 unsigned long card_ptr) 3502 { 3503 struct qeth_card *card = (struct qeth_card *) card_ptr; 3504 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; 3505 struct net_device *dev = card->dev; 3506 struct netdev_queue *txq; 3507 int i; 3508 3509 QETH_CARD_TEXT(card, 6, "qdouhdl"); 3510 if (qdio_error & QDIO_ERROR_FATAL) { 3511 QETH_CARD_TEXT(card, 2, "achkcond"); 3512 netif_tx_stop_all_queues(dev); 3513 qeth_schedule_recovery(card); 3514 return; 3515 } 3516 3517 for (i = first_element; i < (first_element + count); ++i) { 3518 struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)]; 3519 3520 qeth_handle_send_error(card, buf, qdio_error); 3521 qeth_clear_output_buffer(queue, buf, qdio_error, 0); 3522 } 3523 3524 atomic_sub(count, &queue->used_buffers); 3525 qeth_check_outbound_queue(queue); 3526 3527 txq = netdev_get_tx_queue(dev, __queue); 3528 /* xmit may have observed the full-condition, but not yet stopped the 3529 * txq. In which case the code below won't trigger. So before returning, 3530 * xmit will re-check the txq's fill level and wake it up if needed. 3531 */ 3532 if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue)) 3533 netif_tx_wake_queue(txq); 3534 } 3535 3536 /** 3537 * Note: Function assumes that we have 4 outbound queues. 3538 */ 3539 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb) 3540 { 3541 struct vlan_ethhdr *veth = vlan_eth_hdr(skb); 3542 u8 tos; 3543 3544 switch (card->qdio.do_prio_queueing) { 3545 case QETH_PRIO_Q_ING_TOS: 3546 case QETH_PRIO_Q_ING_PREC: 3547 switch (qeth_get_ip_version(skb)) { 3548 case 4: 3549 tos = ipv4_get_dsfield(ip_hdr(skb)); 3550 break; 3551 case 6: 3552 tos = ipv6_get_dsfield(ipv6_hdr(skb)); 3553 break; 3554 default: 3555 return card->qdio.default_out_queue; 3556 } 3557 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) 3558 return ~tos >> 6 & 3; 3559 if (tos & IPTOS_MINCOST) 3560 return 3; 3561 if (tos & IPTOS_RELIABILITY) 3562 return 2; 3563 if (tos & IPTOS_THROUGHPUT) 3564 return 1; 3565 if (tos & IPTOS_LOWDELAY) 3566 return 0; 3567 break; 3568 case QETH_PRIO_Q_ING_SKB: 3569 if (skb->priority > 5) 3570 return 0; 3571 return ~skb->priority >> 1 & 3; 3572 case QETH_PRIO_Q_ING_VLAN: 3573 if (veth->h_vlan_proto == htons(ETH_P_8021Q)) 3574 return ~ntohs(veth->h_vlan_TCI) >> 3575 (VLAN_PRIO_SHIFT + 1) & 3; 3576 break; 3577 default: 3578 break; 3579 } 3580 return card->qdio.default_out_queue; 3581 } 3582 EXPORT_SYMBOL_GPL(qeth_get_priority_queue); 3583 3584 /** 3585 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags. 3586 * @skb: SKB address 3587 * 3588 * Returns the number of pages, and thus QDIO buffer elements, needed to cover 3589 * fragmented part of the SKB. Returns zero for linear SKB. 3590 */ 3591 static int qeth_get_elements_for_frags(struct sk_buff *skb) 3592 { 3593 int cnt, elements = 0; 3594 3595 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3596 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 3597 3598 elements += qeth_get_elements_for_range( 3599 (addr_t)skb_frag_address(frag), 3600 (addr_t)skb_frag_address(frag) + skb_frag_size(frag)); 3601 } 3602 return elements; 3603 } 3604 3605 /** 3606 * qeth_count_elements() - Counts the number of QDIO buffer elements needed 3607 * to transmit an skb. 3608 * @skb: the skb to operate on. 3609 * @data_offset: skip this part of the skb's linear data 3610 * 3611 * Returns the number of pages, and thus QDIO buffer elements, needed to map the 3612 * skb's data (both its linear part and paged fragments). 3613 */ 3614 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset) 3615 { 3616 unsigned int elements = qeth_get_elements_for_frags(skb); 3617 addr_t end = (addr_t)skb->data + skb_headlen(skb); 3618 addr_t start = (addr_t)skb->data + data_offset; 3619 3620 if (start != end) 3621 elements += qeth_get_elements_for_range(start, end); 3622 return elements; 3623 } 3624 EXPORT_SYMBOL_GPL(qeth_count_elements); 3625 3626 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \ 3627 MAX_TCP_HEADER) 3628 3629 /** 3630 * qeth_add_hw_header() - add a HW header to an skb. 3631 * @skb: skb that the HW header should be added to. 3632 * @hdr: double pointer to a qeth_hdr. When returning with >= 0, 3633 * it contains a valid pointer to a qeth_hdr. 3634 * @hdr_len: length of the HW header. 3635 * @proto_len: length of protocol headers that need to be in same page as the 3636 * HW header. 3637 * 3638 * Returns the pushed length. If the header can't be pushed on 3639 * (eg. because it would cross a page boundary), it is allocated from 3640 * the cache instead and 0 is returned. 3641 * The number of needed buffer elements is returned in @elements. 3642 * Error to create the hdr is indicated by returning with < 0. 3643 */ 3644 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue, 3645 struct sk_buff *skb, struct qeth_hdr **hdr, 3646 unsigned int hdr_len, unsigned int proto_len, 3647 unsigned int *elements) 3648 { 3649 const unsigned int contiguous = proto_len ? proto_len : 1; 3650 const unsigned int max_elements = queue->max_elements; 3651 unsigned int __elements; 3652 addr_t start, end; 3653 bool push_ok; 3654 int rc; 3655 3656 check_layout: 3657 start = (addr_t)skb->data - hdr_len; 3658 end = (addr_t)skb->data; 3659 3660 if (qeth_get_elements_for_range(start, end + contiguous) == 1) { 3661 /* Push HW header into same page as first protocol header. */ 3662 push_ok = true; 3663 /* ... but TSO always needs a separate element for headers: */ 3664 if (skb_is_gso(skb)) 3665 __elements = 1 + qeth_count_elements(skb, proto_len); 3666 else 3667 __elements = qeth_count_elements(skb, 0); 3668 } else if (!proto_len && PAGE_ALIGNED(skb->data)) { 3669 /* Push HW header into preceding page, flush with skb->data. */ 3670 push_ok = true; 3671 __elements = 1 + qeth_count_elements(skb, 0); 3672 } else { 3673 /* Use header cache, copy protocol headers up. */ 3674 push_ok = false; 3675 __elements = 1 + qeth_count_elements(skb, proto_len); 3676 } 3677 3678 /* Compress skb to fit into one IO buffer: */ 3679 if (__elements > max_elements) { 3680 if (!skb_is_nonlinear(skb)) { 3681 /* Drop it, no easy way of shrinking it further. */ 3682 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n", 3683 max_elements, __elements, skb->len); 3684 return -E2BIG; 3685 } 3686 3687 rc = skb_linearize(skb); 3688 if (rc) { 3689 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail); 3690 return rc; 3691 } 3692 3693 QETH_TXQ_STAT_INC(queue, skbs_linearized); 3694 /* Linearization changed the layout, re-evaluate: */ 3695 goto check_layout; 3696 } 3697 3698 *elements = __elements; 3699 /* Add the header: */ 3700 if (push_ok) { 3701 *hdr = skb_push(skb, hdr_len); 3702 return hdr_len; 3703 } 3704 /* fall back */ 3705 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE) 3706 return -E2BIG; 3707 *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); 3708 if (!*hdr) 3709 return -ENOMEM; 3710 /* Copy protocol headers behind HW header: */ 3711 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len); 3712 return 0; 3713 } 3714 3715 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue, 3716 struct sk_buff *curr_skb, 3717 struct qeth_hdr *curr_hdr) 3718 { 3719 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start]; 3720 struct qeth_hdr *prev_hdr = queue->prev_hdr; 3721 3722 if (!prev_hdr) 3723 return true; 3724 3725 /* All packets must have the same target: */ 3726 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 3727 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list); 3728 3729 return ether_addr_equal(eth_hdr(prev_skb)->h_dest, 3730 eth_hdr(curr_skb)->h_dest) && 3731 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2); 3732 } 3733 3734 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) && 3735 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3); 3736 } 3737 3738 static unsigned int __qeth_fill_buffer(struct sk_buff *skb, 3739 struct qeth_qdio_out_buffer *buf, 3740 bool is_first_elem, unsigned int offset) 3741 { 3742 struct qdio_buffer *buffer = buf->buffer; 3743 int element = buf->next_element_to_fill; 3744 int length = skb_headlen(skb) - offset; 3745 char *data = skb->data + offset; 3746 unsigned int elem_length, cnt; 3747 3748 /* map linear part into buffer element(s) */ 3749 while (length > 0) { 3750 elem_length = min_t(unsigned int, length, 3751 PAGE_SIZE - offset_in_page(data)); 3752 3753 buffer->element[element].addr = data; 3754 buffer->element[element].length = elem_length; 3755 length -= elem_length; 3756 if (is_first_elem) { 3757 is_first_elem = false; 3758 if (length || skb_is_nonlinear(skb)) 3759 /* skb needs additional elements */ 3760 buffer->element[element].eflags = 3761 SBAL_EFLAGS_FIRST_FRAG; 3762 else 3763 buffer->element[element].eflags = 0; 3764 } else { 3765 buffer->element[element].eflags = 3766 SBAL_EFLAGS_MIDDLE_FRAG; 3767 } 3768 3769 data += elem_length; 3770 element++; 3771 } 3772 3773 /* map page frags into buffer element(s) */ 3774 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3775 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 3776 3777 data = skb_frag_address(frag); 3778 length = skb_frag_size(frag); 3779 while (length > 0) { 3780 elem_length = min_t(unsigned int, length, 3781 PAGE_SIZE - offset_in_page(data)); 3782 3783 buffer->element[element].addr = data; 3784 buffer->element[element].length = elem_length; 3785 buffer->element[element].eflags = 3786 SBAL_EFLAGS_MIDDLE_FRAG; 3787 3788 length -= elem_length; 3789 data += elem_length; 3790 element++; 3791 } 3792 } 3793 3794 if (buffer->element[element - 1].eflags) 3795 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; 3796 buf->next_element_to_fill = element; 3797 return element; 3798 } 3799 3800 /** 3801 * qeth_fill_buffer() - map skb into an output buffer 3802 * @buf: buffer to transport the skb 3803 * @skb: skb to map into the buffer 3804 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated 3805 * from qeth_core_header_cache. 3806 * @offset: when mapping the skb, start at skb->data + offset 3807 * @hd_len: if > 0, build a dedicated header element of this size 3808 */ 3809 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, 3810 struct sk_buff *skb, struct qeth_hdr *hdr, 3811 unsigned int offset, unsigned int hd_len) 3812 { 3813 struct qdio_buffer *buffer = buf->buffer; 3814 bool is_first_elem = true; 3815 3816 __skb_queue_tail(&buf->skb_list, skb); 3817 3818 /* build dedicated header element */ 3819 if (hd_len) { 3820 int element = buf->next_element_to_fill; 3821 is_first_elem = false; 3822 3823 buffer->element[element].addr = hdr; 3824 buffer->element[element].length = hd_len; 3825 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; 3826 /* remember to free cache-allocated qeth_hdr: */ 3827 buf->is_header[element] = ((void *)hdr != skb->data); 3828 buf->next_element_to_fill++; 3829 } 3830 3831 return __qeth_fill_buffer(skb, buf, is_first_elem, offset); 3832 } 3833 3834 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue, 3835 struct sk_buff *skb, unsigned int elements, 3836 struct qeth_hdr *hdr, unsigned int offset, 3837 unsigned int hd_len) 3838 { 3839 unsigned int bytes = qdisc_pkt_len(skb); 3840 struct qeth_qdio_out_buffer *buffer; 3841 unsigned int next_element; 3842 struct netdev_queue *txq; 3843 bool stopped = false; 3844 bool flush; 3845 3846 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)]; 3847 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 3848 3849 /* Just a sanity check, the wake/stop logic should ensure that we always 3850 * get a free buffer. 3851 */ 3852 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 3853 return -EBUSY; 3854 3855 flush = !qeth_iqd_may_bulk(queue, skb, hdr); 3856 3857 if (flush || 3858 (buffer->next_element_to_fill + elements > queue->max_elements)) { 3859 if (buffer->next_element_to_fill > 0) { 3860 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3861 queue->bulk_count++; 3862 } 3863 3864 if (queue->bulk_count >= queue->bulk_max) 3865 flush = true; 3866 3867 if (flush) 3868 qeth_flush_queue(queue); 3869 3870 buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + 3871 queue->bulk_count)]; 3872 3873 /* Sanity-check again: */ 3874 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 3875 return -EBUSY; 3876 } 3877 3878 if (buffer->next_element_to_fill == 0 && 3879 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 3880 /* If a TX completion happens right _here_ and misses to wake 3881 * the txq, then our re-check below will catch the race. 3882 */ 3883 QETH_TXQ_STAT_INC(queue, stopped); 3884 netif_tx_stop_queue(txq); 3885 stopped = true; 3886 } 3887 3888 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 3889 buffer->bytes += bytes; 3890 queue->prev_hdr = hdr; 3891 3892 flush = __netdev_tx_sent_queue(txq, bytes, 3893 !stopped && netdev_xmit_more()); 3894 3895 if (flush || next_element >= queue->max_elements) { 3896 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3897 queue->bulk_count++; 3898 3899 if (queue->bulk_count >= queue->bulk_max) 3900 flush = true; 3901 3902 if (flush) 3903 qeth_flush_queue(queue); 3904 } 3905 3906 if (stopped && !qeth_out_queue_is_full(queue)) 3907 netif_tx_start_queue(txq); 3908 return 0; 3909 } 3910 3911 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, 3912 struct sk_buff *skb, struct qeth_hdr *hdr, 3913 unsigned int offset, unsigned int hd_len, 3914 int elements_needed) 3915 { 3916 struct qeth_qdio_out_buffer *buffer; 3917 unsigned int next_element; 3918 struct netdev_queue *txq; 3919 bool stopped = false; 3920 int start_index; 3921 int flush_count = 0; 3922 int do_pack = 0; 3923 int tmp; 3924 int rc = 0; 3925 3926 /* spin until we get the queue ... */ 3927 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, 3928 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); 3929 start_index = queue->next_buf_to_fill; 3930 buffer = queue->bufs[queue->next_buf_to_fill]; 3931 3932 /* Just a sanity check, the wake/stop logic should ensure that we always 3933 * get a free buffer. 3934 */ 3935 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { 3936 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3937 return -EBUSY; 3938 } 3939 3940 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 3941 3942 /* check if we need to switch packing state of this queue */ 3943 qeth_switch_to_packing_if_needed(queue); 3944 if (queue->do_pack) { 3945 do_pack = 1; 3946 /* does packet fit in current buffer? */ 3947 if (buffer->next_element_to_fill + elements_needed > 3948 queue->max_elements) { 3949 /* ... no -> set state PRIMED */ 3950 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3951 flush_count++; 3952 queue->next_buf_to_fill = 3953 QDIO_BUFNR(queue->next_buf_to_fill + 1); 3954 buffer = queue->bufs[queue->next_buf_to_fill]; 3955 3956 /* We stepped forward, so sanity-check again: */ 3957 if (atomic_read(&buffer->state) != 3958 QETH_QDIO_BUF_EMPTY) { 3959 qeth_flush_buffers(queue, start_index, 3960 flush_count); 3961 atomic_set(&queue->state, 3962 QETH_OUT_Q_UNLOCKED); 3963 rc = -EBUSY; 3964 goto out; 3965 } 3966 } 3967 } 3968 3969 if (buffer->next_element_to_fill == 0 && 3970 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 3971 /* If a TX completion happens right _here_ and misses to wake 3972 * the txq, then our re-check below will catch the race. 3973 */ 3974 QETH_TXQ_STAT_INC(queue, stopped); 3975 netif_tx_stop_queue(txq); 3976 stopped = true; 3977 } 3978 3979 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 3980 3981 if (queue->do_pack) 3982 QETH_TXQ_STAT_INC(queue, skbs_pack); 3983 if (!queue->do_pack || stopped || next_element >= queue->max_elements) { 3984 flush_count++; 3985 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3986 queue->next_buf_to_fill = 3987 QDIO_BUFNR(queue->next_buf_to_fill + 1); 3988 } 3989 3990 if (flush_count) 3991 qeth_flush_buffers(queue, start_index, flush_count); 3992 else if (!atomic_read(&queue->set_pci_flags_count)) 3993 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); 3994 /* 3995 * queue->state will go from LOCKED -> UNLOCKED or from 3996 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us 3997 * (switch packing state or flush buffer to get another pci flag out). 3998 * In that case we will enter this loop 3999 */ 4000 while (atomic_dec_return(&queue->state)) { 4001 start_index = queue->next_buf_to_fill; 4002 /* check if we can go back to non-packing state */ 4003 tmp = qeth_switch_to_nonpacking_if_needed(queue); 4004 /* 4005 * check if we need to flush a packing buffer to get a pci 4006 * flag out on the queue 4007 */ 4008 if (!tmp && !atomic_read(&queue->set_pci_flags_count)) 4009 tmp = qeth_prep_flush_pack_buffer(queue); 4010 if (tmp) { 4011 qeth_flush_buffers(queue, start_index, tmp); 4012 flush_count += tmp; 4013 } 4014 } 4015 out: 4016 /* at this point the queue is UNLOCKED again */ 4017 if (do_pack) 4018 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); 4019 4020 if (stopped && !qeth_out_queue_is_full(queue)) 4021 netif_tx_start_queue(txq); 4022 return rc; 4023 } 4024 EXPORT_SYMBOL_GPL(qeth_do_send_packet); 4025 4026 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, 4027 unsigned int payload_len, struct sk_buff *skb, 4028 unsigned int proto_len) 4029 { 4030 struct qeth_hdr_ext_tso *ext = &hdr->ext; 4031 4032 ext->hdr_tot_len = sizeof(*ext); 4033 ext->imb_hdr_no = 1; 4034 ext->hdr_type = 1; 4035 ext->hdr_version = 1; 4036 ext->hdr_len = 28; 4037 ext->payload_len = payload_len; 4038 ext->mss = skb_shinfo(skb)->gso_size; 4039 ext->dg_hdr_len = proto_len; 4040 } 4041 4042 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, 4043 struct qeth_qdio_out_q *queue, int ipv, 4044 void (*fill_header)(struct qeth_qdio_out_q *queue, 4045 struct qeth_hdr *hdr, struct sk_buff *skb, 4046 int ipv, unsigned int data_len)) 4047 { 4048 unsigned int proto_len, hw_hdr_len; 4049 unsigned int frame_len = skb->len; 4050 bool is_tso = skb_is_gso(skb); 4051 unsigned int data_offset = 0; 4052 struct qeth_hdr *hdr = NULL; 4053 unsigned int hd_len = 0; 4054 unsigned int elements; 4055 int push_len, rc; 4056 4057 if (is_tso) { 4058 hw_hdr_len = sizeof(struct qeth_hdr_tso); 4059 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4060 } else { 4061 hw_hdr_len = sizeof(struct qeth_hdr); 4062 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0; 4063 } 4064 4065 rc = skb_cow_head(skb, hw_hdr_len); 4066 if (rc) 4067 return rc; 4068 4069 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len, 4070 &elements); 4071 if (push_len < 0) 4072 return push_len; 4073 if (is_tso || !push_len) { 4074 /* HW header needs its own buffer element. */ 4075 hd_len = hw_hdr_len + proto_len; 4076 data_offset = push_len + proto_len; 4077 } 4078 memset(hdr, 0, hw_hdr_len); 4079 fill_header(queue, hdr, skb, ipv, frame_len); 4080 if (is_tso) 4081 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, 4082 frame_len - proto_len, skb, proto_len); 4083 4084 if (IS_IQD(card)) { 4085 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset, 4086 hd_len); 4087 } else { 4088 /* TODO: drop skb_orphan() once TX completion is fast enough */ 4089 skb_orphan(skb); 4090 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, 4091 hd_len, elements); 4092 } 4093 4094 if (rc && !push_len) 4095 kmem_cache_free(qeth_core_header_cache, hdr); 4096 4097 return rc; 4098 } 4099 EXPORT_SYMBOL_GPL(qeth_xmit); 4100 4101 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, 4102 struct qeth_reply *reply, unsigned long data) 4103 { 4104 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4105 struct qeth_ipacmd_setadpparms *setparms; 4106 4107 QETH_CARD_TEXT(card, 4, "prmadpcb"); 4108 4109 setparms = &(cmd->data.setadapterparms); 4110 if (qeth_setadpparms_inspect_rc(cmd)) { 4111 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); 4112 setparms->data.mode = SET_PROMISC_MODE_OFF; 4113 } 4114 card->info.promisc_mode = setparms->data.mode; 4115 return (cmd->hdr.return_code) ? -EIO : 0; 4116 } 4117 4118 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable) 4119 { 4120 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON : 4121 SET_PROMISC_MODE_OFF; 4122 struct qeth_cmd_buffer *iob; 4123 struct qeth_ipa_cmd *cmd; 4124 4125 QETH_CARD_TEXT(card, 4, "setprom"); 4126 QETH_CARD_TEXT_(card, 4, "mode:%x", mode); 4127 4128 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, 4129 SETADP_DATA_SIZEOF(mode)); 4130 if (!iob) 4131 return; 4132 cmd = __ipa_cmd(iob); 4133 cmd->data.setadapterparms.data.mode = mode; 4134 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); 4135 } 4136 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); 4137 4138 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, 4139 struct qeth_reply *reply, unsigned long data) 4140 { 4141 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4142 struct qeth_ipacmd_setadpparms *adp_cmd; 4143 4144 QETH_CARD_TEXT(card, 4, "chgmaccb"); 4145 if (qeth_setadpparms_inspect_rc(cmd)) 4146 return -EIO; 4147 4148 adp_cmd = &cmd->data.setadapterparms; 4149 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr)) 4150 return -EADDRNOTAVAIL; 4151 4152 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) && 4153 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) 4154 return -EADDRNOTAVAIL; 4155 4156 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr); 4157 return 0; 4158 } 4159 4160 int qeth_setadpparms_change_macaddr(struct qeth_card *card) 4161 { 4162 int rc; 4163 struct qeth_cmd_buffer *iob; 4164 struct qeth_ipa_cmd *cmd; 4165 4166 QETH_CARD_TEXT(card, 4, "chgmac"); 4167 4168 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, 4169 SETADP_DATA_SIZEOF(change_addr)); 4170 if (!iob) 4171 return -ENOMEM; 4172 cmd = __ipa_cmd(iob); 4173 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; 4174 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN; 4175 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr, 4176 card->dev->dev_addr); 4177 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, 4178 NULL); 4179 return rc; 4180 } 4181 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 4182 4183 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, 4184 struct qeth_reply *reply, unsigned long data) 4185 { 4186 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4187 struct qeth_set_access_ctrl *access_ctrl_req; 4188 int fallback = *(int *)reply->param; 4189 4190 QETH_CARD_TEXT(card, 4, "setaccb"); 4191 if (cmd->hdr.return_code) 4192 return -EIO; 4193 qeth_setadpparms_inspect_rc(cmd); 4194 4195 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4196 QETH_CARD_TEXT_(card, 2, "rc=%d", 4197 cmd->data.setadapterparms.hdr.return_code); 4198 if (cmd->data.setadapterparms.hdr.return_code != 4199 SET_ACCESS_CTRL_RC_SUCCESS) 4200 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", 4201 access_ctrl_req->subcmd_code, CARD_DEVID(card), 4202 cmd->data.setadapterparms.hdr.return_code); 4203 switch (cmd->data.setadapterparms.hdr.return_code) { 4204 case SET_ACCESS_CTRL_RC_SUCCESS: 4205 if (card->options.isolation == ISOLATION_MODE_NONE) { 4206 dev_info(&card->gdev->dev, 4207 "QDIO data connection isolation is deactivated\n"); 4208 } else { 4209 dev_info(&card->gdev->dev, 4210 "QDIO data connection isolation is activated\n"); 4211 } 4212 break; 4213 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: 4214 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n", 4215 CARD_DEVID(card)); 4216 if (fallback) 4217 card->options.isolation = card->options.prev_isolation; 4218 break; 4219 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: 4220 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n", 4221 CARD_DEVID(card)); 4222 if (fallback) 4223 card->options.isolation = card->options.prev_isolation; 4224 break; 4225 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: 4226 dev_err(&card->gdev->dev, "Adapter does not " 4227 "support QDIO data connection isolation\n"); 4228 break; 4229 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: 4230 dev_err(&card->gdev->dev, 4231 "Adapter is dedicated. " 4232 "QDIO data connection isolation not supported\n"); 4233 if (fallback) 4234 card->options.isolation = card->options.prev_isolation; 4235 break; 4236 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: 4237 dev_err(&card->gdev->dev, 4238 "TSO does not permit QDIO data connection isolation\n"); 4239 if (fallback) 4240 card->options.isolation = card->options.prev_isolation; 4241 break; 4242 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: 4243 dev_err(&card->gdev->dev, "The adjacent switch port does not " 4244 "support reflective relay mode\n"); 4245 if (fallback) 4246 card->options.isolation = card->options.prev_isolation; 4247 break; 4248 case SET_ACCESS_CTRL_RC_REFLREL_FAILED: 4249 dev_err(&card->gdev->dev, "The reflective relay mode cannot be " 4250 "enabled at the adjacent switch port"); 4251 if (fallback) 4252 card->options.isolation = card->options.prev_isolation; 4253 break; 4254 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: 4255 dev_warn(&card->gdev->dev, "Turning off reflective relay mode " 4256 "at the adjacent switch failed\n"); 4257 break; 4258 default: 4259 /* this should never happen */ 4260 if (fallback) 4261 card->options.isolation = card->options.prev_isolation; 4262 break; 4263 } 4264 return (cmd->hdr.return_code) ? -EIO : 0; 4265 } 4266 4267 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, 4268 enum qeth_ipa_isolation_modes isolation, int fallback) 4269 { 4270 int rc; 4271 struct qeth_cmd_buffer *iob; 4272 struct qeth_ipa_cmd *cmd; 4273 struct qeth_set_access_ctrl *access_ctrl_req; 4274 4275 QETH_CARD_TEXT(card, 4, "setacctl"); 4276 4277 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, 4278 SETADP_DATA_SIZEOF(set_access_ctrl)); 4279 if (!iob) 4280 return -ENOMEM; 4281 cmd = __ipa_cmd(iob); 4282 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4283 access_ctrl_req->subcmd_code = isolation; 4284 4285 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, 4286 &fallback); 4287 QETH_CARD_TEXT_(card, 2, "rc=%d", rc); 4288 return rc; 4289 } 4290 4291 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) 4292 { 4293 int rc = 0; 4294 4295 QETH_CARD_TEXT(card, 4, "setactlo"); 4296 4297 if ((IS_OSD(card) || IS_OSX(card)) && 4298 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { 4299 rc = qeth_setadpparms_set_access_ctrl(card, 4300 card->options.isolation, fallback); 4301 if (rc) { 4302 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", 4303 rc, CARD_DEVID(card)); 4304 rc = -EOPNOTSUPP; 4305 } 4306 } else if (card->options.isolation != ISOLATION_MODE_NONE) { 4307 card->options.isolation = ISOLATION_MODE_NONE; 4308 4309 dev_err(&card->gdev->dev, "Adapter does not " 4310 "support QDIO data connection isolation\n"); 4311 rc = -EOPNOTSUPP; 4312 } 4313 return rc; 4314 } 4315 4316 void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue) 4317 { 4318 struct qeth_card *card; 4319 4320 card = dev->ml_priv; 4321 QETH_CARD_TEXT(card, 4, "txtimeo"); 4322 qeth_schedule_recovery(card); 4323 } 4324 EXPORT_SYMBOL_GPL(qeth_tx_timeout); 4325 4326 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) 4327 { 4328 struct qeth_card *card = dev->ml_priv; 4329 int rc = 0; 4330 4331 switch (regnum) { 4332 case MII_BMCR: /* Basic mode control register */ 4333 rc = BMCR_FULLDPLX; 4334 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && 4335 (card->info.link_type != QETH_LINK_TYPE_OSN) && 4336 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) && 4337 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH)) 4338 rc |= BMCR_SPEED100; 4339 break; 4340 case MII_BMSR: /* Basic mode status register */ 4341 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS | 4342 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | 4343 BMSR_100BASE4; 4344 break; 4345 case MII_PHYSID1: /* PHYS ID 1 */ 4346 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | 4347 dev->dev_addr[2]; 4348 rc = (rc >> 5) & 0xFFFF; 4349 break; 4350 case MII_PHYSID2: /* PHYS ID 2 */ 4351 rc = (dev->dev_addr[2] << 10) & 0xFFFF; 4352 break; 4353 case MII_ADVERTISE: /* Advertisement control reg */ 4354 rc = ADVERTISE_ALL; 4355 break; 4356 case MII_LPA: /* Link partner ability reg */ 4357 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | 4358 LPA_100BASE4 | LPA_LPACK; 4359 break; 4360 case MII_EXPANSION: /* Expansion register */ 4361 break; 4362 case MII_DCOUNTER: /* disconnect counter */ 4363 break; 4364 case MII_FCSCOUNTER: /* false carrier counter */ 4365 break; 4366 case MII_NWAYTEST: /* N-way auto-neg test register */ 4367 break; 4368 case MII_RERRCOUNTER: /* rx error counter */ 4369 rc = card->stats.rx_length_errors + 4370 card->stats.rx_frame_errors + 4371 card->stats.rx_fifo_errors; 4372 break; 4373 case MII_SREVISION: /* silicon revision */ 4374 break; 4375 case MII_RESV1: /* reserved 1 */ 4376 break; 4377 case MII_LBRERROR: /* loopback, rx, bypass error */ 4378 break; 4379 case MII_PHYADDR: /* physical address */ 4380 break; 4381 case MII_RESV2: /* reserved 2 */ 4382 break; 4383 case MII_TPISTATUS: /* TPI status for 10mbps */ 4384 break; 4385 case MII_NCONFIG: /* network interface config */ 4386 break; 4387 default: 4388 break; 4389 } 4390 return rc; 4391 } 4392 4393 static int qeth_snmp_command_cb(struct qeth_card *card, 4394 struct qeth_reply *reply, unsigned long data) 4395 { 4396 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4397 struct qeth_arp_query_info *qinfo = reply->param; 4398 struct qeth_ipacmd_setadpparms *adp_cmd; 4399 unsigned int data_len; 4400 void *snmp_data; 4401 4402 QETH_CARD_TEXT(card, 3, "snpcmdcb"); 4403 4404 if (cmd->hdr.return_code) { 4405 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); 4406 return -EIO; 4407 } 4408 if (cmd->data.setadapterparms.hdr.return_code) { 4409 cmd->hdr.return_code = 4410 cmd->data.setadapterparms.hdr.return_code; 4411 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); 4412 return -EIO; 4413 } 4414 4415 adp_cmd = &cmd->data.setadapterparms; 4416 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr); 4417 if (adp_cmd->hdr.seq_no == 1) { 4418 snmp_data = &adp_cmd->data.snmp; 4419 } else { 4420 snmp_data = &adp_cmd->data.snmp.request; 4421 data_len -= offsetof(struct qeth_snmp_cmd, request); 4422 } 4423 4424 /* check if there is enough room in userspace */ 4425 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { 4426 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC); 4427 return -ENOSPC; 4428 } 4429 QETH_CARD_TEXT_(card, 4, "snore%i", 4430 cmd->data.setadapterparms.hdr.used_total); 4431 QETH_CARD_TEXT_(card, 4, "sseqn%i", 4432 cmd->data.setadapterparms.hdr.seq_no); 4433 /*copy entries to user buffer*/ 4434 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); 4435 qinfo->udata_offset += data_len; 4436 4437 if (cmd->data.setadapterparms.hdr.seq_no < 4438 cmd->data.setadapterparms.hdr.used_total) 4439 return 1; 4440 return 0; 4441 } 4442 4443 static int qeth_snmp_command(struct qeth_card *card, char __user *udata) 4444 { 4445 struct qeth_snmp_ureq __user *ureq; 4446 struct qeth_cmd_buffer *iob; 4447 unsigned int req_len; 4448 struct qeth_arp_query_info qinfo = {0, }; 4449 int rc = 0; 4450 4451 QETH_CARD_TEXT(card, 3, "snmpcmd"); 4452 4453 if (IS_VM_NIC(card)) 4454 return -EOPNOTSUPP; 4455 4456 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && 4457 IS_LAYER3(card)) 4458 return -EOPNOTSUPP; 4459 4460 ureq = (struct qeth_snmp_ureq __user *) udata; 4461 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) || 4462 get_user(req_len, &ureq->hdr.req_len)) 4463 return -EFAULT; 4464 4465 /* Sanitize user input, to avoid overflows in iob size calculation: */ 4466 if (req_len > QETH_BUFSIZE) 4467 return -EINVAL; 4468 4469 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); 4470 if (!iob) 4471 return -ENOMEM; 4472 4473 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp, 4474 &ureq->cmd, req_len)) { 4475 qeth_put_cmd(iob); 4476 return -EFAULT; 4477 } 4478 4479 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 4480 if (!qinfo.udata) { 4481 qeth_put_cmd(iob); 4482 return -ENOMEM; 4483 } 4484 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); 4485 4486 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo); 4487 if (rc) 4488 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", 4489 CARD_DEVID(card), rc); 4490 else { 4491 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 4492 rc = -EFAULT; 4493 } 4494 4495 kfree(qinfo.udata); 4496 return rc; 4497 } 4498 4499 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, 4500 struct qeth_reply *reply, unsigned long data) 4501 { 4502 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4503 struct qeth_qoat_priv *priv; 4504 char *resdata; 4505 int resdatalen; 4506 4507 QETH_CARD_TEXT(card, 3, "qoatcb"); 4508 if (qeth_setadpparms_inspect_rc(cmd)) 4509 return -EIO; 4510 4511 priv = (struct qeth_qoat_priv *)reply->param; 4512 resdatalen = cmd->data.setadapterparms.hdr.cmdlength; 4513 resdata = (char *)data + 28; 4514 4515 if (resdatalen > (priv->buffer_len - priv->response_len)) 4516 return -ENOSPC; 4517 4518 memcpy((priv->buffer + priv->response_len), resdata, 4519 resdatalen); 4520 priv->response_len += resdatalen; 4521 4522 if (cmd->data.setadapterparms.hdr.seq_no < 4523 cmd->data.setadapterparms.hdr.used_total) 4524 return 1; 4525 return 0; 4526 } 4527 4528 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) 4529 { 4530 int rc = 0; 4531 struct qeth_cmd_buffer *iob; 4532 struct qeth_ipa_cmd *cmd; 4533 struct qeth_query_oat *oat_req; 4534 struct qeth_query_oat_data oat_data; 4535 struct qeth_qoat_priv priv; 4536 void __user *tmp; 4537 4538 QETH_CARD_TEXT(card, 3, "qoatcmd"); 4539 4540 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { 4541 rc = -EOPNOTSUPP; 4542 goto out; 4543 } 4544 4545 if (copy_from_user(&oat_data, udata, 4546 sizeof(struct qeth_query_oat_data))) { 4547 rc = -EFAULT; 4548 goto out; 4549 } 4550 4551 priv.buffer_len = oat_data.buffer_len; 4552 priv.response_len = 0; 4553 priv.buffer = vzalloc(oat_data.buffer_len); 4554 if (!priv.buffer) { 4555 rc = -ENOMEM; 4556 goto out; 4557 } 4558 4559 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4560 SETADP_DATA_SIZEOF(query_oat)); 4561 if (!iob) { 4562 rc = -ENOMEM; 4563 goto out_free; 4564 } 4565 cmd = __ipa_cmd(iob); 4566 oat_req = &cmd->data.setadapterparms.data.query_oat; 4567 oat_req->subcmd_code = oat_data.command; 4568 4569 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, 4570 &priv); 4571 if (!rc) { 4572 if (is_compat_task()) 4573 tmp = compat_ptr(oat_data.ptr); 4574 else 4575 tmp = (void __user *)(unsigned long)oat_data.ptr; 4576 4577 if (copy_to_user(tmp, priv.buffer, 4578 priv.response_len)) { 4579 rc = -EFAULT; 4580 goto out_free; 4581 } 4582 4583 oat_data.response_len = priv.response_len; 4584 4585 if (copy_to_user(udata, &oat_data, 4586 sizeof(struct qeth_query_oat_data))) 4587 rc = -EFAULT; 4588 } 4589 4590 out_free: 4591 vfree(priv.buffer); 4592 out: 4593 return rc; 4594 } 4595 4596 static int qeth_query_card_info_cb(struct qeth_card *card, 4597 struct qeth_reply *reply, unsigned long data) 4598 { 4599 struct carrier_info *carrier_info = (struct carrier_info *)reply->param; 4600 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4601 struct qeth_query_card_info *card_info; 4602 4603 QETH_CARD_TEXT(card, 2, "qcrdincb"); 4604 if (qeth_setadpparms_inspect_rc(cmd)) 4605 return -EIO; 4606 4607 card_info = &cmd->data.setadapterparms.data.card_info; 4608 carrier_info->card_type = card_info->card_type; 4609 carrier_info->port_mode = card_info->port_mode; 4610 carrier_info->port_speed = card_info->port_speed; 4611 return 0; 4612 } 4613 4614 int qeth_query_card_info(struct qeth_card *card, 4615 struct carrier_info *carrier_info) 4616 { 4617 struct qeth_cmd_buffer *iob; 4618 4619 QETH_CARD_TEXT(card, 2, "qcrdinfo"); 4620 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO)) 4621 return -EOPNOTSUPP; 4622 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0); 4623 if (!iob) 4624 return -ENOMEM; 4625 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, 4626 (void *)carrier_info); 4627 } 4628 4629 /** 4630 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address 4631 * @card: pointer to a qeth_card 4632 * 4633 * Returns 4634 * 0, if a MAC address has been set for the card's netdevice 4635 * a return code, for various error conditions 4636 */ 4637 int qeth_vm_request_mac(struct qeth_card *card) 4638 { 4639 struct diag26c_mac_resp *response; 4640 struct diag26c_mac_req *request; 4641 struct ccw_dev_id id; 4642 int rc; 4643 4644 QETH_CARD_TEXT(card, 2, "vmreqmac"); 4645 4646 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 4647 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 4648 if (!request || !response) { 4649 rc = -ENOMEM; 4650 goto out; 4651 } 4652 4653 ccw_device_get_id(CARD_DDEV(card), &id); 4654 request->resp_buf_len = sizeof(*response); 4655 request->resp_version = DIAG26C_VERSION2; 4656 request->op_code = DIAG26C_GET_MAC; 4657 request->devno = id.devno; 4658 4659 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 4660 rc = diag26c(request, response, DIAG26C_MAC_SERVICES); 4661 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 4662 if (rc) 4663 goto out; 4664 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 4665 4666 if (request->resp_buf_len < sizeof(*response) || 4667 response->version != request->resp_version) { 4668 rc = -EIO; 4669 QETH_CARD_TEXT(card, 2, "badresp"); 4670 QETH_CARD_HEX(card, 2, &request->resp_buf_len, 4671 sizeof(request->resp_buf_len)); 4672 } else if (!is_valid_ether_addr(response->mac)) { 4673 rc = -EINVAL; 4674 QETH_CARD_TEXT(card, 2, "badmac"); 4675 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN); 4676 } else { 4677 ether_addr_copy(card->dev->dev_addr, response->mac); 4678 } 4679 4680 out: 4681 kfree(response); 4682 kfree(request); 4683 return rc; 4684 } 4685 EXPORT_SYMBOL_GPL(qeth_vm_request_mac); 4686 4687 static void qeth_determine_capabilities(struct qeth_card *card) 4688 { 4689 struct qeth_channel *channel = &card->data; 4690 struct ccw_device *ddev = channel->ccwdev; 4691 int rc; 4692 int ddev_offline = 0; 4693 4694 QETH_CARD_TEXT(card, 2, "detcapab"); 4695 if (!ddev->online) { 4696 ddev_offline = 1; 4697 rc = qeth_start_channel(channel); 4698 if (rc) { 4699 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 4700 goto out; 4701 } 4702 } 4703 4704 rc = qeth_read_conf_data(card); 4705 if (rc) { 4706 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n", 4707 CARD_DEVID(card), rc); 4708 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 4709 goto out_offline; 4710 } 4711 4712 rc = qdio_get_ssqd_desc(ddev, &card->ssqd); 4713 if (rc) 4714 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 4715 4716 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt); 4717 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1); 4718 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2); 4719 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3); 4720 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt); 4721 if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) || 4722 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || 4723 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { 4724 dev_info(&card->gdev->dev, 4725 "Completion Queueing supported\n"); 4726 } else { 4727 card->options.cq = QETH_CQ_NOTAVAILABLE; 4728 } 4729 4730 4731 out_offline: 4732 if (ddev_offline == 1) 4733 qeth_stop_channel(channel); 4734 out: 4735 return; 4736 } 4737 4738 static void qeth_qdio_establish_cq(struct qeth_card *card, 4739 struct qdio_buffer **in_sbal_ptrs, 4740 void (**queue_start_poll) 4741 (struct ccw_device *, int, 4742 unsigned long)) 4743 { 4744 int i; 4745 4746 if (card->options.cq == QETH_CQ_ENABLED) { 4747 int offset = QDIO_MAX_BUFFERS_PER_Q * 4748 (card->qdio.no_in_queues - 1); 4749 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { 4750 in_sbal_ptrs[offset + i] = (struct qdio_buffer *) 4751 virt_to_phys(card->qdio.c_q->bufs[i].buffer); 4752 } 4753 4754 queue_start_poll[card->qdio.no_in_queues - 1] = NULL; 4755 } 4756 } 4757 4758 static int qeth_qdio_establish(struct qeth_card *card) 4759 { 4760 struct qdio_initialize init_data; 4761 char *qib_param_field; 4762 struct qdio_buffer **in_sbal_ptrs; 4763 void (**queue_start_poll) (struct ccw_device *, int, unsigned long); 4764 struct qdio_buffer **out_sbal_ptrs; 4765 int i, j, k; 4766 int rc = 0; 4767 4768 QETH_CARD_TEXT(card, 2, "qdioest"); 4769 4770 qib_param_field = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL); 4771 if (!qib_param_field) { 4772 rc = -ENOMEM; 4773 goto out_free_nothing; 4774 } 4775 4776 qeth_create_qib_param_field(card, qib_param_field); 4777 qeth_create_qib_param_field_blkt(card, qib_param_field); 4778 4779 in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q, 4780 sizeof(void *), 4781 GFP_KERNEL); 4782 if (!in_sbal_ptrs) { 4783 rc = -ENOMEM; 4784 goto out_free_qib_param; 4785 } 4786 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { 4787 in_sbal_ptrs[i] = (struct qdio_buffer *) 4788 virt_to_phys(card->qdio.in_q->bufs[i].buffer); 4789 } 4790 4791 queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *), 4792 GFP_KERNEL); 4793 if (!queue_start_poll) { 4794 rc = -ENOMEM; 4795 goto out_free_in_sbals; 4796 } 4797 for (i = 0; i < card->qdio.no_in_queues; ++i) 4798 queue_start_poll[i] = qeth_qdio_start_poll; 4799 4800 qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll); 4801 4802 out_sbal_ptrs = 4803 kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q, 4804 sizeof(void *), 4805 GFP_KERNEL); 4806 if (!out_sbal_ptrs) { 4807 rc = -ENOMEM; 4808 goto out_free_queue_start_poll; 4809 } 4810 for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i) 4811 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) { 4812 out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys( 4813 card->qdio.out_qs[i]->bufs[j]->buffer); 4814 } 4815 4816 memset(&init_data, 0, sizeof(struct qdio_initialize)); 4817 init_data.cdev = CARD_DDEV(card); 4818 init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT : 4819 QDIO_QETH_QFMT; 4820 init_data.qib_param_field_format = 0; 4821 init_data.qib_param_field = qib_param_field; 4822 init_data.no_input_qs = card->qdio.no_in_queues; 4823 init_data.no_output_qs = card->qdio.no_out_queues; 4824 init_data.input_handler = qeth_qdio_input_handler; 4825 init_data.output_handler = qeth_qdio_output_handler; 4826 init_data.queue_start_poll_array = queue_start_poll; 4827 init_data.int_parm = (unsigned long) card; 4828 init_data.input_sbal_addr_array = in_sbal_ptrs; 4829 init_data.output_sbal_addr_array = out_sbal_ptrs; 4830 init_data.output_sbal_state_array = card->qdio.out_bufstates; 4831 init_data.scan_threshold = IS_IQD(card) ? 0 : 32; 4832 4833 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, 4834 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { 4835 rc = qdio_allocate(&init_data); 4836 if (rc) { 4837 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 4838 goto out; 4839 } 4840 rc = qdio_establish(&init_data); 4841 if (rc) { 4842 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 4843 qdio_free(CARD_DDEV(card)); 4844 } 4845 } 4846 4847 switch (card->options.cq) { 4848 case QETH_CQ_ENABLED: 4849 dev_info(&card->gdev->dev, "Completion Queue support enabled"); 4850 break; 4851 case QETH_CQ_DISABLED: 4852 dev_info(&card->gdev->dev, "Completion Queue support disabled"); 4853 break; 4854 default: 4855 break; 4856 } 4857 out: 4858 kfree(out_sbal_ptrs); 4859 out_free_queue_start_poll: 4860 kfree(queue_start_poll); 4861 out_free_in_sbals: 4862 kfree(in_sbal_ptrs); 4863 out_free_qib_param: 4864 kfree(qib_param_field); 4865 out_free_nothing: 4866 return rc; 4867 } 4868 4869 static void qeth_core_free_card(struct qeth_card *card) 4870 { 4871 QETH_CARD_TEXT(card, 2, "freecrd"); 4872 qeth_put_cmd(card->read_cmd); 4873 destroy_workqueue(card->event_wq); 4874 unregister_service_level(&card->qeth_service_level); 4875 dev_set_drvdata(&card->gdev->dev, NULL); 4876 kfree(card); 4877 } 4878 4879 void qeth_trace_features(struct qeth_card *card) 4880 { 4881 QETH_CARD_TEXT(card, 2, "features"); 4882 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4)); 4883 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6)); 4884 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp)); 4885 QETH_CARD_HEX(card, 2, &card->info.diagass_support, 4886 sizeof(card->info.diagass_support)); 4887 } 4888 EXPORT_SYMBOL_GPL(qeth_trace_features); 4889 4890 static struct ccw_device_id qeth_ids[] = { 4891 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), 4892 .driver_info = QETH_CARD_TYPE_OSD}, 4893 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), 4894 .driver_info = QETH_CARD_TYPE_IQD}, 4895 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06), 4896 .driver_info = QETH_CARD_TYPE_OSN}, 4897 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), 4898 .driver_info = QETH_CARD_TYPE_OSM}, 4899 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), 4900 .driver_info = QETH_CARD_TYPE_OSX}, 4901 {}, 4902 }; 4903 MODULE_DEVICE_TABLE(ccw, qeth_ids); 4904 4905 static struct ccw_driver qeth_ccw_driver = { 4906 .driver = { 4907 .owner = THIS_MODULE, 4908 .name = "qeth", 4909 }, 4910 .ids = qeth_ids, 4911 .probe = ccwgroup_probe_ccwdev, 4912 .remove = ccwgroup_remove_ccwdev, 4913 }; 4914 4915 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok) 4916 { 4917 int retries = 3; 4918 int rc; 4919 4920 QETH_CARD_TEXT(card, 2, "hrdsetup"); 4921 atomic_set(&card->force_alloc_skb, 0); 4922 rc = qeth_update_from_chp_desc(card); 4923 if (rc) 4924 return rc; 4925 retry: 4926 if (retries < 3) 4927 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", 4928 CARD_DEVID(card)); 4929 rc = qeth_qdio_clear_card(card, !IS_IQD(card)); 4930 qeth_stop_channel(&card->data); 4931 qeth_stop_channel(&card->write); 4932 qeth_stop_channel(&card->read); 4933 qdio_free(CARD_DDEV(card)); 4934 4935 rc = qeth_start_channel(&card->read); 4936 if (rc) 4937 goto retriable; 4938 rc = qeth_start_channel(&card->write); 4939 if (rc) 4940 goto retriable; 4941 rc = qeth_start_channel(&card->data); 4942 if (rc) 4943 goto retriable; 4944 retriable: 4945 if (rc == -ERESTARTSYS) { 4946 QETH_CARD_TEXT(card, 2, "break1"); 4947 return rc; 4948 } else if (rc) { 4949 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 4950 if (--retries < 0) 4951 goto out; 4952 else 4953 goto retry; 4954 } 4955 qeth_determine_capabilities(card); 4956 qeth_init_tokens(card); 4957 qeth_init_func_level(card); 4958 4959 rc = qeth_idx_activate_read_channel(card); 4960 if (rc == -EINTR) { 4961 QETH_CARD_TEXT(card, 2, "break2"); 4962 return rc; 4963 } else if (rc) { 4964 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 4965 if (--retries < 0) 4966 goto out; 4967 else 4968 goto retry; 4969 } 4970 4971 rc = qeth_idx_activate_write_channel(card); 4972 if (rc == -EINTR) { 4973 QETH_CARD_TEXT(card, 2, "break3"); 4974 return rc; 4975 } else if (rc) { 4976 QETH_CARD_TEXT_(card, 2, "4err%d", rc); 4977 if (--retries < 0) 4978 goto out; 4979 else 4980 goto retry; 4981 } 4982 card->read_or_write_problem = 0; 4983 rc = qeth_mpc_initialize(card); 4984 if (rc) { 4985 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 4986 goto out; 4987 } 4988 4989 rc = qeth_send_startlan(card); 4990 if (rc) { 4991 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 4992 if (rc == -ENETDOWN) { 4993 dev_warn(&card->gdev->dev, "The LAN is offline\n"); 4994 *carrier_ok = false; 4995 } else { 4996 goto out; 4997 } 4998 } else { 4999 *carrier_ok = true; 5000 } 5001 5002 card->options.ipa4.supported = 0; 5003 card->options.ipa6.supported = 0; 5004 card->options.adp.supported = 0; 5005 card->options.sbp.supported_funcs = 0; 5006 card->info.diagass_support = 0; 5007 rc = qeth_query_ipassists(card, QETH_PROT_IPV4); 5008 if (rc == -ENOMEM) 5009 goto out; 5010 if (qeth_is_supported(card, IPA_IPV6)) { 5011 rc = qeth_query_ipassists(card, QETH_PROT_IPV6); 5012 if (rc == -ENOMEM) 5013 goto out; 5014 } 5015 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { 5016 rc = qeth_query_setadapterparms(card); 5017 if (rc < 0) { 5018 QETH_CARD_TEXT_(card, 2, "7err%d", rc); 5019 goto out; 5020 } 5021 } 5022 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { 5023 rc = qeth_query_setdiagass(card); 5024 if (rc) 5025 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 5026 } 5027 5028 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) || 5029 (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))) 5030 card->info.hwtrap = 0; 5031 5032 rc = qeth_set_access_ctrl_online(card, 0); 5033 if (rc) 5034 goto out; 5035 5036 rc = qeth_init_qdio_queues(card); 5037 if (rc) { 5038 QETH_CARD_TEXT_(card, 2, "9err%d", rc); 5039 goto out; 5040 } 5041 5042 return 0; 5043 out: 5044 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 5045 "an error on the device\n"); 5046 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n", 5047 CARD_DEVID(card), rc); 5048 return rc; 5049 } 5050 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); 5051 5052 static int qeth_set_online(struct qeth_card *card) 5053 { 5054 int rc; 5055 5056 mutex_lock(&card->discipline_mutex); 5057 mutex_lock(&card->conf_mutex); 5058 QETH_CARD_TEXT(card, 2, "setonlin"); 5059 5060 rc = card->discipline->set_online(card); 5061 5062 mutex_unlock(&card->conf_mutex); 5063 mutex_unlock(&card->discipline_mutex); 5064 5065 return rc; 5066 } 5067 5068 int qeth_set_offline(struct qeth_card *card, bool resetting) 5069 { 5070 int rc, rc2, rc3; 5071 5072 mutex_lock(&card->discipline_mutex); 5073 mutex_lock(&card->conf_mutex); 5074 QETH_CARD_TEXT(card, 3, "setoffl"); 5075 5076 if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) { 5077 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 5078 card->info.hwtrap = 1; 5079 } 5080 5081 rtnl_lock(); 5082 card->info.open_when_online = card->dev->flags & IFF_UP; 5083 dev_close(card->dev); 5084 netif_device_detach(card->dev); 5085 netif_carrier_off(card->dev); 5086 rtnl_unlock(); 5087 5088 card->discipline->set_offline(card); 5089 5090 rc = qeth_stop_channel(&card->data); 5091 rc2 = qeth_stop_channel(&card->write); 5092 rc3 = qeth_stop_channel(&card->read); 5093 if (!rc) 5094 rc = (rc2) ? rc2 : rc3; 5095 if (rc) 5096 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 5097 qdio_free(CARD_DDEV(card)); 5098 5099 /* let user_space know that device is offline */ 5100 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5101 5102 mutex_unlock(&card->conf_mutex); 5103 mutex_unlock(&card->discipline_mutex); 5104 return 0; 5105 } 5106 EXPORT_SYMBOL_GPL(qeth_set_offline); 5107 5108 static int qeth_do_reset(void *data) 5109 { 5110 struct qeth_card *card = data; 5111 int rc; 5112 5113 QETH_CARD_TEXT(card, 2, "recover1"); 5114 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) 5115 return 0; 5116 QETH_CARD_TEXT(card, 2, "recover2"); 5117 dev_warn(&card->gdev->dev, 5118 "A recovery process has been started for the device\n"); 5119 5120 qeth_set_offline(card, true); 5121 rc = qeth_set_online(card); 5122 if (!rc) { 5123 dev_info(&card->gdev->dev, 5124 "Device successfully recovered!\n"); 5125 } else { 5126 ccwgroup_set_offline(card->gdev); 5127 dev_warn(&card->gdev->dev, 5128 "The qeth device driver failed to recover an error on the device\n"); 5129 } 5130 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 5131 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 5132 return 0; 5133 } 5134 5135 #if IS_ENABLED(CONFIG_QETH_L3) 5136 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 5137 struct qeth_hdr *hdr) 5138 { 5139 struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data; 5140 struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; 5141 struct net_device *dev = skb->dev; 5142 5143 if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) { 5144 dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, 5145 "FAKELL", skb->len); 5146 return; 5147 } 5148 5149 if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) { 5150 u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 : 5151 ETH_P_IP; 5152 unsigned char tg_addr[ETH_ALEN]; 5153 5154 skb_reset_network_header(skb); 5155 switch (l3_hdr->flags & QETH_HDR_CAST_MASK) { 5156 case QETH_CAST_MULTICAST: 5157 if (prot == ETH_P_IP) 5158 ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); 5159 else 5160 ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); 5161 QETH_CARD_STAT_INC(card, rx_multicast); 5162 break; 5163 case QETH_CAST_BROADCAST: 5164 ether_addr_copy(tg_addr, dev->broadcast); 5165 QETH_CARD_STAT_INC(card, rx_multicast); 5166 break; 5167 default: 5168 if (card->options.sniffer) 5169 skb->pkt_type = PACKET_OTHERHOST; 5170 ether_addr_copy(tg_addr, dev->dev_addr); 5171 } 5172 5173 if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) 5174 dev_hard_header(skb, dev, prot, tg_addr, 5175 &l3_hdr->next_hop.rx.src_mac, skb->len); 5176 else 5177 dev_hard_header(skb, dev, prot, tg_addr, "FAKELL", 5178 skb->len); 5179 } 5180 5181 /* copy VLAN tag from hdr into skb */ 5182 if (!card->options.sniffer && 5183 (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME | 5184 QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { 5185 u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 5186 l3_hdr->vlan_id : 5187 l3_hdr->next_hop.rx.vlan_id; 5188 5189 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); 5190 } 5191 } 5192 #endif 5193 5194 static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb, 5195 struct qeth_hdr *hdr, bool uses_frags) 5196 { 5197 struct napi_struct *napi = &card->napi; 5198 bool is_cso; 5199 5200 switch (hdr->hdr.l2.id) { 5201 case QETH_HEADER_TYPE_OSN: 5202 skb_push(skb, sizeof(*hdr)); 5203 skb_copy_to_linear_data(skb, hdr, sizeof(*hdr)); 5204 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); 5205 QETH_CARD_STAT_INC(card, rx_packets); 5206 5207 card->osn_info.data_cb(skb); 5208 return; 5209 #if IS_ENABLED(CONFIG_QETH_L3) 5210 case QETH_HEADER_TYPE_LAYER3: 5211 qeth_l3_rebuild_skb(card, skb, hdr); 5212 is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5213 break; 5214 #endif 5215 case QETH_HEADER_TYPE_LAYER2: 5216 is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ; 5217 break; 5218 default: 5219 /* never happens */ 5220 if (uses_frags) 5221 napi_free_frags(napi); 5222 else 5223 dev_kfree_skb_any(skb); 5224 return; 5225 } 5226 5227 if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) { 5228 skb->ip_summed = CHECKSUM_UNNECESSARY; 5229 QETH_CARD_STAT_INC(card, rx_skb_csum); 5230 } else { 5231 skb->ip_summed = CHECKSUM_NONE; 5232 } 5233 5234 QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); 5235 QETH_CARD_STAT_INC(card, rx_packets); 5236 if (skb_is_nonlinear(skb)) { 5237 QETH_CARD_STAT_INC(card, rx_sg_skbs); 5238 QETH_CARD_STAT_ADD(card, rx_sg_frags, 5239 skb_shinfo(skb)->nr_frags); 5240 } 5241 5242 if (uses_frags) { 5243 napi_gro_frags(napi); 5244 } else { 5245 skb->protocol = eth_type_trans(skb, skb->dev); 5246 napi_gro_receive(napi, skb); 5247 } 5248 } 5249 5250 static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len) 5251 { 5252 struct page *page = virt_to_page(data); 5253 unsigned int next_frag; 5254 5255 next_frag = skb_shinfo(skb)->nr_frags; 5256 get_page(page); 5257 skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len, 5258 data_len); 5259 } 5260 5261 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 5262 { 5263 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); 5264 } 5265 5266 static int qeth_extract_skb(struct qeth_card *card, 5267 struct qeth_qdio_buffer *qethbuffer, 5268 struct qdio_buffer_element **__element, 5269 int *__offset) 5270 { 5271 struct qdio_buffer_element *element = *__element; 5272 struct qdio_buffer *buffer = qethbuffer->buffer; 5273 struct napi_struct *napi = &card->napi; 5274 unsigned int linear_len = 0; 5275 bool uses_frags = false; 5276 int offset = *__offset; 5277 bool use_rx_sg = false; 5278 unsigned int headroom; 5279 struct qeth_hdr *hdr; 5280 struct sk_buff *skb; 5281 int skb_len = 0; 5282 5283 next_packet: 5284 /* qeth_hdr must not cross element boundaries */ 5285 while (element->length < offset + sizeof(struct qeth_hdr)) { 5286 if (qeth_is_last_sbale(element)) 5287 return -ENODATA; 5288 element++; 5289 offset = 0; 5290 } 5291 5292 hdr = element->addr + offset; 5293 offset += sizeof(*hdr); 5294 skb = NULL; 5295 5296 switch (hdr->hdr.l2.id) { 5297 case QETH_HEADER_TYPE_LAYER2: 5298 skb_len = hdr->hdr.l2.pkt_length; 5299 linear_len = ETH_HLEN; 5300 headroom = 0; 5301 break; 5302 case QETH_HEADER_TYPE_LAYER3: 5303 skb_len = hdr->hdr.l3.length; 5304 if (!IS_LAYER3(card)) { 5305 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5306 goto walk_packet; 5307 } 5308 5309 if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) { 5310 linear_len = ETH_HLEN; 5311 headroom = 0; 5312 break; 5313 } 5314 5315 if (hdr->hdr.l3.flags & QETH_HDR_IPV6) 5316 linear_len = sizeof(struct ipv6hdr); 5317 else 5318 linear_len = sizeof(struct iphdr); 5319 headroom = ETH_HLEN; 5320 break; 5321 case QETH_HEADER_TYPE_OSN: 5322 skb_len = hdr->hdr.osn.pdu_length; 5323 if (!IS_OSN(card)) { 5324 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5325 goto walk_packet; 5326 } 5327 5328 linear_len = skb_len; 5329 headroom = sizeof(struct qeth_hdr); 5330 break; 5331 default: 5332 if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL) 5333 QETH_CARD_STAT_INC(card, rx_frame_errors); 5334 else 5335 QETH_CARD_STAT_INC(card, rx_dropped_notsupp); 5336 5337 /* Can't determine packet length, drop the whole buffer. */ 5338 return -EPROTONOSUPPORT; 5339 } 5340 5341 if (skb_len < linear_len) { 5342 QETH_CARD_STAT_INC(card, rx_dropped_runt); 5343 goto walk_packet; 5344 } 5345 5346 use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || 5347 ((skb_len >= card->options.rx_sg_cb) && 5348 !atomic_read(&card->force_alloc_skb) && 5349 !IS_OSN(card)); 5350 5351 if (use_rx_sg) { 5352 /* QETH_CQ_ENABLED only: */ 5353 if (qethbuffer->rx_skb && 5354 skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) { 5355 skb = qethbuffer->rx_skb; 5356 qethbuffer->rx_skb = NULL; 5357 goto use_skb; 5358 } 5359 5360 skb = napi_get_frags(napi); 5361 if (!skb) { 5362 /* -ENOMEM, no point in falling back further. */ 5363 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5364 goto walk_packet; 5365 } 5366 5367 if (skb_tailroom(skb) >= linear_len + headroom) { 5368 uses_frags = true; 5369 goto use_skb; 5370 } 5371 5372 netdev_info_once(card->dev, 5373 "Insufficient linear space in NAPI frags skb, need %u but have %u\n", 5374 linear_len + headroom, skb_tailroom(skb)); 5375 /* Shouldn't happen. Don't optimize, fall back to linear skb. */ 5376 } 5377 5378 linear_len = skb_len; 5379 skb = napi_alloc_skb(napi, linear_len + headroom); 5380 if (!skb) { 5381 QETH_CARD_STAT_INC(card, rx_dropped_nomem); 5382 goto walk_packet; 5383 } 5384 5385 use_skb: 5386 if (headroom) 5387 skb_reserve(skb, headroom); 5388 walk_packet: 5389 while (skb_len) { 5390 int data_len = min(skb_len, (int)(element->length - offset)); 5391 char *data = element->addr + offset; 5392 5393 skb_len -= data_len; 5394 offset += data_len; 5395 5396 /* Extract data from current element: */ 5397 if (skb && data_len) { 5398 if (linear_len) { 5399 unsigned int copy_len; 5400 5401 copy_len = min_t(unsigned int, linear_len, 5402 data_len); 5403 5404 skb_put_data(skb, data, copy_len); 5405 linear_len -= copy_len; 5406 data_len -= copy_len; 5407 data += copy_len; 5408 } 5409 5410 if (data_len) 5411 qeth_create_skb_frag(skb, data, data_len); 5412 } 5413 5414 /* Step forward to next element: */ 5415 if (skb_len) { 5416 if (qeth_is_last_sbale(element)) { 5417 QETH_CARD_TEXT(card, 4, "unexeob"); 5418 QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); 5419 if (skb) { 5420 if (uses_frags) 5421 napi_free_frags(napi); 5422 else 5423 dev_kfree_skb_any(skb); 5424 QETH_CARD_STAT_INC(card, 5425 rx_length_errors); 5426 } 5427 return -EMSGSIZE; 5428 } 5429 element++; 5430 offset = 0; 5431 } 5432 } 5433 5434 /* This packet was skipped, go get another one: */ 5435 if (!skb) 5436 goto next_packet; 5437 5438 *__element = element; 5439 *__offset = offset; 5440 5441 qeth_receive_skb(card, skb, hdr, uses_frags); 5442 return 0; 5443 } 5444 5445 static int qeth_extract_skbs(struct qeth_card *card, int budget, 5446 struct qeth_qdio_buffer *buf, bool *done) 5447 { 5448 int work_done = 0; 5449 5450 WARN_ON_ONCE(!budget); 5451 *done = false; 5452 5453 while (budget) { 5454 if (qeth_extract_skb(card, buf, &card->rx.b_element, 5455 &card->rx.e_offset)) { 5456 *done = true; 5457 break; 5458 } 5459 5460 work_done++; 5461 budget--; 5462 } 5463 5464 return work_done; 5465 } 5466 5467 int qeth_poll(struct napi_struct *napi, int budget) 5468 { 5469 struct qeth_card *card = container_of(napi, struct qeth_card, napi); 5470 int work_done = 0; 5471 struct qeth_qdio_buffer *buffer; 5472 int new_budget = budget; 5473 bool done; 5474 5475 while (1) { 5476 if (!card->rx.b_count) { 5477 card->rx.qdio_err = 0; 5478 card->rx.b_count = qdio_get_next_buffers( 5479 card->data.ccwdev, 0, &card->rx.b_index, 5480 &card->rx.qdio_err); 5481 if (card->rx.b_count <= 0) { 5482 card->rx.b_count = 0; 5483 break; 5484 } 5485 card->rx.b_element = 5486 &card->qdio.in_q->bufs[card->rx.b_index] 5487 .buffer->element[0]; 5488 card->rx.e_offset = 0; 5489 } 5490 5491 while (card->rx.b_count) { 5492 buffer = &card->qdio.in_q->bufs[card->rx.b_index]; 5493 if (!(card->rx.qdio_err && 5494 qeth_check_qdio_errors(card, buffer->buffer, 5495 card->rx.qdio_err, "qinerr"))) 5496 work_done += qeth_extract_skbs(card, new_budget, 5497 buffer, &done); 5498 else 5499 done = true; 5500 5501 if (done) { 5502 QETH_CARD_STAT_INC(card, rx_bufs); 5503 qeth_put_buffer_pool_entry(card, 5504 buffer->pool_entry); 5505 qeth_queue_input_buffer(card, card->rx.b_index); 5506 card->rx.b_count--; 5507 if (card->rx.b_count) { 5508 card->rx.b_index = 5509 QDIO_BUFNR(card->rx.b_index + 1); 5510 card->rx.b_element = 5511 &card->qdio.in_q 5512 ->bufs[card->rx.b_index] 5513 .buffer->element[0]; 5514 card->rx.e_offset = 0; 5515 } 5516 } 5517 5518 if (work_done >= budget) 5519 goto out; 5520 else 5521 new_budget = budget - work_done; 5522 } 5523 } 5524 5525 if (napi_complete_done(napi, work_done) && 5526 qdio_start_irq(CARD_DDEV(card), 0)) 5527 napi_schedule(napi); 5528 out: 5529 return work_done; 5530 } 5531 EXPORT_SYMBOL_GPL(qeth_poll); 5532 5533 static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, 5534 unsigned int bidx, bool error, int budget) 5535 { 5536 struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx]; 5537 u8 sflags = buffer->buffer->element[15].sflags; 5538 struct qeth_card *card = queue->card; 5539 5540 if (queue->bufstates && (queue->bufstates[bidx].flags & 5541 QDIO_OUTBUF_STATE_FLAG_PENDING)) { 5542 WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); 5543 5544 if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, 5545 QETH_QDIO_BUF_PENDING) == 5546 QETH_QDIO_BUF_PRIMED) 5547 qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); 5548 5549 QETH_CARD_TEXT_(card, 5, "pel%u", bidx); 5550 5551 /* prepare the queue slot for re-use: */ 5552 qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); 5553 if (qeth_init_qdio_out_buf(queue, bidx)) { 5554 QETH_CARD_TEXT(card, 2, "outofbuf"); 5555 qeth_schedule_recovery(card); 5556 } 5557 5558 return; 5559 } 5560 5561 if (card->options.cq == QETH_CQ_ENABLED) 5562 qeth_notify_skbs(queue, buffer, 5563 qeth_compute_cq_notification(sflags, 0)); 5564 qeth_clear_output_buffer(queue, buffer, error, budget); 5565 } 5566 5567 static int qeth_tx_poll(struct napi_struct *napi, int budget) 5568 { 5569 struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi); 5570 unsigned int queue_no = queue->queue_no; 5571 struct qeth_card *card = queue->card; 5572 struct net_device *dev = card->dev; 5573 unsigned int work_done = 0; 5574 struct netdev_queue *txq; 5575 5576 txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no)); 5577 5578 while (1) { 5579 unsigned int start, error, i; 5580 unsigned int packets = 0; 5581 unsigned int bytes = 0; 5582 int completed; 5583 5584 if (qeth_out_queue_is_empty(queue)) { 5585 napi_complete(napi); 5586 return 0; 5587 } 5588 5589 /* Give the CPU a breather: */ 5590 if (work_done >= QDIO_MAX_BUFFERS_PER_Q) { 5591 QETH_TXQ_STAT_INC(queue, completion_yield); 5592 if (napi_complete_done(napi, 0)) 5593 napi_schedule(napi); 5594 return 0; 5595 } 5596 5597 completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false, 5598 &start, &error); 5599 if (completed <= 0) { 5600 /* Ensure we see TX completion for pending work: */ 5601 if (napi_complete_done(napi, 0)) 5602 qeth_tx_arm_timer(queue); 5603 return 0; 5604 } 5605 5606 for (i = start; i < start + completed; i++) { 5607 struct qeth_qdio_out_buffer *buffer; 5608 unsigned int bidx = QDIO_BUFNR(i); 5609 5610 buffer = queue->bufs[bidx]; 5611 packets += skb_queue_len(&buffer->skb_list); 5612 bytes += buffer->bytes; 5613 5614 qeth_handle_send_error(card, buffer, error); 5615 qeth_iqd_tx_complete(queue, bidx, error, budget); 5616 qeth_cleanup_handled_pending(queue, bidx, false); 5617 } 5618 5619 netdev_tx_completed_queue(txq, packets, bytes); 5620 atomic_sub(completed, &queue->used_buffers); 5621 work_done += completed; 5622 5623 /* xmit may have observed the full-condition, but not yet 5624 * stopped the txq. In which case the code below won't trigger. 5625 * So before returning, xmit will re-check the txq's fill level 5626 * and wake it up if needed. 5627 */ 5628 if (netif_tx_queue_stopped(txq) && 5629 !qeth_out_queue_is_full(queue)) 5630 netif_tx_wake_queue(txq); 5631 } 5632 } 5633 5634 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) 5635 { 5636 if (!cmd->hdr.return_code) 5637 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 5638 return cmd->hdr.return_code; 5639 } 5640 5641 static int qeth_setassparms_get_caps_cb(struct qeth_card *card, 5642 struct qeth_reply *reply, 5643 unsigned long data) 5644 { 5645 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 5646 struct qeth_ipa_caps *caps = reply->param; 5647 5648 if (qeth_setassparms_inspect_rc(cmd)) 5649 return -EIO; 5650 5651 caps->supported = cmd->data.setassparms.data.caps.supported; 5652 caps->enabled = cmd->data.setassparms.data.caps.enabled; 5653 return 0; 5654 } 5655 5656 int qeth_setassparms_cb(struct qeth_card *card, 5657 struct qeth_reply *reply, unsigned long data) 5658 { 5659 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 5660 5661 QETH_CARD_TEXT(card, 4, "defadpcb"); 5662 5663 if (cmd->hdr.return_code) 5664 return -EIO; 5665 5666 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 5667 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 5668 card->options.ipa4.enabled = cmd->hdr.assists.enabled; 5669 if (cmd->hdr.prot_version == QETH_PROT_IPV6) 5670 card->options.ipa6.enabled = cmd->hdr.assists.enabled; 5671 return 0; 5672 } 5673 EXPORT_SYMBOL_GPL(qeth_setassparms_cb); 5674 5675 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, 5676 enum qeth_ipa_funcs ipa_func, 5677 u16 cmd_code, 5678 unsigned int data_length, 5679 enum qeth_prot_versions prot) 5680 { 5681 struct qeth_ipacmd_setassparms *setassparms; 5682 struct qeth_ipacmd_setassparms_hdr *hdr; 5683 struct qeth_cmd_buffer *iob; 5684 5685 QETH_CARD_TEXT(card, 4, "getasscm"); 5686 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot, 5687 data_length + 5688 offsetof(struct qeth_ipacmd_setassparms, 5689 data)); 5690 if (!iob) 5691 return NULL; 5692 5693 setassparms = &__ipa_cmd(iob)->data.setassparms; 5694 setassparms->assist_no = ipa_func; 5695 5696 hdr = &setassparms->hdr; 5697 hdr->length = sizeof(*hdr) + data_length; 5698 hdr->command_code = cmd_code; 5699 return iob; 5700 } 5701 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd); 5702 5703 int qeth_send_simple_setassparms_prot(struct qeth_card *card, 5704 enum qeth_ipa_funcs ipa_func, 5705 u16 cmd_code, u32 *data, 5706 enum qeth_prot_versions prot) 5707 { 5708 unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0; 5709 struct qeth_cmd_buffer *iob; 5710 5711 QETH_CARD_TEXT_(card, 4, "simassp%i", prot); 5712 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot); 5713 if (!iob) 5714 return -ENOMEM; 5715 5716 if (data) 5717 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data; 5718 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL); 5719 } 5720 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot); 5721 5722 static void qeth_unregister_dbf_views(void) 5723 { 5724 int x; 5725 for (x = 0; x < QETH_DBF_INFOS; x++) { 5726 debug_unregister(qeth_dbf[x].id); 5727 qeth_dbf[x].id = NULL; 5728 } 5729 } 5730 5731 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) 5732 { 5733 char dbf_txt_buf[32]; 5734 va_list args; 5735 5736 if (!debug_level_enabled(id, level)) 5737 return; 5738 va_start(args, fmt); 5739 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); 5740 va_end(args); 5741 debug_text_event(id, level, dbf_txt_buf); 5742 } 5743 EXPORT_SYMBOL_GPL(qeth_dbf_longtext); 5744 5745 static int qeth_register_dbf_views(void) 5746 { 5747 int ret; 5748 int x; 5749 5750 for (x = 0; x < QETH_DBF_INFOS; x++) { 5751 /* register the areas */ 5752 qeth_dbf[x].id = debug_register(qeth_dbf[x].name, 5753 qeth_dbf[x].pages, 5754 qeth_dbf[x].areas, 5755 qeth_dbf[x].len); 5756 if (qeth_dbf[x].id == NULL) { 5757 qeth_unregister_dbf_views(); 5758 return -ENOMEM; 5759 } 5760 5761 /* register a view */ 5762 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view); 5763 if (ret) { 5764 qeth_unregister_dbf_views(); 5765 return ret; 5766 } 5767 5768 /* set a passing level */ 5769 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level); 5770 } 5771 5772 return 0; 5773 } 5774 5775 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */ 5776 5777 int qeth_core_load_discipline(struct qeth_card *card, 5778 enum qeth_discipline_id discipline) 5779 { 5780 mutex_lock(&qeth_mod_mutex); 5781 switch (discipline) { 5782 case QETH_DISCIPLINE_LAYER3: 5783 card->discipline = try_then_request_module( 5784 symbol_get(qeth_l3_discipline), "qeth_l3"); 5785 break; 5786 case QETH_DISCIPLINE_LAYER2: 5787 card->discipline = try_then_request_module( 5788 symbol_get(qeth_l2_discipline), "qeth_l2"); 5789 break; 5790 default: 5791 break; 5792 } 5793 mutex_unlock(&qeth_mod_mutex); 5794 5795 if (!card->discipline) { 5796 dev_err(&card->gdev->dev, "There is no kernel module to " 5797 "support discipline %d\n", discipline); 5798 return -EINVAL; 5799 } 5800 5801 card->options.layer = discipline; 5802 return 0; 5803 } 5804 5805 void qeth_core_free_discipline(struct qeth_card *card) 5806 { 5807 if (IS_LAYER2(card)) 5808 symbol_put(qeth_l2_discipline); 5809 else 5810 symbol_put(qeth_l3_discipline); 5811 card->options.layer = QETH_DISCIPLINE_UNDETERMINED; 5812 card->discipline = NULL; 5813 } 5814 5815 const struct device_type qeth_generic_devtype = { 5816 .name = "qeth_generic", 5817 .groups = qeth_generic_attr_groups, 5818 }; 5819 EXPORT_SYMBOL_GPL(qeth_generic_devtype); 5820 5821 static const struct device_type qeth_osn_devtype = { 5822 .name = "qeth_osn", 5823 .groups = qeth_osn_attr_groups, 5824 }; 5825 5826 #define DBF_NAME_LEN 20 5827 5828 struct qeth_dbf_entry { 5829 char dbf_name[DBF_NAME_LEN]; 5830 debug_info_t *dbf_info; 5831 struct list_head dbf_list; 5832 }; 5833 5834 static LIST_HEAD(qeth_dbf_list); 5835 static DEFINE_MUTEX(qeth_dbf_list_mutex); 5836 5837 static debug_info_t *qeth_get_dbf_entry(char *name) 5838 { 5839 struct qeth_dbf_entry *entry; 5840 debug_info_t *rc = NULL; 5841 5842 mutex_lock(&qeth_dbf_list_mutex); 5843 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) { 5844 if (strcmp(entry->dbf_name, name) == 0) { 5845 rc = entry->dbf_info; 5846 break; 5847 } 5848 } 5849 mutex_unlock(&qeth_dbf_list_mutex); 5850 return rc; 5851 } 5852 5853 static int qeth_add_dbf_entry(struct qeth_card *card, char *name) 5854 { 5855 struct qeth_dbf_entry *new_entry; 5856 5857 card->debug = debug_register(name, 2, 1, 8); 5858 if (!card->debug) { 5859 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); 5860 goto err; 5861 } 5862 if (debug_register_view(card->debug, &debug_hex_ascii_view)) 5863 goto err_dbg; 5864 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL); 5865 if (!new_entry) 5866 goto err_dbg; 5867 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN); 5868 new_entry->dbf_info = card->debug; 5869 mutex_lock(&qeth_dbf_list_mutex); 5870 list_add(&new_entry->dbf_list, &qeth_dbf_list); 5871 mutex_unlock(&qeth_dbf_list_mutex); 5872 5873 return 0; 5874 5875 err_dbg: 5876 debug_unregister(card->debug); 5877 err: 5878 return -ENOMEM; 5879 } 5880 5881 static void qeth_clear_dbf_list(void) 5882 { 5883 struct qeth_dbf_entry *entry, *tmp; 5884 5885 mutex_lock(&qeth_dbf_list_mutex); 5886 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) { 5887 list_del(&entry->dbf_list); 5888 debug_unregister(entry->dbf_info); 5889 kfree(entry); 5890 } 5891 mutex_unlock(&qeth_dbf_list_mutex); 5892 } 5893 5894 static struct net_device *qeth_alloc_netdev(struct qeth_card *card) 5895 { 5896 struct net_device *dev; 5897 5898 switch (card->info.type) { 5899 case QETH_CARD_TYPE_IQD: 5900 dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN, 5901 ether_setup, QETH_MAX_QUEUES, 1); 5902 break; 5903 case QETH_CARD_TYPE_OSM: 5904 dev = alloc_etherdev(0); 5905 break; 5906 case QETH_CARD_TYPE_OSN: 5907 dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup); 5908 break; 5909 default: 5910 dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1); 5911 } 5912 5913 if (!dev) 5914 return NULL; 5915 5916 dev->ml_priv = card; 5917 dev->watchdog_timeo = QETH_TX_TIMEOUT; 5918 dev->min_mtu = IS_OSN(card) ? 64 : 576; 5919 /* initialized when device first goes online: */ 5920 dev->max_mtu = 0; 5921 dev->mtu = 0; 5922 SET_NETDEV_DEV(dev, &card->gdev->dev); 5923 netif_carrier_off(dev); 5924 5925 if (IS_OSN(card)) { 5926 dev->ethtool_ops = &qeth_osn_ethtool_ops; 5927 } else { 5928 dev->ethtool_ops = &qeth_ethtool_ops; 5929 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 5930 dev->hw_features |= NETIF_F_SG; 5931 dev->vlan_features |= NETIF_F_SG; 5932 if (IS_IQD(card)) { 5933 dev->features |= NETIF_F_SG; 5934 if (netif_set_real_num_tx_queues(dev, 5935 QETH_IQD_MIN_TXQ)) { 5936 free_netdev(dev); 5937 return NULL; 5938 } 5939 } 5940 } 5941 5942 return dev; 5943 } 5944 5945 struct net_device *qeth_clone_netdev(struct net_device *orig) 5946 { 5947 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv); 5948 5949 if (!clone) 5950 return NULL; 5951 5952 clone->dev_port = orig->dev_port; 5953 return clone; 5954 } 5955 5956 static int qeth_core_probe_device(struct ccwgroup_device *gdev) 5957 { 5958 struct qeth_card *card; 5959 struct device *dev; 5960 int rc; 5961 enum qeth_discipline_id enforced_disc; 5962 char dbf_name[DBF_NAME_LEN]; 5963 5964 QETH_DBF_TEXT(SETUP, 2, "probedev"); 5965 5966 dev = &gdev->dev; 5967 if (!get_device(dev)) 5968 return -ENODEV; 5969 5970 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); 5971 5972 card = qeth_alloc_card(gdev); 5973 if (!card) { 5974 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); 5975 rc = -ENOMEM; 5976 goto err_dev; 5977 } 5978 5979 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", 5980 dev_name(&gdev->dev)); 5981 card->debug = qeth_get_dbf_entry(dbf_name); 5982 if (!card->debug) { 5983 rc = qeth_add_dbf_entry(card, dbf_name); 5984 if (rc) 5985 goto err_card; 5986 } 5987 5988 qeth_setup_card(card); 5989 card->dev = qeth_alloc_netdev(card); 5990 if (!card->dev) { 5991 rc = -ENOMEM; 5992 goto err_card; 5993 } 5994 5995 card->qdio.no_out_queues = card->dev->num_tx_queues; 5996 rc = qeth_update_from_chp_desc(card); 5997 if (rc) 5998 goto err_chp_desc; 5999 qeth_determine_capabilities(card); 6000 qeth_set_blkt_defaults(card); 6001 6002 enforced_disc = qeth_enforce_discipline(card); 6003 switch (enforced_disc) { 6004 case QETH_DISCIPLINE_UNDETERMINED: 6005 gdev->dev.type = &qeth_generic_devtype; 6006 break; 6007 default: 6008 card->info.layer_enforced = true; 6009 rc = qeth_core_load_discipline(card, enforced_disc); 6010 if (rc) 6011 goto err_load; 6012 6013 gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype : 6014 card->discipline->devtype; 6015 rc = card->discipline->setup(card->gdev); 6016 if (rc) 6017 goto err_disc; 6018 break; 6019 } 6020 6021 return 0; 6022 6023 err_disc: 6024 qeth_core_free_discipline(card); 6025 err_load: 6026 err_chp_desc: 6027 free_netdev(card->dev); 6028 err_card: 6029 qeth_core_free_card(card); 6030 err_dev: 6031 put_device(dev); 6032 return rc; 6033 } 6034 6035 static void qeth_core_remove_device(struct ccwgroup_device *gdev) 6036 { 6037 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6038 6039 QETH_CARD_TEXT(card, 2, "removedv"); 6040 6041 if (card->discipline) { 6042 card->discipline->remove(gdev); 6043 qeth_core_free_discipline(card); 6044 } 6045 6046 qeth_free_qdio_queues(card); 6047 6048 free_netdev(card->dev); 6049 qeth_core_free_card(card); 6050 put_device(&gdev->dev); 6051 } 6052 6053 static int qeth_core_set_online(struct ccwgroup_device *gdev) 6054 { 6055 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6056 int rc = 0; 6057 enum qeth_discipline_id def_discipline; 6058 6059 if (!card->discipline) { 6060 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : 6061 QETH_DISCIPLINE_LAYER2; 6062 rc = qeth_core_load_discipline(card, def_discipline); 6063 if (rc) 6064 goto err; 6065 rc = card->discipline->setup(card->gdev); 6066 if (rc) { 6067 qeth_core_free_discipline(card); 6068 goto err; 6069 } 6070 } 6071 6072 rc = qeth_set_online(card); 6073 err: 6074 return rc; 6075 } 6076 6077 static int qeth_core_set_offline(struct ccwgroup_device *gdev) 6078 { 6079 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6080 6081 return qeth_set_offline(card, false); 6082 } 6083 6084 static void qeth_core_shutdown(struct ccwgroup_device *gdev) 6085 { 6086 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6087 qeth_set_allowed_threads(card, 0, 1); 6088 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) 6089 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 6090 qeth_qdio_clear_card(card, 0); 6091 qeth_drain_output_queues(card); 6092 qdio_free(CARD_DDEV(card)); 6093 } 6094 6095 static int qeth_suspend(struct ccwgroup_device *gdev) 6096 { 6097 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6098 6099 qeth_set_allowed_threads(card, 0, 1); 6100 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 6101 if (gdev->state == CCWGROUP_OFFLINE) 6102 return 0; 6103 6104 qeth_set_offline(card, false); 6105 return 0; 6106 } 6107 6108 static int qeth_resume(struct ccwgroup_device *gdev) 6109 { 6110 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6111 int rc; 6112 6113 rc = qeth_set_online(card); 6114 6115 qeth_set_allowed_threads(card, 0xffffffff, 0); 6116 if (rc) 6117 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover an error on the device\n"); 6118 return rc; 6119 } 6120 6121 static ssize_t group_store(struct device_driver *ddrv, const char *buf, 6122 size_t count) 6123 { 6124 int err; 6125 6126 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3, 6127 buf); 6128 6129 return err ? err : count; 6130 } 6131 static DRIVER_ATTR_WO(group); 6132 6133 static struct attribute *qeth_drv_attrs[] = { 6134 &driver_attr_group.attr, 6135 NULL, 6136 }; 6137 static struct attribute_group qeth_drv_attr_group = { 6138 .attrs = qeth_drv_attrs, 6139 }; 6140 static const struct attribute_group *qeth_drv_attr_groups[] = { 6141 &qeth_drv_attr_group, 6142 NULL, 6143 }; 6144 6145 static struct ccwgroup_driver qeth_core_ccwgroup_driver = { 6146 .driver = { 6147 .groups = qeth_drv_attr_groups, 6148 .owner = THIS_MODULE, 6149 .name = "qeth", 6150 }, 6151 .ccw_driver = &qeth_ccw_driver, 6152 .setup = qeth_core_probe_device, 6153 .remove = qeth_core_remove_device, 6154 .set_online = qeth_core_set_online, 6155 .set_offline = qeth_core_set_offline, 6156 .shutdown = qeth_core_shutdown, 6157 .prepare = NULL, 6158 .complete = NULL, 6159 .freeze = qeth_suspend, 6160 .thaw = qeth_resume, 6161 .restore = qeth_resume, 6162 }; 6163 6164 struct qeth_card *qeth_get_card_by_busid(char *bus_id) 6165 { 6166 struct ccwgroup_device *gdev; 6167 struct qeth_card *card; 6168 6169 gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id); 6170 if (!gdev) 6171 return NULL; 6172 6173 card = dev_get_drvdata(&gdev->dev); 6174 put_device(&gdev->dev); 6175 return card; 6176 } 6177 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid); 6178 6179 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 6180 { 6181 struct qeth_card *card = dev->ml_priv; 6182 struct mii_ioctl_data *mii_data; 6183 int rc = 0; 6184 6185 if (!card) 6186 return -ENODEV; 6187 6188 switch (cmd) { 6189 case SIOC_QETH_ADP_SET_SNMP_CONTROL: 6190 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); 6191 break; 6192 case SIOC_QETH_GET_CARD_TYPE: 6193 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) && 6194 !IS_VM_NIC(card)) 6195 return 1; 6196 return 0; 6197 case SIOCGMIIPHY: 6198 mii_data = if_mii(rq); 6199 mii_data->phy_id = 0; 6200 break; 6201 case SIOCGMIIREG: 6202 mii_data = if_mii(rq); 6203 if (mii_data->phy_id != 0) 6204 rc = -EINVAL; 6205 else 6206 mii_data->val_out = qeth_mdio_read(dev, 6207 mii_data->phy_id, mii_data->reg_num); 6208 break; 6209 case SIOC_QETH_QUERY_OAT: 6210 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); 6211 break; 6212 default: 6213 if (card->discipline->do_ioctl) 6214 rc = card->discipline->do_ioctl(dev, rq, cmd); 6215 else 6216 rc = -EOPNOTSUPP; 6217 } 6218 if (rc) 6219 QETH_CARD_TEXT_(card, 2, "ioce%x", rc); 6220 return rc; 6221 } 6222 EXPORT_SYMBOL_GPL(qeth_do_ioctl); 6223 6224 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply, 6225 unsigned long data) 6226 { 6227 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6228 u32 *features = reply->param; 6229 6230 if (qeth_setassparms_inspect_rc(cmd)) 6231 return -EIO; 6232 6233 *features = cmd->data.setassparms.data.flags_32bit; 6234 return 0; 6235 } 6236 6237 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype, 6238 enum qeth_prot_versions prot) 6239 { 6240 return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP, 6241 NULL, prot); 6242 } 6243 6244 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype, 6245 enum qeth_prot_versions prot) 6246 { 6247 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP; 6248 struct qeth_cmd_buffer *iob; 6249 struct qeth_ipa_caps caps; 6250 u32 features; 6251 int rc; 6252 6253 /* some L3 HW requires combined L3+L4 csum offload: */ 6254 if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 && 6255 cstype == IPA_OUTBOUND_CHECKSUM) 6256 required_features |= QETH_IPA_CHECKSUM_IP_HDR; 6257 6258 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0, 6259 prot); 6260 if (!iob) 6261 return -ENOMEM; 6262 6263 rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features); 6264 if (rc) 6265 return rc; 6266 6267 if ((required_features & features) != required_features) { 6268 qeth_set_csum_off(card, cstype, prot); 6269 return -EOPNOTSUPP; 6270 } 6271 6272 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 6273 SETASS_DATA_SIZEOF(flags_32bit), 6274 prot); 6275 if (!iob) { 6276 qeth_set_csum_off(card, cstype, prot); 6277 return -ENOMEM; 6278 } 6279 6280 if (features & QETH_IPA_CHECKSUM_LP2LP) 6281 required_features |= QETH_IPA_CHECKSUM_LP2LP; 6282 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features; 6283 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); 6284 if (rc) { 6285 qeth_set_csum_off(card, cstype, prot); 6286 return rc; 6287 } 6288 6289 if (!qeth_ipa_caps_supported(&caps, required_features) || 6290 !qeth_ipa_caps_enabled(&caps, required_features)) { 6291 qeth_set_csum_off(card, cstype, prot); 6292 return -EOPNOTSUPP; 6293 } 6294 6295 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n", 6296 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot); 6297 if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) && 6298 cstype == IPA_OUTBOUND_CHECKSUM) 6299 dev_warn(&card->gdev->dev, 6300 "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n", 6301 QETH_CARD_IFNAME(card)); 6302 return 0; 6303 } 6304 6305 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype, 6306 enum qeth_prot_versions prot) 6307 { 6308 return on ? qeth_set_csum_on(card, cstype, prot) : 6309 qeth_set_csum_off(card, cstype, prot); 6310 } 6311 6312 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply, 6313 unsigned long data) 6314 { 6315 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 6316 struct qeth_tso_start_data *tso_data = reply->param; 6317 6318 if (qeth_setassparms_inspect_rc(cmd)) 6319 return -EIO; 6320 6321 tso_data->mss = cmd->data.setassparms.data.tso.mss; 6322 tso_data->supported = cmd->data.setassparms.data.tso.supported; 6323 return 0; 6324 } 6325 6326 static int qeth_set_tso_off(struct qeth_card *card, 6327 enum qeth_prot_versions prot) 6328 { 6329 return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO, 6330 IPA_CMD_ASS_STOP, NULL, prot); 6331 } 6332 6333 static int qeth_set_tso_on(struct qeth_card *card, 6334 enum qeth_prot_versions prot) 6335 { 6336 struct qeth_tso_start_data tso_data; 6337 struct qeth_cmd_buffer *iob; 6338 struct qeth_ipa_caps caps; 6339 int rc; 6340 6341 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, 6342 IPA_CMD_ASS_START, 0, prot); 6343 if (!iob) 6344 return -ENOMEM; 6345 6346 rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data); 6347 if (rc) 6348 return rc; 6349 6350 if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) { 6351 qeth_set_tso_off(card, prot); 6352 return -EOPNOTSUPP; 6353 } 6354 6355 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, 6356 IPA_CMD_ASS_ENABLE, 6357 SETASS_DATA_SIZEOF(caps), prot); 6358 if (!iob) { 6359 qeth_set_tso_off(card, prot); 6360 return -ENOMEM; 6361 } 6362 6363 /* enable TSO capability */ 6364 __ipa_cmd(iob)->data.setassparms.data.caps.enabled = 6365 QETH_IPA_LARGE_SEND_TCP; 6366 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); 6367 if (rc) { 6368 qeth_set_tso_off(card, prot); 6369 return rc; 6370 } 6371 6372 if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) || 6373 !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) { 6374 qeth_set_tso_off(card, prot); 6375 return -EOPNOTSUPP; 6376 } 6377 6378 dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot, 6379 tso_data.mss); 6380 return 0; 6381 } 6382 6383 static int qeth_set_ipa_tso(struct qeth_card *card, bool on, 6384 enum qeth_prot_versions prot) 6385 { 6386 return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot); 6387 } 6388 6389 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) 6390 { 6391 int rc_ipv4 = (on) ? -EOPNOTSUPP : 0; 6392 int rc_ipv6; 6393 6394 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) 6395 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, 6396 QETH_PROT_IPV4); 6397 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) 6398 /* no/one Offload Assist available, so the rc is trivial */ 6399 return rc_ipv4; 6400 6401 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, 6402 QETH_PROT_IPV6); 6403 6404 if (on) 6405 /* enable: success if any Assist is active */ 6406 return (rc_ipv6) ? rc_ipv4 : 0; 6407 6408 /* disable: failure if any Assist is still active */ 6409 return (rc_ipv6) ? rc_ipv6 : rc_ipv4; 6410 } 6411 6412 /** 6413 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features 6414 * @dev: a net_device 6415 */ 6416 void qeth_enable_hw_features(struct net_device *dev) 6417 { 6418 struct qeth_card *card = dev->ml_priv; 6419 netdev_features_t features; 6420 6421 features = dev->features; 6422 /* force-off any feature that might need an IPA sequence. 6423 * netdev_update_features() will restart them. 6424 */ 6425 dev->features &= ~dev->hw_features; 6426 /* toggle VLAN filter, so that VIDs are re-programmed: */ 6427 if (IS_LAYER2(card) && IS_VM_NIC(card)) { 6428 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 6429 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 6430 } 6431 netdev_update_features(dev); 6432 if (features != dev->features) 6433 dev_warn(&card->gdev->dev, 6434 "Device recovery failed to restore all offload features\n"); 6435 } 6436 EXPORT_SYMBOL_GPL(qeth_enable_hw_features); 6437 6438 int qeth_set_features(struct net_device *dev, netdev_features_t features) 6439 { 6440 struct qeth_card *card = dev->ml_priv; 6441 netdev_features_t changed = dev->features ^ features; 6442 int rc = 0; 6443 6444 QETH_CARD_TEXT(card, 2, "setfeat"); 6445 QETH_CARD_HEX(card, 2, &features, sizeof(features)); 6446 6447 if ((changed & NETIF_F_IP_CSUM)) { 6448 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM, 6449 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4); 6450 if (rc) 6451 changed ^= NETIF_F_IP_CSUM; 6452 } 6453 if (changed & NETIF_F_IPV6_CSUM) { 6454 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM, 6455 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6); 6456 if (rc) 6457 changed ^= NETIF_F_IPV6_CSUM; 6458 } 6459 if (changed & NETIF_F_RXCSUM) { 6460 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM); 6461 if (rc) 6462 changed ^= NETIF_F_RXCSUM; 6463 } 6464 if (changed & NETIF_F_TSO) { 6465 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO, 6466 QETH_PROT_IPV4); 6467 if (rc) 6468 changed ^= NETIF_F_TSO; 6469 } 6470 if (changed & NETIF_F_TSO6) { 6471 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6, 6472 QETH_PROT_IPV6); 6473 if (rc) 6474 changed ^= NETIF_F_TSO6; 6475 } 6476 6477 /* everything changed successfully? */ 6478 if ((dev->features ^ features) == changed) 6479 return 0; 6480 /* something went wrong. save changed features and return error */ 6481 dev->features ^= changed; 6482 return -EIO; 6483 } 6484 EXPORT_SYMBOL_GPL(qeth_set_features); 6485 6486 netdev_features_t qeth_fix_features(struct net_device *dev, 6487 netdev_features_t features) 6488 { 6489 struct qeth_card *card = dev->ml_priv; 6490 6491 QETH_CARD_TEXT(card, 2, "fixfeat"); 6492 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) 6493 features &= ~NETIF_F_IP_CSUM; 6494 if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) 6495 features &= ~NETIF_F_IPV6_CSUM; 6496 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) && 6497 !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) 6498 features &= ~NETIF_F_RXCSUM; 6499 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) 6500 features &= ~NETIF_F_TSO; 6501 if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO)) 6502 features &= ~NETIF_F_TSO6; 6503 6504 QETH_CARD_HEX(card, 2, &features, sizeof(features)); 6505 return features; 6506 } 6507 EXPORT_SYMBOL_GPL(qeth_fix_features); 6508 6509 netdev_features_t qeth_features_check(struct sk_buff *skb, 6510 struct net_device *dev, 6511 netdev_features_t features) 6512 { 6513 /* GSO segmentation builds skbs with 6514 * a (small) linear part for the headers, and 6515 * page frags for the data. 6516 * Compared to a linear skb, the header-only part consumes an 6517 * additional buffer element. This reduces buffer utilization, and 6518 * hurts throughput. So compress small segments into one element. 6519 */ 6520 if (netif_needs_gso(skb, features)) { 6521 /* match skb_segment(): */ 6522 unsigned int doffset = skb->data - skb_mac_header(skb); 6523 unsigned int hsize = skb_shinfo(skb)->gso_size; 6524 unsigned int hroom = skb_headroom(skb); 6525 6526 /* linearize only if resulting skb allocations are order-0: */ 6527 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0)) 6528 features &= ~NETIF_F_SG; 6529 } 6530 6531 return vlan_features_check(skb, features); 6532 } 6533 EXPORT_SYMBOL_GPL(qeth_features_check); 6534 6535 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 6536 { 6537 struct qeth_card *card = dev->ml_priv; 6538 struct qeth_qdio_out_q *queue; 6539 unsigned int i; 6540 6541 QETH_CARD_TEXT(card, 5, "getstat"); 6542 6543 stats->rx_packets = card->stats.rx_packets; 6544 stats->rx_bytes = card->stats.rx_bytes; 6545 stats->rx_errors = card->stats.rx_length_errors + 6546 card->stats.rx_frame_errors + 6547 card->stats.rx_fifo_errors; 6548 stats->rx_dropped = card->stats.rx_dropped_nomem + 6549 card->stats.rx_dropped_notsupp + 6550 card->stats.rx_dropped_runt; 6551 stats->multicast = card->stats.rx_multicast; 6552 stats->rx_length_errors = card->stats.rx_length_errors; 6553 stats->rx_frame_errors = card->stats.rx_frame_errors; 6554 stats->rx_fifo_errors = card->stats.rx_fifo_errors; 6555 6556 for (i = 0; i < card->qdio.no_out_queues; i++) { 6557 queue = card->qdio.out_qs[i]; 6558 6559 stats->tx_packets += queue->stats.tx_packets; 6560 stats->tx_bytes += queue->stats.tx_bytes; 6561 stats->tx_errors += queue->stats.tx_errors; 6562 stats->tx_dropped += queue->stats.tx_dropped; 6563 } 6564 } 6565 EXPORT_SYMBOL_GPL(qeth_get_stats64); 6566 6567 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, 6568 u8 cast_type, struct net_device *sb_dev) 6569 { 6570 if (cast_type != RTN_UNICAST) 6571 return QETH_IQD_MCAST_TXQ; 6572 return QETH_IQD_MIN_UCAST_TXQ; 6573 } 6574 EXPORT_SYMBOL_GPL(qeth_iqd_select_queue); 6575 6576 int qeth_open(struct net_device *dev) 6577 { 6578 struct qeth_card *card = dev->ml_priv; 6579 6580 QETH_CARD_TEXT(card, 4, "qethopen"); 6581 6582 if (qdio_stop_irq(CARD_DDEV(card), 0) < 0) 6583 return -EIO; 6584 6585 card->data.state = CH_STATE_UP; 6586 netif_tx_start_all_queues(dev); 6587 6588 napi_enable(&card->napi); 6589 local_bh_disable(); 6590 napi_schedule(&card->napi); 6591 if (IS_IQD(card)) { 6592 struct qeth_qdio_out_q *queue; 6593 unsigned int i; 6594 6595 qeth_for_each_output_queue(card, queue, i) { 6596 netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll, 6597 QETH_NAPI_WEIGHT); 6598 napi_enable(&queue->napi); 6599 napi_schedule(&queue->napi); 6600 } 6601 } 6602 /* kick-start the NAPI softirq: */ 6603 local_bh_enable(); 6604 return 0; 6605 } 6606 EXPORT_SYMBOL_GPL(qeth_open); 6607 6608 int qeth_stop(struct net_device *dev) 6609 { 6610 struct qeth_card *card = dev->ml_priv; 6611 6612 QETH_CARD_TEXT(card, 4, "qethstop"); 6613 if (IS_IQD(card)) { 6614 struct qeth_qdio_out_q *queue; 6615 unsigned int i; 6616 6617 /* Quiesce the NAPI instances: */ 6618 qeth_for_each_output_queue(card, queue, i) { 6619 napi_disable(&queue->napi); 6620 del_timer_sync(&queue->timer); 6621 } 6622 6623 /* Stop .ndo_start_xmit, might still access queue->napi. */ 6624 netif_tx_disable(dev); 6625 6626 /* Queues may get re-allocated, so remove the NAPIs here. */ 6627 qeth_for_each_output_queue(card, queue, i) 6628 netif_napi_del(&queue->napi); 6629 } else { 6630 netif_tx_disable(dev); 6631 } 6632 6633 napi_disable(&card->napi); 6634 return 0; 6635 } 6636 EXPORT_SYMBOL_GPL(qeth_stop); 6637 6638 static int __init qeth_core_init(void) 6639 { 6640 int rc; 6641 6642 pr_info("loading core functions\n"); 6643 6644 rc = qeth_register_dbf_views(); 6645 if (rc) 6646 goto dbf_err; 6647 qeth_core_root_dev = root_device_register("qeth"); 6648 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); 6649 if (rc) 6650 goto register_err; 6651 qeth_core_header_cache = 6652 kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE, 6653 roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE), 6654 0, NULL); 6655 if (!qeth_core_header_cache) { 6656 rc = -ENOMEM; 6657 goto slab_err; 6658 } 6659 qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf", 6660 sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL); 6661 if (!qeth_qdio_outbuf_cache) { 6662 rc = -ENOMEM; 6663 goto cqslab_err; 6664 } 6665 rc = ccw_driver_register(&qeth_ccw_driver); 6666 if (rc) 6667 goto ccw_err; 6668 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); 6669 if (rc) 6670 goto ccwgroup_err; 6671 6672 return 0; 6673 6674 ccwgroup_err: 6675 ccw_driver_unregister(&qeth_ccw_driver); 6676 ccw_err: 6677 kmem_cache_destroy(qeth_qdio_outbuf_cache); 6678 cqslab_err: 6679 kmem_cache_destroy(qeth_core_header_cache); 6680 slab_err: 6681 root_device_unregister(qeth_core_root_dev); 6682 register_err: 6683 qeth_unregister_dbf_views(); 6684 dbf_err: 6685 pr_err("Initializing the qeth device driver failed\n"); 6686 return rc; 6687 } 6688 6689 static void __exit qeth_core_exit(void) 6690 { 6691 qeth_clear_dbf_list(); 6692 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); 6693 ccw_driver_unregister(&qeth_ccw_driver); 6694 kmem_cache_destroy(qeth_qdio_outbuf_cache); 6695 kmem_cache_destroy(qeth_core_header_cache); 6696 root_device_unregister(qeth_core_root_dev); 6697 qeth_unregister_dbf_views(); 6698 pr_info("core functions removed\n"); 6699 } 6700 6701 module_init(qeth_core_init); 6702 module_exit(qeth_core_exit); 6703 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); 6704 MODULE_DESCRIPTION("qeth core functions"); 6705 MODULE_LICENSE("GPL"); 6706