1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright IBM Corp. 2007, 2009 4 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, 5 * Frank Pavlic <fpavlic@de.ibm.com>, 6 * Thomas Spatzier <tspat@de.ibm.com>, 7 * Frank Blaschka <frank.blaschka@de.ibm.com> 8 */ 9 10 #define KMSG_COMPONENT "qeth" 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 13 #include <linux/compat.h> 14 #include <linux/module.h> 15 #include <linux/moduleparam.h> 16 #include <linux/string.h> 17 #include <linux/errno.h> 18 #include <linux/kernel.h> 19 #include <linux/log2.h> 20 #include <linux/ip.h> 21 #include <linux/tcp.h> 22 #include <linux/mii.h> 23 #include <linux/kthread.h> 24 #include <linux/slab.h> 25 #include <linux/if_vlan.h> 26 #include <linux/netdevice.h> 27 #include <linux/netdev_features.h> 28 #include <linux/skbuff.h> 29 #include <linux/vmalloc.h> 30 31 #include <net/iucv/af_iucv.h> 32 #include <net/dsfield.h> 33 34 #include <asm/ebcdic.h> 35 #include <asm/chpid.h> 36 #include <asm/io.h> 37 #include <asm/sysinfo.h> 38 #include <asm/diag.h> 39 #include <asm/cio.h> 40 #include <asm/ccwdev.h> 41 #include <asm/cpcmd.h> 42 43 #include "qeth_core.h" 44 45 struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { 46 /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ 47 /* N P A M L V H */ 48 [QETH_DBF_SETUP] = {"qeth_setup", 49 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, 50 [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3, 51 &debug_sprintf_view, NULL}, 52 [QETH_DBF_CTRL] = {"qeth_control", 53 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, 54 }; 55 EXPORT_SYMBOL_GPL(qeth_dbf); 56 57 struct kmem_cache *qeth_core_header_cache; 58 EXPORT_SYMBOL_GPL(qeth_core_header_cache); 59 static struct kmem_cache *qeth_qdio_outbuf_cache; 60 61 static struct device *qeth_core_root_dev; 62 static struct lock_class_key qdio_out_skb_queue_key; 63 64 static void qeth_issue_next_read_cb(struct qeth_card *card, 65 struct qeth_channel *channel, 66 struct qeth_cmd_buffer *iob); 67 static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *); 68 static void qeth_free_buffer_pool(struct qeth_card *); 69 static int qeth_qdio_establish(struct qeth_card *); 70 static void qeth_free_qdio_queues(struct qeth_card *card); 71 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, 72 struct qeth_qdio_out_buffer *buf, 73 enum iucv_tx_notify notification); 74 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); 75 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); 76 77 static void qeth_close_dev_handler(struct work_struct *work) 78 { 79 struct qeth_card *card; 80 81 card = container_of(work, struct qeth_card, close_dev_work); 82 QETH_CARD_TEXT(card, 2, "cldevhdl"); 83 ccwgroup_set_offline(card->gdev); 84 } 85 86 static const char *qeth_get_cardname(struct qeth_card *card) 87 { 88 if (IS_VM_NIC(card)) { 89 switch (card->info.type) { 90 case QETH_CARD_TYPE_OSD: 91 return " Virtual NIC QDIO"; 92 case QETH_CARD_TYPE_IQD: 93 return " Virtual NIC Hiper"; 94 case QETH_CARD_TYPE_OSM: 95 return " Virtual NIC QDIO - OSM"; 96 case QETH_CARD_TYPE_OSX: 97 return " Virtual NIC QDIO - OSX"; 98 default: 99 return " unknown"; 100 } 101 } else { 102 switch (card->info.type) { 103 case QETH_CARD_TYPE_OSD: 104 return " OSD Express"; 105 case QETH_CARD_TYPE_IQD: 106 return " HiperSockets"; 107 case QETH_CARD_TYPE_OSN: 108 return " OSN QDIO"; 109 case QETH_CARD_TYPE_OSM: 110 return " OSM QDIO"; 111 case QETH_CARD_TYPE_OSX: 112 return " OSX QDIO"; 113 default: 114 return " unknown"; 115 } 116 } 117 return " n/a"; 118 } 119 120 /* max length to be returned: 14 */ 121 const char *qeth_get_cardname_short(struct qeth_card *card) 122 { 123 if (IS_VM_NIC(card)) { 124 switch (card->info.type) { 125 case QETH_CARD_TYPE_OSD: 126 return "Virt.NIC QDIO"; 127 case QETH_CARD_TYPE_IQD: 128 return "Virt.NIC Hiper"; 129 case QETH_CARD_TYPE_OSM: 130 return "Virt.NIC OSM"; 131 case QETH_CARD_TYPE_OSX: 132 return "Virt.NIC OSX"; 133 default: 134 return "unknown"; 135 } 136 } else { 137 switch (card->info.type) { 138 case QETH_CARD_TYPE_OSD: 139 switch (card->info.link_type) { 140 case QETH_LINK_TYPE_FAST_ETH: 141 return "OSD_100"; 142 case QETH_LINK_TYPE_HSTR: 143 return "HSTR"; 144 case QETH_LINK_TYPE_GBIT_ETH: 145 return "OSD_1000"; 146 case QETH_LINK_TYPE_10GBIT_ETH: 147 return "OSD_10GIG"; 148 case QETH_LINK_TYPE_25GBIT_ETH: 149 return "OSD_25GIG"; 150 case QETH_LINK_TYPE_LANE_ETH100: 151 return "OSD_FE_LANE"; 152 case QETH_LINK_TYPE_LANE_TR: 153 return "OSD_TR_LANE"; 154 case QETH_LINK_TYPE_LANE_ETH1000: 155 return "OSD_GbE_LANE"; 156 case QETH_LINK_TYPE_LANE: 157 return "OSD_ATM_LANE"; 158 default: 159 return "OSD_Express"; 160 } 161 case QETH_CARD_TYPE_IQD: 162 return "HiperSockets"; 163 case QETH_CARD_TYPE_OSN: 164 return "OSN"; 165 case QETH_CARD_TYPE_OSM: 166 return "OSM_1000"; 167 case QETH_CARD_TYPE_OSX: 168 return "OSX_10GIG"; 169 default: 170 return "unknown"; 171 } 172 } 173 return "n/a"; 174 } 175 176 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, 177 int clear_start_mask) 178 { 179 unsigned long flags; 180 181 spin_lock_irqsave(&card->thread_mask_lock, flags); 182 card->thread_allowed_mask = threads; 183 if (clear_start_mask) 184 card->thread_start_mask &= threads; 185 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 186 wake_up(&card->wait_q); 187 } 188 EXPORT_SYMBOL_GPL(qeth_set_allowed_threads); 189 190 int qeth_threads_running(struct qeth_card *card, unsigned long threads) 191 { 192 unsigned long flags; 193 int rc = 0; 194 195 spin_lock_irqsave(&card->thread_mask_lock, flags); 196 rc = (card->thread_running_mask & threads); 197 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 198 return rc; 199 } 200 EXPORT_SYMBOL_GPL(qeth_threads_running); 201 202 void qeth_clear_working_pool_list(struct qeth_card *card) 203 { 204 struct qeth_buffer_pool_entry *pool_entry, *tmp; 205 206 QETH_CARD_TEXT(card, 5, "clwrklst"); 207 list_for_each_entry_safe(pool_entry, tmp, 208 &card->qdio.in_buf_pool.entry_list, list){ 209 list_del(&pool_entry->list); 210 } 211 } 212 EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list); 213 214 static int qeth_alloc_buffer_pool(struct qeth_card *card) 215 { 216 struct qeth_buffer_pool_entry *pool_entry; 217 void *ptr; 218 int i, j; 219 220 QETH_CARD_TEXT(card, 5, "alocpool"); 221 for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { 222 pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL); 223 if (!pool_entry) { 224 qeth_free_buffer_pool(card); 225 return -ENOMEM; 226 } 227 for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) { 228 ptr = (void *) __get_free_page(GFP_KERNEL); 229 if (!ptr) { 230 while (j > 0) 231 free_page((unsigned long) 232 pool_entry->elements[--j]); 233 kfree(pool_entry); 234 qeth_free_buffer_pool(card); 235 return -ENOMEM; 236 } 237 pool_entry->elements[j] = ptr; 238 } 239 list_add(&pool_entry->init_list, 240 &card->qdio.init_pool.entry_list); 241 } 242 return 0; 243 } 244 245 int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) 246 { 247 QETH_CARD_TEXT(card, 2, "realcbp"); 248 249 if (card->state != CARD_STATE_DOWN) 250 return -EPERM; 251 252 /* TODO: steel/add buffers from/to a running card's buffer pool (?) */ 253 qeth_clear_working_pool_list(card); 254 qeth_free_buffer_pool(card); 255 card->qdio.in_buf_pool.buf_count = bufcnt; 256 card->qdio.init_pool.buf_count = bufcnt; 257 return qeth_alloc_buffer_pool(card); 258 } 259 EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool); 260 261 static void qeth_free_qdio_queue(struct qeth_qdio_q *q) 262 { 263 if (!q) 264 return; 265 266 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 267 kfree(q); 268 } 269 270 static struct qeth_qdio_q *qeth_alloc_qdio_queue(void) 271 { 272 struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL); 273 int i; 274 275 if (!q) 276 return NULL; 277 278 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { 279 kfree(q); 280 return NULL; 281 } 282 283 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) 284 q->bufs[i].buffer = q->qdio_bufs[i]; 285 286 QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *)); 287 return q; 288 } 289 290 static int qeth_cq_init(struct qeth_card *card) 291 { 292 int rc; 293 294 if (card->options.cq == QETH_CQ_ENABLED) { 295 QETH_DBF_TEXT(SETUP, 2, "cqinit"); 296 qdio_reset_buffers(card->qdio.c_q->qdio_bufs, 297 QDIO_MAX_BUFFERS_PER_Q); 298 card->qdio.c_q->next_buf_to_init = 127; 299 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 300 card->qdio.no_in_queues - 1, 0, 301 127); 302 if (rc) { 303 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 304 goto out; 305 } 306 } 307 rc = 0; 308 out: 309 return rc; 310 } 311 312 static int qeth_alloc_cq(struct qeth_card *card) 313 { 314 int rc; 315 316 if (card->options.cq == QETH_CQ_ENABLED) { 317 int i; 318 struct qdio_outbuf_state *outbuf_states; 319 320 QETH_DBF_TEXT(SETUP, 2, "cqon"); 321 card->qdio.c_q = qeth_alloc_qdio_queue(); 322 if (!card->qdio.c_q) { 323 rc = -1; 324 goto kmsg_out; 325 } 326 card->qdio.no_in_queues = 2; 327 card->qdio.out_bufstates = 328 kcalloc(card->qdio.no_out_queues * 329 QDIO_MAX_BUFFERS_PER_Q, 330 sizeof(struct qdio_outbuf_state), 331 GFP_KERNEL); 332 outbuf_states = card->qdio.out_bufstates; 333 if (outbuf_states == NULL) { 334 rc = -1; 335 goto free_cq_out; 336 } 337 for (i = 0; i < card->qdio.no_out_queues; ++i) { 338 card->qdio.out_qs[i]->bufstates = outbuf_states; 339 outbuf_states += QDIO_MAX_BUFFERS_PER_Q; 340 } 341 } else { 342 QETH_DBF_TEXT(SETUP, 2, "nocq"); 343 card->qdio.c_q = NULL; 344 card->qdio.no_in_queues = 1; 345 } 346 QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues); 347 rc = 0; 348 out: 349 return rc; 350 free_cq_out: 351 qeth_free_qdio_queue(card->qdio.c_q); 352 card->qdio.c_q = NULL; 353 kmsg_out: 354 dev_err(&card->gdev->dev, "Failed to create completion queue\n"); 355 goto out; 356 } 357 358 static void qeth_free_cq(struct qeth_card *card) 359 { 360 if (card->qdio.c_q) { 361 --card->qdio.no_in_queues; 362 qeth_free_qdio_queue(card->qdio.c_q); 363 card->qdio.c_q = NULL; 364 } 365 kfree(card->qdio.out_bufstates); 366 card->qdio.out_bufstates = NULL; 367 } 368 369 static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, 370 int delayed) 371 { 372 enum iucv_tx_notify n; 373 374 switch (sbalf15) { 375 case 0: 376 n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK; 377 break; 378 case 4: 379 case 16: 380 case 17: 381 case 18: 382 n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE : 383 TX_NOTIFY_UNREACHABLE; 384 break; 385 default: 386 n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR : 387 TX_NOTIFY_GENERALERROR; 388 break; 389 } 390 391 return n; 392 } 393 394 static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, 395 int forced_cleanup) 396 { 397 if (q->card->options.cq != QETH_CQ_ENABLED) 398 return; 399 400 if (q->bufs[bidx]->next_pending != NULL) { 401 struct qeth_qdio_out_buffer *head = q->bufs[bidx]; 402 struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending; 403 404 while (c) { 405 if (forced_cleanup || 406 atomic_read(&c->state) == 407 QETH_QDIO_BUF_HANDLED_DELAYED) { 408 struct qeth_qdio_out_buffer *f = c; 409 QETH_CARD_TEXT(f->q->card, 5, "fp"); 410 QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f); 411 /* release here to avoid interleaving between 412 outbound tasklet and inbound tasklet 413 regarding notifications and lifecycle */ 414 qeth_release_skbs(c); 415 416 c = f->next_pending; 417 WARN_ON_ONCE(head->next_pending != f); 418 head->next_pending = c; 419 kmem_cache_free(qeth_qdio_outbuf_cache, f); 420 } else { 421 head = c; 422 c = c->next_pending; 423 } 424 425 } 426 } 427 if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) == 428 QETH_QDIO_BUF_HANDLED_DELAYED)) { 429 /* for recovery situations */ 430 qeth_init_qdio_out_buf(q, bidx); 431 QETH_CARD_TEXT(q->card, 2, "clprecov"); 432 } 433 } 434 435 436 static void qeth_qdio_handle_aob(struct qeth_card *card, 437 unsigned long phys_aob_addr) 438 { 439 struct qaob *aob; 440 struct qeth_qdio_out_buffer *buffer; 441 enum iucv_tx_notify notification; 442 unsigned int i; 443 444 aob = (struct qaob *) phys_to_virt(phys_aob_addr); 445 QETH_CARD_TEXT(card, 5, "haob"); 446 QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr); 447 buffer = (struct qeth_qdio_out_buffer *) aob->user1; 448 QETH_CARD_TEXT_(card, 5, "%lx", aob->user1); 449 450 if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, 451 QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) { 452 notification = TX_NOTIFY_OK; 453 } else { 454 WARN_ON_ONCE(atomic_read(&buffer->state) != 455 QETH_QDIO_BUF_PENDING); 456 atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ); 457 notification = TX_NOTIFY_DELAYED_OK; 458 } 459 460 if (aob->aorc != 0) { 461 QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc); 462 notification = qeth_compute_cq_notification(aob->aorc, 1); 463 } 464 qeth_notify_skbs(buffer->q, buffer, notification); 465 466 /* Free dangling allocations. The attached skbs are handled by 467 * qeth_cleanup_handled_pending(). 468 */ 469 for (i = 0; 470 i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); 471 i++) { 472 if (aob->sba[i] && buffer->is_header[i]) 473 kmem_cache_free(qeth_core_header_cache, 474 (void *) aob->sba[i]); 475 } 476 atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); 477 478 qdio_release_aob(aob); 479 } 480 481 static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) 482 { 483 return card->options.cq == QETH_CQ_ENABLED && 484 card->qdio.c_q != NULL && 485 queue != 0 && 486 queue == card->qdio.no_in_queues - 1; 487 } 488 489 static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u32 len, void *data) 490 { 491 ccw->cmd_code = cmd_code; 492 ccw->flags = CCW_FLAG_SLI; 493 ccw->count = len; 494 ccw->cda = (__u32) __pa(data); 495 } 496 497 static int __qeth_issue_next_read(struct qeth_card *card) 498 { 499 struct qeth_channel *channel = &card->read; 500 struct qeth_cmd_buffer *iob; 501 int rc; 502 503 QETH_CARD_TEXT(card, 5, "issnxrd"); 504 if (channel->state != CH_STATE_UP) 505 return -EIO; 506 iob = qeth_get_buffer(channel); 507 if (!iob) { 508 dev_warn(&card->gdev->dev, "The qeth device driver " 509 "failed to recover an error on the device\n"); 510 QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n", 511 CARD_DEVID(card)); 512 return -ENOMEM; 513 } 514 515 qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); 516 iob->callback = qeth_issue_next_read_cb; 517 QETH_CARD_TEXT(card, 6, "noirqpnd"); 518 rc = ccw_device_start(channel->ccwdev, channel->ccw, 519 (addr_t) iob, 0, 0); 520 if (rc) { 521 QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", 522 rc, CARD_DEVID(card)); 523 atomic_set(&channel->irq_pending, 0); 524 qeth_release_buffer(channel, iob); 525 card->read_or_write_problem = 1; 526 qeth_schedule_recovery(card); 527 wake_up(&card->wait_q); 528 } 529 return rc; 530 } 531 532 static int qeth_issue_next_read(struct qeth_card *card) 533 { 534 int ret; 535 536 spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); 537 ret = __qeth_issue_next_read(card); 538 spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); 539 540 return ret; 541 } 542 543 static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) 544 { 545 struct qeth_reply *reply; 546 547 reply = kzalloc(sizeof(*reply), GFP_KERNEL); 548 if (reply) { 549 refcount_set(&reply->refcnt, 1); 550 init_completion(&reply->received); 551 } 552 return reply; 553 } 554 555 static void qeth_get_reply(struct qeth_reply *reply) 556 { 557 refcount_inc(&reply->refcnt); 558 } 559 560 static void qeth_put_reply(struct qeth_reply *reply) 561 { 562 if (refcount_dec_and_test(&reply->refcnt)) 563 kfree(reply); 564 } 565 566 static void qeth_enqueue_reply(struct qeth_card *card, struct qeth_reply *reply) 567 { 568 spin_lock_irq(&card->lock); 569 list_add_tail(&reply->list, &card->cmd_waiter_list); 570 spin_unlock_irq(&card->lock); 571 } 572 573 static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply) 574 { 575 spin_lock_irq(&card->lock); 576 list_del(&reply->list); 577 spin_unlock_irq(&card->lock); 578 } 579 580 static void qeth_notify_reply(struct qeth_reply *reply, int reason) 581 { 582 reply->rc = reason; 583 complete(&reply->received); 584 } 585 586 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, 587 struct qeth_card *card) 588 { 589 const char *ipa_name; 590 int com = cmd->hdr.command; 591 ipa_name = qeth_get_ipa_cmd_name(com); 592 593 if (rc) 594 QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n", 595 ipa_name, com, CARD_DEVID(card), rc, 596 qeth_get_ipa_msg(rc)); 597 else 598 QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n", 599 ipa_name, com, CARD_DEVID(card)); 600 } 601 602 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, 603 struct qeth_ipa_cmd *cmd) 604 { 605 QETH_CARD_TEXT(card, 5, "chkipad"); 606 607 if (IS_IPA_REPLY(cmd)) { 608 if (cmd->hdr.command != IPA_CMD_SETCCID && 609 cmd->hdr.command != IPA_CMD_DELCCID && 610 cmd->hdr.command != IPA_CMD_MODCCID && 611 cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) 612 qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); 613 return cmd; 614 } 615 616 /* handle unsolicited event: */ 617 switch (cmd->hdr.command) { 618 case IPA_CMD_STOPLAN: 619 if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) { 620 dev_err(&card->gdev->dev, 621 "Interface %s is down because the adjacent port is no longer in reflective relay mode\n", 622 QETH_CARD_IFNAME(card)); 623 schedule_work(&card->close_dev_work); 624 } else { 625 dev_warn(&card->gdev->dev, 626 "The link for interface %s on CHPID 0x%X failed\n", 627 QETH_CARD_IFNAME(card), card->info.chpid); 628 qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); 629 netif_carrier_off(card->dev); 630 } 631 return NULL; 632 case IPA_CMD_STARTLAN: 633 dev_info(&card->gdev->dev, 634 "The link for %s on CHPID 0x%X has been restored\n", 635 QETH_CARD_IFNAME(card), card->info.chpid); 636 if (card->info.hwtrap) 637 card->info.hwtrap = 2; 638 qeth_schedule_recovery(card); 639 return NULL; 640 case IPA_CMD_SETBRIDGEPORT_IQD: 641 case IPA_CMD_SETBRIDGEPORT_OSA: 642 case IPA_CMD_ADDRESS_CHANGE_NOTIF: 643 if (card->discipline->control_event_handler(card, cmd)) 644 return cmd; 645 return NULL; 646 case IPA_CMD_MODCCID: 647 return cmd; 648 case IPA_CMD_REGISTER_LOCAL_ADDR: 649 QETH_CARD_TEXT(card, 3, "irla"); 650 return NULL; 651 case IPA_CMD_UNREGISTER_LOCAL_ADDR: 652 QETH_CARD_TEXT(card, 3, "urla"); 653 return NULL; 654 default: 655 QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n"); 656 return cmd; 657 } 658 } 659 660 void qeth_clear_ipacmd_list(struct qeth_card *card) 661 { 662 struct qeth_reply *reply; 663 unsigned long flags; 664 665 QETH_CARD_TEXT(card, 4, "clipalst"); 666 667 spin_lock_irqsave(&card->lock, flags); 668 list_for_each_entry(reply, &card->cmd_waiter_list, list) 669 qeth_notify_reply(reply, -EIO); 670 spin_unlock_irqrestore(&card->lock, flags); 671 } 672 EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); 673 674 static int qeth_check_idx_response(struct qeth_card *card, 675 unsigned char *buffer) 676 { 677 QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); 678 if ((buffer[2] & 0xc0) == 0xc0) { 679 QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n", 680 buffer[4]); 681 QETH_CARD_TEXT(card, 2, "ckidxres"); 682 QETH_CARD_TEXT(card, 2, " idxterm"); 683 QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); 684 if (buffer[4] == 0xf6) { 685 dev_err(&card->gdev->dev, 686 "The qeth device is not configured " 687 "for the OSI layer required by z/VM\n"); 688 return -EPERM; 689 } 690 return -EIO; 691 } 692 return 0; 693 } 694 695 static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel) 696 { 697 __u8 index; 698 699 index = channel->io_buf_no; 700 do { 701 if (channel->iob[index].state == BUF_STATE_FREE) { 702 channel->iob[index].state = BUF_STATE_LOCKED; 703 channel->iob[index].timeout = QETH_TIMEOUT; 704 channel->io_buf_no = (channel->io_buf_no + 1) % 705 QETH_CMD_BUFFER_NO; 706 memset(channel->iob[index].data, 0, QETH_BUFSIZE); 707 return channel->iob + index; 708 } 709 index = (index + 1) % QETH_CMD_BUFFER_NO; 710 } while (index != channel->io_buf_no); 711 712 return NULL; 713 } 714 715 void qeth_release_buffer(struct qeth_channel *channel, 716 struct qeth_cmd_buffer *iob) 717 { 718 unsigned long flags; 719 720 spin_lock_irqsave(&channel->iob_lock, flags); 721 iob->state = BUF_STATE_FREE; 722 iob->callback = NULL; 723 if (iob->reply) { 724 qeth_put_reply(iob->reply); 725 iob->reply = NULL; 726 } 727 spin_unlock_irqrestore(&channel->iob_lock, flags); 728 wake_up(&channel->wait_q); 729 } 730 EXPORT_SYMBOL_GPL(qeth_release_buffer); 731 732 static void qeth_release_buffer_cb(struct qeth_card *card, 733 struct qeth_channel *channel, 734 struct qeth_cmd_buffer *iob) 735 { 736 qeth_release_buffer(channel, iob); 737 } 738 739 static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc) 740 { 741 struct qeth_reply *reply = iob->reply; 742 743 if (reply) 744 qeth_notify_reply(reply, rc); 745 qeth_release_buffer(iob->channel, iob); 746 } 747 748 static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel) 749 { 750 struct qeth_cmd_buffer *buffer = NULL; 751 unsigned long flags; 752 753 spin_lock_irqsave(&channel->iob_lock, flags); 754 buffer = __qeth_get_buffer(channel); 755 spin_unlock_irqrestore(&channel->iob_lock, flags); 756 return buffer; 757 } 758 759 struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel) 760 { 761 struct qeth_cmd_buffer *buffer; 762 wait_event(channel->wait_q, 763 ((buffer = qeth_get_buffer(channel)) != NULL)); 764 return buffer; 765 } 766 EXPORT_SYMBOL_GPL(qeth_wait_for_buffer); 767 768 void qeth_clear_cmd_buffers(struct qeth_channel *channel) 769 { 770 int cnt; 771 772 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) 773 qeth_release_buffer(channel, &channel->iob[cnt]); 774 channel->io_buf_no = 0; 775 } 776 EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); 777 778 static void qeth_issue_next_read_cb(struct qeth_card *card, 779 struct qeth_channel *channel, 780 struct qeth_cmd_buffer *iob) 781 { 782 struct qeth_ipa_cmd *cmd = NULL; 783 struct qeth_reply *reply = NULL; 784 struct qeth_reply *r; 785 unsigned long flags; 786 int rc = 0; 787 788 QETH_CARD_TEXT(card, 4, "sndctlcb"); 789 rc = qeth_check_idx_response(card, iob->data); 790 switch (rc) { 791 case 0: 792 break; 793 case -EIO: 794 qeth_clear_ipacmd_list(card); 795 qeth_schedule_recovery(card); 796 /* fall through */ 797 default: 798 goto out; 799 } 800 801 if (IS_IPA(iob->data)) { 802 cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); 803 cmd = qeth_check_ipa_data(card, cmd); 804 if (!cmd) 805 goto out; 806 if (IS_OSN(card) && card->osn_info.assist_cb && 807 cmd->hdr.command != IPA_CMD_STARTLAN) { 808 card->osn_info.assist_cb(card->dev, cmd); 809 goto out; 810 } 811 } else { 812 /* non-IPA commands should only flow during initialization */ 813 if (card->state != CARD_STATE_DOWN) 814 goto out; 815 } 816 817 /* match against pending cmd requests */ 818 spin_lock_irqsave(&card->lock, flags); 819 list_for_each_entry(r, &card->cmd_waiter_list, list) { 820 if ((r->seqno == QETH_IDX_COMMAND_SEQNO) || 821 (cmd && (r->seqno == cmd->hdr.seqno))) { 822 reply = r; 823 /* take the object outside the lock */ 824 qeth_get_reply(reply); 825 break; 826 } 827 } 828 spin_unlock_irqrestore(&card->lock, flags); 829 830 if (!reply) 831 goto out; 832 833 if (!reply->callback) { 834 rc = 0; 835 } else { 836 if (cmd) { 837 reply->offset = (u16)((char *)cmd - (char *)iob->data); 838 rc = reply->callback(card, reply, (unsigned long)cmd); 839 } else { 840 rc = reply->callback(card, reply, (unsigned long)iob); 841 } 842 } 843 844 if (rc <= 0) 845 qeth_notify_reply(reply, rc); 846 qeth_put_reply(reply); 847 848 out: 849 memcpy(&card->seqno.pdu_hdr_ack, 850 QETH_PDU_HEADER_SEQ_NO(iob->data), 851 QETH_SEQ_NO_LENGTH); 852 qeth_release_buffer(channel, iob); 853 } 854 855 static int qeth_set_thread_start_bit(struct qeth_card *card, 856 unsigned long thread) 857 { 858 unsigned long flags; 859 860 spin_lock_irqsave(&card->thread_mask_lock, flags); 861 if (!(card->thread_allowed_mask & thread) || 862 (card->thread_start_mask & thread)) { 863 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 864 return -EPERM; 865 } 866 card->thread_start_mask |= thread; 867 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 868 return 0; 869 } 870 871 void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread) 872 { 873 unsigned long flags; 874 875 spin_lock_irqsave(&card->thread_mask_lock, flags); 876 card->thread_start_mask &= ~thread; 877 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 878 wake_up(&card->wait_q); 879 } 880 EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit); 881 882 void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) 883 { 884 unsigned long flags; 885 886 spin_lock_irqsave(&card->thread_mask_lock, flags); 887 card->thread_running_mask &= ~thread; 888 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 889 wake_up_all(&card->wait_q); 890 } 891 EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); 892 893 static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) 894 { 895 unsigned long flags; 896 int rc = 0; 897 898 spin_lock_irqsave(&card->thread_mask_lock, flags); 899 if (card->thread_start_mask & thread) { 900 if ((card->thread_allowed_mask & thread) && 901 !(card->thread_running_mask & thread)) { 902 rc = 1; 903 card->thread_start_mask &= ~thread; 904 card->thread_running_mask |= thread; 905 } else 906 rc = -EPERM; 907 } 908 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 909 return rc; 910 } 911 912 int qeth_do_run_thread(struct qeth_card *card, unsigned long thread) 913 { 914 int rc = 0; 915 916 wait_event(card->wait_q, 917 (rc = __qeth_do_run_thread(card, thread)) >= 0); 918 return rc; 919 } 920 EXPORT_SYMBOL_GPL(qeth_do_run_thread); 921 922 void qeth_schedule_recovery(struct qeth_card *card) 923 { 924 QETH_CARD_TEXT(card, 2, "startrec"); 925 if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) 926 schedule_work(&card->kernel_thread_starter); 927 } 928 EXPORT_SYMBOL_GPL(qeth_schedule_recovery); 929 930 static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev, 931 struct irb *irb) 932 { 933 int dstat, cstat; 934 char *sense; 935 936 sense = (char *) irb->ecw; 937 cstat = irb->scsw.cmd.cstat; 938 dstat = irb->scsw.cmd.dstat; 939 940 if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | 941 SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | 942 SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { 943 QETH_CARD_TEXT(card, 2, "CGENCHK"); 944 dev_warn(&cdev->dev, "The qeth device driver " 945 "failed to recover an error on the device\n"); 946 QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n", 947 CCW_DEVID(cdev), dstat, cstat); 948 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, 949 16, 1, irb, 64, 1); 950 return 1; 951 } 952 953 if (dstat & DEV_STAT_UNIT_CHECK) { 954 if (sense[SENSE_RESETTING_EVENT_BYTE] & 955 SENSE_RESETTING_EVENT_FLAG) { 956 QETH_CARD_TEXT(card, 2, "REVIND"); 957 return 1; 958 } 959 if (sense[SENSE_COMMAND_REJECT_BYTE] & 960 SENSE_COMMAND_REJECT_FLAG) { 961 QETH_CARD_TEXT(card, 2, "CMDREJi"); 962 return 1; 963 } 964 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { 965 QETH_CARD_TEXT(card, 2, "AFFE"); 966 return 1; 967 } 968 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { 969 QETH_CARD_TEXT(card, 2, "ZEROSEN"); 970 return 0; 971 } 972 QETH_CARD_TEXT(card, 2, "DGENCHK"); 973 return 1; 974 } 975 return 0; 976 } 977 978 static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev, 979 unsigned long intparm, struct irb *irb) 980 { 981 if (!IS_ERR(irb)) 982 return 0; 983 984 switch (PTR_ERR(irb)) { 985 case -EIO: 986 QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n", 987 CCW_DEVID(cdev)); 988 QETH_CARD_TEXT(card, 2, "ckirberr"); 989 QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); 990 return -EIO; 991 case -ETIMEDOUT: 992 dev_warn(&cdev->dev, "A hardware operation timed out" 993 " on the device\n"); 994 QETH_CARD_TEXT(card, 2, "ckirberr"); 995 QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); 996 if (intparm == QETH_RCD_PARM) { 997 if (card->data.ccwdev == cdev) { 998 card->data.state = CH_STATE_DOWN; 999 wake_up(&card->wait_q); 1000 } 1001 } 1002 return -ETIMEDOUT; 1003 default: 1004 QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n", 1005 PTR_ERR(irb), CCW_DEVID(cdev)); 1006 QETH_CARD_TEXT(card, 2, "ckirberr"); 1007 QETH_CARD_TEXT(card, 2, " rc???"); 1008 return PTR_ERR(irb); 1009 } 1010 } 1011 1012 static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, 1013 struct irb *irb) 1014 { 1015 int rc; 1016 int cstat, dstat; 1017 struct qeth_cmd_buffer *iob = NULL; 1018 struct ccwgroup_device *gdev; 1019 struct qeth_channel *channel; 1020 struct qeth_card *card; 1021 1022 /* while we hold the ccwdev lock, this stays valid: */ 1023 gdev = dev_get_drvdata(&cdev->dev); 1024 card = dev_get_drvdata(&gdev->dev); 1025 if (!card) 1026 return; 1027 1028 QETH_CARD_TEXT(card, 5, "irq"); 1029 1030 if (card->read.ccwdev == cdev) { 1031 channel = &card->read; 1032 QETH_CARD_TEXT(card, 5, "read"); 1033 } else if (card->write.ccwdev == cdev) { 1034 channel = &card->write; 1035 QETH_CARD_TEXT(card, 5, "write"); 1036 } else { 1037 channel = &card->data; 1038 QETH_CARD_TEXT(card, 5, "data"); 1039 } 1040 1041 if (qeth_intparm_is_iob(intparm)) 1042 iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); 1043 1044 rc = qeth_check_irb_error(card, cdev, intparm, irb); 1045 if (rc) { 1046 /* IO was terminated, free its resources. */ 1047 if (iob) 1048 qeth_cancel_cmd(iob, rc); 1049 atomic_set(&channel->irq_pending, 0); 1050 wake_up(&card->wait_q); 1051 return; 1052 } 1053 1054 atomic_set(&channel->irq_pending, 0); 1055 1056 if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC)) 1057 channel->state = CH_STATE_STOPPED; 1058 1059 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC)) 1060 channel->state = CH_STATE_HALTED; 1061 1062 /*let's wake up immediately on data channel*/ 1063 if ((channel == &card->data) && (intparm != 0) && 1064 (intparm != QETH_RCD_PARM)) 1065 goto out; 1066 1067 if (intparm == QETH_CLEAR_CHANNEL_PARM) { 1068 QETH_CARD_TEXT(card, 6, "clrchpar"); 1069 /* we don't have to handle this further */ 1070 intparm = 0; 1071 } 1072 if (intparm == QETH_HALT_CHANNEL_PARM) { 1073 QETH_CARD_TEXT(card, 6, "hltchpar"); 1074 /* we don't have to handle this further */ 1075 intparm = 0; 1076 } 1077 1078 cstat = irb->scsw.cmd.cstat; 1079 dstat = irb->scsw.cmd.dstat; 1080 1081 if ((dstat & DEV_STAT_UNIT_EXCEP) || 1082 (dstat & DEV_STAT_UNIT_CHECK) || 1083 (cstat)) { 1084 if (irb->esw.esw0.erw.cons) { 1085 dev_warn(&channel->ccwdev->dev, 1086 "The qeth device driver failed to recover " 1087 "an error on the device\n"); 1088 QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n", 1089 CCW_DEVID(channel->ccwdev), cstat, 1090 dstat); 1091 print_hex_dump(KERN_WARNING, "qeth: irb ", 1092 DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); 1093 print_hex_dump(KERN_WARNING, "qeth: sense data ", 1094 DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1); 1095 } 1096 if (intparm == QETH_RCD_PARM) { 1097 channel->state = CH_STATE_DOWN; 1098 goto out; 1099 } 1100 rc = qeth_get_problem(card, cdev, irb); 1101 if (rc) { 1102 card->read_or_write_problem = 1; 1103 if (iob) 1104 qeth_cancel_cmd(iob, rc); 1105 qeth_clear_ipacmd_list(card); 1106 qeth_schedule_recovery(card); 1107 goto out; 1108 } 1109 } 1110 1111 if (intparm == QETH_RCD_PARM) { 1112 channel->state = CH_STATE_RCD_DONE; 1113 goto out; 1114 } 1115 if (channel == &card->data) 1116 return; 1117 if (channel == &card->read && 1118 channel->state == CH_STATE_UP) 1119 __qeth_issue_next_read(card); 1120 1121 if (iob && iob->callback) 1122 iob->callback(card, iob->channel, iob); 1123 1124 out: 1125 wake_up(&card->wait_q); 1126 return; 1127 } 1128 1129 static void qeth_notify_skbs(struct qeth_qdio_out_q *q, 1130 struct qeth_qdio_out_buffer *buf, 1131 enum iucv_tx_notify notification) 1132 { 1133 struct sk_buff *skb; 1134 1135 skb_queue_walk(&buf->skb_list, skb) { 1136 QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); 1137 QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); 1138 if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk) 1139 iucv_sk(skb->sk)->sk_txnotify(skb, notification); 1140 } 1141 } 1142 1143 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) 1144 { 1145 struct sk_buff *skb; 1146 1147 /* release may never happen from within CQ tasklet scope */ 1148 WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); 1149 1150 if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) 1151 qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR); 1152 1153 while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) 1154 consume_skb(skb); 1155 } 1156 1157 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 1158 struct qeth_qdio_out_buffer *buf) 1159 { 1160 int i; 1161 1162 /* is PCI flag set on buffer? */ 1163 if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) 1164 atomic_dec(&queue->set_pci_flags_count); 1165 1166 qeth_release_skbs(buf); 1167 1168 for (i = 0; i < queue->max_elements; ++i) { 1169 if (buf->buffer->element[i].addr && buf->is_header[i]) 1170 kmem_cache_free(qeth_core_header_cache, 1171 buf->buffer->element[i].addr); 1172 buf->is_header[i] = 0; 1173 } 1174 1175 qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements); 1176 buf->next_element_to_fill = 0; 1177 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 1178 } 1179 1180 static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free) 1181 { 1182 int j; 1183 1184 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 1185 if (!q->bufs[j]) 1186 continue; 1187 qeth_cleanup_handled_pending(q, j, 1); 1188 qeth_clear_output_buffer(q, q->bufs[j]); 1189 if (free) { 1190 kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); 1191 q->bufs[j] = NULL; 1192 } 1193 } 1194 } 1195 1196 void qeth_drain_output_queues(struct qeth_card *card) 1197 { 1198 int i; 1199 1200 QETH_CARD_TEXT(card, 2, "clearqdbf"); 1201 /* clear outbound buffers to free skbs */ 1202 for (i = 0; i < card->qdio.no_out_queues; ++i) { 1203 if (card->qdio.out_qs[i]) 1204 qeth_drain_output_queue(card->qdio.out_qs[i], false); 1205 } 1206 } 1207 EXPORT_SYMBOL_GPL(qeth_drain_output_queues); 1208 1209 static void qeth_free_buffer_pool(struct qeth_card *card) 1210 { 1211 struct qeth_buffer_pool_entry *pool_entry, *tmp; 1212 int i = 0; 1213 list_for_each_entry_safe(pool_entry, tmp, 1214 &card->qdio.init_pool.entry_list, init_list){ 1215 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) 1216 free_page((unsigned long)pool_entry->elements[i]); 1217 list_del(&pool_entry->init_list); 1218 kfree(pool_entry); 1219 } 1220 } 1221 1222 static void qeth_clean_channel(struct qeth_channel *channel) 1223 { 1224 struct ccw_device *cdev = channel->ccwdev; 1225 int cnt; 1226 1227 QETH_DBF_TEXT(SETUP, 2, "freech"); 1228 1229 spin_lock_irq(get_ccwdev_lock(cdev)); 1230 cdev->handler = NULL; 1231 spin_unlock_irq(get_ccwdev_lock(cdev)); 1232 1233 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) 1234 kfree(channel->iob[cnt].data); 1235 kfree(channel->ccw); 1236 } 1237 1238 static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) 1239 { 1240 struct ccw_device *cdev = channel->ccwdev; 1241 int cnt; 1242 1243 QETH_DBF_TEXT(SETUP, 2, "setupch"); 1244 1245 channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); 1246 if (!channel->ccw) 1247 return -ENOMEM; 1248 channel->state = CH_STATE_DOWN; 1249 atomic_set(&channel->irq_pending, 0); 1250 init_waitqueue_head(&channel->wait_q); 1251 1252 spin_lock_irq(get_ccwdev_lock(cdev)); 1253 cdev->handler = qeth_irq; 1254 spin_unlock_irq(get_ccwdev_lock(cdev)); 1255 1256 if (!alloc_buffers) 1257 return 0; 1258 1259 for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) { 1260 channel->iob[cnt].data = kmalloc(QETH_BUFSIZE, 1261 GFP_KERNEL | GFP_DMA); 1262 if (channel->iob[cnt].data == NULL) 1263 break; 1264 channel->iob[cnt].state = BUF_STATE_FREE; 1265 channel->iob[cnt].channel = channel; 1266 } 1267 if (cnt < QETH_CMD_BUFFER_NO) { 1268 qeth_clean_channel(channel); 1269 return -ENOMEM; 1270 } 1271 channel->io_buf_no = 0; 1272 spin_lock_init(&channel->iob_lock); 1273 1274 return 0; 1275 } 1276 1277 static int qeth_osa_set_output_queues(struct qeth_card *card, bool single) 1278 { 1279 unsigned int count = single ? 1 : card->dev->num_tx_queues; 1280 int rc; 1281 1282 rtnl_lock(); 1283 rc = netif_set_real_num_tx_queues(card->dev, count); 1284 rtnl_unlock(); 1285 1286 if (rc) 1287 return rc; 1288 1289 if (card->qdio.no_out_queues == count) 1290 return 0; 1291 1292 if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) 1293 qeth_free_qdio_queues(card); 1294 1295 if (count == 1) 1296 dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); 1297 1298 card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE; 1299 card->qdio.no_out_queues = count; 1300 return 0; 1301 } 1302 1303 static int qeth_update_from_chp_desc(struct qeth_card *card) 1304 { 1305 struct ccw_device *ccwdev; 1306 struct channel_path_desc_fmt0 *chp_dsc; 1307 int rc = 0; 1308 1309 QETH_DBF_TEXT(SETUP, 2, "chp_desc"); 1310 1311 ccwdev = card->data.ccwdev; 1312 chp_dsc = ccw_device_get_chp_desc(ccwdev, 0); 1313 if (!chp_dsc) 1314 return -ENOMEM; 1315 1316 card->info.func_level = 0x4100 + chp_dsc->desc; 1317 1318 if (IS_OSD(card) || IS_OSX(card)) 1319 /* CHPP field bit 6 == 1 -> single queue */ 1320 rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02); 1321 1322 kfree(chp_dsc); 1323 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); 1324 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); 1325 return rc; 1326 } 1327 1328 static void qeth_init_qdio_info(struct qeth_card *card) 1329 { 1330 QETH_DBF_TEXT(SETUP, 4, "intqdinf"); 1331 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 1332 card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; 1333 card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; 1334 1335 /* inbound */ 1336 card->qdio.no_in_queues = 1; 1337 card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; 1338 if (IS_IQD(card)) 1339 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT; 1340 else 1341 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; 1342 card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count; 1343 INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); 1344 INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); 1345 } 1346 1347 static void qeth_set_initial_options(struct qeth_card *card) 1348 { 1349 card->options.route4.type = NO_ROUTER; 1350 card->options.route6.type = NO_ROUTER; 1351 card->options.rx_sg_cb = QETH_RX_SG_CB; 1352 card->options.isolation = ISOLATION_MODE_NONE; 1353 card->options.cq = QETH_CQ_DISABLED; 1354 card->options.layer = QETH_DISCIPLINE_UNDETERMINED; 1355 } 1356 1357 static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) 1358 { 1359 unsigned long flags; 1360 int rc = 0; 1361 1362 spin_lock_irqsave(&card->thread_mask_lock, flags); 1363 QETH_CARD_TEXT_(card, 4, " %02x%02x%02x", 1364 (u8) card->thread_start_mask, 1365 (u8) card->thread_allowed_mask, 1366 (u8) card->thread_running_mask); 1367 rc = (card->thread_start_mask & thread); 1368 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 1369 return rc; 1370 } 1371 1372 static void qeth_start_kernel_thread(struct work_struct *work) 1373 { 1374 struct task_struct *ts; 1375 struct qeth_card *card = container_of(work, struct qeth_card, 1376 kernel_thread_starter); 1377 QETH_CARD_TEXT(card , 2, "strthrd"); 1378 1379 if (card->read.state != CH_STATE_UP && 1380 card->write.state != CH_STATE_UP) 1381 return; 1382 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) { 1383 ts = kthread_run(card->discipline->recover, (void *)card, 1384 "qeth_recover"); 1385 if (IS_ERR(ts)) { 1386 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 1387 qeth_clear_thread_running_bit(card, 1388 QETH_RECOVER_THREAD); 1389 } 1390 } 1391 } 1392 1393 static void qeth_buffer_reclaim_work(struct work_struct *); 1394 static void qeth_setup_card(struct qeth_card *card) 1395 { 1396 QETH_DBF_TEXT(SETUP, 2, "setupcrd"); 1397 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 1398 1399 card->info.type = CARD_RDEV(card)->id.driver_info; 1400 card->state = CARD_STATE_DOWN; 1401 spin_lock_init(&card->lock); 1402 spin_lock_init(&card->thread_mask_lock); 1403 mutex_init(&card->conf_mutex); 1404 mutex_init(&card->discipline_mutex); 1405 INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); 1406 INIT_LIST_HEAD(&card->cmd_waiter_list); 1407 init_waitqueue_head(&card->wait_q); 1408 qeth_set_initial_options(card); 1409 /* IP address takeover */ 1410 INIT_LIST_HEAD(&card->ipato.entries); 1411 qeth_init_qdio_info(card); 1412 INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); 1413 INIT_WORK(&card->close_dev_work, qeth_close_dev_handler); 1414 } 1415 1416 static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) 1417 { 1418 struct qeth_card *card = container_of(slr, struct qeth_card, 1419 qeth_service_level); 1420 if (card->info.mcl_level[0]) 1421 seq_printf(m, "qeth: %s firmware level %s\n", 1422 CARD_BUS_ID(card), card->info.mcl_level); 1423 } 1424 1425 static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) 1426 { 1427 struct qeth_card *card; 1428 1429 QETH_DBF_TEXT(SETUP, 2, "alloccrd"); 1430 card = kzalloc(sizeof(*card), GFP_KERNEL); 1431 if (!card) 1432 goto out; 1433 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 1434 1435 card->gdev = gdev; 1436 dev_set_drvdata(&gdev->dev, card); 1437 CARD_RDEV(card) = gdev->cdev[0]; 1438 CARD_WDEV(card) = gdev->cdev[1]; 1439 CARD_DDEV(card) = gdev->cdev[2]; 1440 1441 card->event_wq = alloc_ordered_workqueue("%s_event", 0, 1442 dev_name(&gdev->dev)); 1443 if (!card->event_wq) 1444 goto out_wq; 1445 if (qeth_setup_channel(&card->read, true)) 1446 goto out_ip; 1447 if (qeth_setup_channel(&card->write, true)) 1448 goto out_channel; 1449 if (qeth_setup_channel(&card->data, false)) 1450 goto out_data; 1451 card->qeth_service_level.seq_print = qeth_core_sl_print; 1452 register_service_level(&card->qeth_service_level); 1453 return card; 1454 1455 out_data: 1456 qeth_clean_channel(&card->write); 1457 out_channel: 1458 qeth_clean_channel(&card->read); 1459 out_ip: 1460 destroy_workqueue(card->event_wq); 1461 out_wq: 1462 dev_set_drvdata(&gdev->dev, NULL); 1463 kfree(card); 1464 out: 1465 return NULL; 1466 } 1467 1468 static int qeth_clear_channel(struct qeth_card *card, 1469 struct qeth_channel *channel) 1470 { 1471 int rc; 1472 1473 QETH_CARD_TEXT(card, 3, "clearch"); 1474 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1475 rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); 1476 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1477 1478 if (rc) 1479 return rc; 1480 rc = wait_event_interruptible_timeout(card->wait_q, 1481 channel->state == CH_STATE_STOPPED, QETH_TIMEOUT); 1482 if (rc == -ERESTARTSYS) 1483 return rc; 1484 if (channel->state != CH_STATE_STOPPED) 1485 return -ETIME; 1486 channel->state = CH_STATE_DOWN; 1487 return 0; 1488 } 1489 1490 static int qeth_halt_channel(struct qeth_card *card, 1491 struct qeth_channel *channel) 1492 { 1493 int rc; 1494 1495 QETH_CARD_TEXT(card, 3, "haltch"); 1496 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1497 rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); 1498 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1499 1500 if (rc) 1501 return rc; 1502 rc = wait_event_interruptible_timeout(card->wait_q, 1503 channel->state == CH_STATE_HALTED, QETH_TIMEOUT); 1504 if (rc == -ERESTARTSYS) 1505 return rc; 1506 if (channel->state != CH_STATE_HALTED) 1507 return -ETIME; 1508 return 0; 1509 } 1510 1511 static int qeth_halt_channels(struct qeth_card *card) 1512 { 1513 int rc1 = 0, rc2 = 0, rc3 = 0; 1514 1515 QETH_CARD_TEXT(card, 3, "haltchs"); 1516 rc1 = qeth_halt_channel(card, &card->read); 1517 rc2 = qeth_halt_channel(card, &card->write); 1518 rc3 = qeth_halt_channel(card, &card->data); 1519 if (rc1) 1520 return rc1; 1521 if (rc2) 1522 return rc2; 1523 return rc3; 1524 } 1525 1526 static int qeth_clear_channels(struct qeth_card *card) 1527 { 1528 int rc1 = 0, rc2 = 0, rc3 = 0; 1529 1530 QETH_CARD_TEXT(card, 3, "clearchs"); 1531 rc1 = qeth_clear_channel(card, &card->read); 1532 rc2 = qeth_clear_channel(card, &card->write); 1533 rc3 = qeth_clear_channel(card, &card->data); 1534 if (rc1) 1535 return rc1; 1536 if (rc2) 1537 return rc2; 1538 return rc3; 1539 } 1540 1541 static int qeth_clear_halt_card(struct qeth_card *card, int halt) 1542 { 1543 int rc = 0; 1544 1545 QETH_CARD_TEXT(card, 3, "clhacrd"); 1546 1547 if (halt) 1548 rc = qeth_halt_channels(card); 1549 if (rc) 1550 return rc; 1551 return qeth_clear_channels(card); 1552 } 1553 1554 int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) 1555 { 1556 int rc = 0; 1557 1558 QETH_CARD_TEXT(card, 3, "qdioclr"); 1559 switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, 1560 QETH_QDIO_CLEANING)) { 1561 case QETH_QDIO_ESTABLISHED: 1562 if (IS_IQD(card)) 1563 rc = qdio_shutdown(CARD_DDEV(card), 1564 QDIO_FLAG_CLEANUP_USING_HALT); 1565 else 1566 rc = qdio_shutdown(CARD_DDEV(card), 1567 QDIO_FLAG_CLEANUP_USING_CLEAR); 1568 if (rc) 1569 QETH_CARD_TEXT_(card, 3, "1err%d", rc); 1570 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 1571 break; 1572 case QETH_QDIO_CLEANING: 1573 return rc; 1574 default: 1575 break; 1576 } 1577 rc = qeth_clear_halt_card(card, use_halt); 1578 if (rc) 1579 QETH_CARD_TEXT_(card, 3, "2err%d", rc); 1580 card->state = CARD_STATE_DOWN; 1581 return rc; 1582 } 1583 EXPORT_SYMBOL_GPL(qeth_qdio_clear_card); 1584 1585 static int qeth_read_conf_data(struct qeth_card *card, void **buffer, 1586 int *length) 1587 { 1588 struct ciw *ciw; 1589 char *rcd_buf; 1590 int ret; 1591 struct qeth_channel *channel = &card->data; 1592 1593 /* 1594 * scan for RCD command in extended SenseID data 1595 */ 1596 ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD); 1597 if (!ciw || ciw->cmd == 0) 1598 return -EOPNOTSUPP; 1599 rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA); 1600 if (!rcd_buf) 1601 return -ENOMEM; 1602 1603 qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf); 1604 channel->state = CH_STATE_RCD; 1605 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1606 ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw, 1607 QETH_RCD_PARM, LPM_ANYPATH, 0, 1608 QETH_RCD_TIMEOUT); 1609 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1610 if (!ret) 1611 wait_event(card->wait_q, 1612 (channel->state == CH_STATE_RCD_DONE || 1613 channel->state == CH_STATE_DOWN)); 1614 if (channel->state == CH_STATE_DOWN) 1615 ret = -EIO; 1616 else 1617 channel->state = CH_STATE_DOWN; 1618 if (ret) { 1619 kfree(rcd_buf); 1620 *buffer = NULL; 1621 *length = 0; 1622 } else { 1623 *length = ciw->count; 1624 *buffer = rcd_buf; 1625 } 1626 return ret; 1627 } 1628 1629 static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd) 1630 { 1631 QETH_DBF_TEXT(SETUP, 2, "cfgunit"); 1632 card->info.chpid = prcd[30]; 1633 card->info.unit_addr2 = prcd[31]; 1634 card->info.cula = prcd[63]; 1635 card->info.is_vm_nic = ((prcd[0x10] == _ascebc['V']) && 1636 (prcd[0x11] == _ascebc['M'])); 1637 } 1638 1639 static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card) 1640 { 1641 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; 1642 struct diag26c_vnic_resp *response = NULL; 1643 struct diag26c_vnic_req *request = NULL; 1644 struct ccw_dev_id id; 1645 char userid[80]; 1646 int rc = 0; 1647 1648 QETH_DBF_TEXT(SETUP, 2, "vmlayer"); 1649 1650 cpcmd("QUERY USERID", userid, sizeof(userid), &rc); 1651 if (rc) 1652 goto out; 1653 1654 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 1655 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 1656 if (!request || !response) { 1657 rc = -ENOMEM; 1658 goto out; 1659 } 1660 1661 ccw_device_get_id(CARD_RDEV(card), &id); 1662 request->resp_buf_len = sizeof(*response); 1663 request->resp_version = DIAG26C_VERSION6_VM65918; 1664 request->req_format = DIAG26C_VNIC_INFO; 1665 ASCEBC(userid, 8); 1666 memcpy(&request->sys_name, userid, 8); 1667 request->devno = id.devno; 1668 1669 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 1670 rc = diag26c(request, response, DIAG26C_PORT_VNIC); 1671 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 1672 if (rc) 1673 goto out; 1674 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 1675 1676 if (request->resp_buf_len < sizeof(*response) || 1677 response->version != request->resp_version) { 1678 rc = -EIO; 1679 goto out; 1680 } 1681 1682 if (response->protocol == VNIC_INFO_PROT_L2) 1683 disc = QETH_DISCIPLINE_LAYER2; 1684 else if (response->protocol == VNIC_INFO_PROT_L3) 1685 disc = QETH_DISCIPLINE_LAYER3; 1686 1687 out: 1688 kfree(response); 1689 kfree(request); 1690 if (rc) 1691 QETH_DBF_TEXT_(SETUP, 2, "err%x", rc); 1692 return disc; 1693 } 1694 1695 /* Determine whether the device requires a specific layer discipline */ 1696 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card) 1697 { 1698 enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; 1699 1700 if (IS_OSM(card) || IS_OSN(card)) 1701 disc = QETH_DISCIPLINE_LAYER2; 1702 else if (IS_VM_NIC(card)) 1703 disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : 1704 qeth_vm_detect_layer(card); 1705 1706 switch (disc) { 1707 case QETH_DISCIPLINE_LAYER2: 1708 QETH_DBF_TEXT(SETUP, 3, "force l2"); 1709 break; 1710 case QETH_DISCIPLINE_LAYER3: 1711 QETH_DBF_TEXT(SETUP, 3, "force l3"); 1712 break; 1713 default: 1714 QETH_DBF_TEXT(SETUP, 3, "force no"); 1715 } 1716 1717 return disc; 1718 } 1719 1720 static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd) 1721 { 1722 QETH_DBF_TEXT(SETUP, 2, "cfgblkt"); 1723 1724 if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && 1725 prcd[76] >= 0xF1 && prcd[76] <= 0xF4) { 1726 card->info.blkt.time_total = 0; 1727 card->info.blkt.inter_packet = 0; 1728 card->info.blkt.inter_packet_jumbo = 0; 1729 } else { 1730 card->info.blkt.time_total = 250; 1731 card->info.blkt.inter_packet = 5; 1732 card->info.blkt.inter_packet_jumbo = 15; 1733 } 1734 } 1735 1736 static void qeth_init_tokens(struct qeth_card *card) 1737 { 1738 card->token.issuer_rm_w = 0x00010103UL; 1739 card->token.cm_filter_w = 0x00010108UL; 1740 card->token.cm_connection_w = 0x0001010aUL; 1741 card->token.ulp_filter_w = 0x0001010bUL; 1742 card->token.ulp_connection_w = 0x0001010dUL; 1743 } 1744 1745 static void qeth_init_func_level(struct qeth_card *card) 1746 { 1747 switch (card->info.type) { 1748 case QETH_CARD_TYPE_IQD: 1749 card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; 1750 break; 1751 case QETH_CARD_TYPE_OSD: 1752 case QETH_CARD_TYPE_OSN: 1753 card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD; 1754 break; 1755 default: 1756 break; 1757 } 1758 } 1759 1760 static void qeth_idx_finalize_cmd(struct qeth_card *card, 1761 struct qeth_cmd_buffer *iob, 1762 unsigned int length) 1763 { 1764 qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, length, iob->data); 1765 1766 memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, 1767 QETH_SEQ_NO_LENGTH); 1768 if (iob->channel == &card->write) 1769 card->seqno.trans_hdr++; 1770 } 1771 1772 static int qeth_peer_func_level(int level) 1773 { 1774 if ((level & 0xff) == 8) 1775 return (level & 0xff) + 0x400; 1776 if (((level >> 8) & 3) == 1) 1777 return (level & 0xff) + 0x200; 1778 return level; 1779 } 1780 1781 static void qeth_mpc_finalize_cmd(struct qeth_card *card, 1782 struct qeth_cmd_buffer *iob, 1783 unsigned int length) 1784 { 1785 qeth_idx_finalize_cmd(card, iob, length); 1786 1787 memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data), 1788 &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH); 1789 card->seqno.pdu_hdr++; 1790 memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), 1791 &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); 1792 1793 iob->reply->seqno = QETH_IDX_COMMAND_SEQNO; 1794 iob->callback = qeth_release_buffer_cb; 1795 } 1796 1797 /** 1798 * qeth_send_control_data() - send control command to the card 1799 * @card: qeth_card structure pointer 1800 * @len: size of the command buffer 1801 * @iob: qeth_cmd_buffer pointer 1802 * @reply_cb: callback function pointer 1803 * @cb_card: pointer to the qeth_card structure 1804 * @cb_reply: pointer to the qeth_reply structure 1805 * @cb_cmd: pointer to the original iob for non-IPA 1806 * commands, or to the qeth_ipa_cmd structure 1807 * for the IPA commands. 1808 * @reply_param: private pointer passed to the callback 1809 * 1810 * Callback function gets called one or more times, with cb_cmd 1811 * pointing to the response returned by the hardware. Callback 1812 * function must return 1813 * > 0 if more reply blocks are expected, 1814 * 0 if the last or only reply block is received, and 1815 * < 0 on error. 1816 * Callback function can get the value of the reply_param pointer from the 1817 * field 'param' of the structure qeth_reply. 1818 */ 1819 1820 static int qeth_send_control_data(struct qeth_card *card, int len, 1821 struct qeth_cmd_buffer *iob, 1822 int (*reply_cb)(struct qeth_card *cb_card, 1823 struct qeth_reply *cb_reply, 1824 unsigned long cb_cmd), 1825 void *reply_param) 1826 { 1827 struct qeth_channel *channel = iob->channel; 1828 long timeout = iob->timeout; 1829 int rc; 1830 struct qeth_reply *reply = NULL; 1831 1832 QETH_CARD_TEXT(card, 2, "sendctl"); 1833 1834 reply = qeth_alloc_reply(card); 1835 if (!reply) { 1836 qeth_release_buffer(channel, iob); 1837 return -ENOMEM; 1838 } 1839 reply->callback = reply_cb; 1840 reply->param = reply_param; 1841 1842 /* pairs with qeth_release_buffer(): */ 1843 qeth_get_reply(reply); 1844 iob->reply = reply; 1845 1846 timeout = wait_event_interruptible_timeout(card->wait_q, 1847 qeth_trylock_channel(channel), 1848 timeout); 1849 if (timeout <= 0) { 1850 qeth_put_reply(reply); 1851 qeth_release_buffer(channel, iob); 1852 return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; 1853 } 1854 1855 iob->finalize(card, iob, len); 1856 QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN)); 1857 1858 qeth_enqueue_reply(card, reply); 1859 1860 QETH_CARD_TEXT(card, 6, "noirqpnd"); 1861 spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); 1862 rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, 1863 (addr_t) iob, 0, 0, timeout); 1864 spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); 1865 if (rc) { 1866 QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", 1867 CARD_DEVID(card), rc); 1868 QETH_CARD_TEXT_(card, 2, " err%d", rc); 1869 qeth_dequeue_reply(card, reply); 1870 qeth_put_reply(reply); 1871 qeth_release_buffer(channel, iob); 1872 atomic_set(&channel->irq_pending, 0); 1873 wake_up(&card->wait_q); 1874 return rc; 1875 } 1876 1877 timeout = wait_for_completion_interruptible_timeout(&reply->received, 1878 timeout); 1879 if (timeout <= 0) 1880 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; 1881 1882 qeth_dequeue_reply(card, reply); 1883 if (!rc) 1884 rc = reply->rc; 1885 qeth_put_reply(reply); 1886 return rc; 1887 } 1888 1889 static int qeth_idx_check_activate_response(struct qeth_card *card, 1890 struct qeth_channel *channel, 1891 struct qeth_cmd_buffer *iob) 1892 { 1893 int rc; 1894 1895 rc = qeth_check_idx_response(card, iob->data); 1896 if (rc) 1897 return rc; 1898 1899 if (QETH_IS_IDX_ACT_POS_REPLY(iob->data)) 1900 return 0; 1901 1902 /* negative reply: */ 1903 QETH_DBF_TEXT_(SETUP, 2, "idxneg%c", 1904 QETH_IDX_ACT_CAUSE_CODE(iob->data)); 1905 1906 switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { 1907 case QETH_IDX_ACT_ERR_EXCL: 1908 dev_err(&channel->ccwdev->dev, 1909 "The adapter is used exclusively by another host\n"); 1910 return -EBUSY; 1911 case QETH_IDX_ACT_ERR_AUTH: 1912 case QETH_IDX_ACT_ERR_AUTH_USER: 1913 dev_err(&channel->ccwdev->dev, 1914 "Setting the device online failed because of insufficient authorization\n"); 1915 return -EPERM; 1916 default: 1917 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", 1918 CCW_DEVID(channel->ccwdev)); 1919 return -EIO; 1920 } 1921 } 1922 1923 static void qeth_idx_query_read_cb(struct qeth_card *card, 1924 struct qeth_channel *channel, 1925 struct qeth_cmd_buffer *iob) 1926 { 1927 u16 peer_level; 1928 int rc; 1929 1930 QETH_DBF_TEXT(SETUP, 2, "idxrdcb"); 1931 1932 rc = qeth_idx_check_activate_response(card, channel, iob); 1933 if (rc) 1934 goto out; 1935 1936 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 1937 if (peer_level != qeth_peer_func_level(card->info.func_level)) { 1938 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 1939 CCW_DEVID(channel->ccwdev), 1940 card->info.func_level, peer_level); 1941 rc = -EINVAL; 1942 goto out; 1943 } 1944 1945 memcpy(&card->token.issuer_rm_r, 1946 QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 1947 QETH_MPC_TOKEN_LENGTH); 1948 memcpy(&card->info.mcl_level[0], 1949 QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); 1950 1951 out: 1952 qeth_notify_reply(iob->reply, rc); 1953 qeth_release_buffer(channel, iob); 1954 } 1955 1956 static void qeth_idx_query_write_cb(struct qeth_card *card, 1957 struct qeth_channel *channel, 1958 struct qeth_cmd_buffer *iob) 1959 { 1960 u16 peer_level; 1961 int rc; 1962 1963 QETH_DBF_TEXT(SETUP, 2, "idxwrcb"); 1964 1965 rc = qeth_idx_check_activate_response(card, channel, iob); 1966 if (rc) 1967 goto out; 1968 1969 memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); 1970 if ((peer_level & ~0x0100) != 1971 qeth_peer_func_level(card->info.func_level)) { 1972 QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", 1973 CCW_DEVID(channel->ccwdev), 1974 card->info.func_level, peer_level); 1975 rc = -EINVAL; 1976 } 1977 1978 out: 1979 qeth_notify_reply(iob->reply, rc); 1980 qeth_release_buffer(channel, iob); 1981 } 1982 1983 static void qeth_idx_finalize_query_cmd(struct qeth_card *card, 1984 struct qeth_cmd_buffer *iob, 1985 unsigned int length) 1986 { 1987 qeth_setup_ccw(iob->channel->ccw, CCW_CMD_READ, length, iob->data); 1988 } 1989 1990 static void qeth_idx_activate_cb(struct qeth_card *card, 1991 struct qeth_channel *channel, 1992 struct qeth_cmd_buffer *iob) 1993 { 1994 qeth_notify_reply(iob->reply, 0); 1995 qeth_release_buffer(channel, iob); 1996 } 1997 1998 static void qeth_idx_setup_activate_cmd(struct qeth_card *card, 1999 struct qeth_cmd_buffer *iob) 2000 { 2001 u16 addr = (card->info.cula << 8) + card->info.unit_addr2; 2002 u8 port = ((u8)card->dev->dev_port) | 0x80; 2003 struct ccw_dev_id dev_id; 2004 2005 ccw_device_get_id(CARD_DDEV(card), &dev_id); 2006 iob->finalize = qeth_idx_finalize_cmd; 2007 iob->callback = qeth_idx_activate_cb; 2008 2009 memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1); 2010 memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), 2011 &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); 2012 memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2013 &card->info.func_level, 2); 2014 memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2); 2015 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2); 2016 } 2017 2018 static int qeth_idx_activate_read_channel(struct qeth_card *card) 2019 { 2020 struct qeth_channel *channel = &card->read; 2021 struct qeth_cmd_buffer *iob; 2022 int rc; 2023 2024 QETH_DBF_TEXT(SETUP, 2, "idxread"); 2025 2026 iob = qeth_get_buffer(channel); 2027 if (!iob) 2028 return -ENOMEM; 2029 2030 memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); 2031 qeth_idx_setup_activate_cmd(card, iob); 2032 2033 rc = qeth_send_control_data(card, IDX_ACTIVATE_SIZE, iob, NULL, NULL); 2034 if (rc) 2035 return rc; 2036 2037 iob = qeth_get_buffer(channel); 2038 if (!iob) 2039 return -ENOMEM; 2040 2041 iob->finalize = qeth_idx_finalize_query_cmd; 2042 iob->callback = qeth_idx_query_read_cb; 2043 rc = qeth_send_control_data(card, QETH_BUFSIZE, iob, NULL, NULL); 2044 if (rc) 2045 return rc; 2046 2047 channel->state = CH_STATE_UP; 2048 return 0; 2049 } 2050 2051 static int qeth_idx_activate_write_channel(struct qeth_card *card) 2052 { 2053 struct qeth_channel *channel = &card->write; 2054 struct qeth_cmd_buffer *iob; 2055 int rc; 2056 2057 QETH_DBF_TEXT(SETUP, 2, "idxwrite"); 2058 2059 iob = qeth_get_buffer(channel); 2060 if (!iob) 2061 return -ENOMEM; 2062 2063 memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); 2064 qeth_idx_setup_activate_cmd(card, iob); 2065 2066 rc = qeth_send_control_data(card, IDX_ACTIVATE_SIZE, iob, NULL, NULL); 2067 if (rc) 2068 return rc; 2069 2070 iob = qeth_get_buffer(channel); 2071 if (!iob) 2072 return -ENOMEM; 2073 2074 iob->finalize = qeth_idx_finalize_query_cmd; 2075 iob->callback = qeth_idx_query_write_cb; 2076 rc = qeth_send_control_data(card, QETH_BUFSIZE, iob, NULL, NULL); 2077 if (rc) 2078 return rc; 2079 2080 channel->state = CH_STATE_UP; 2081 return 0; 2082 } 2083 2084 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2085 unsigned long data) 2086 { 2087 struct qeth_cmd_buffer *iob; 2088 2089 QETH_DBF_TEXT(SETUP, 2, "cmenblcb"); 2090 2091 iob = (struct qeth_cmd_buffer *) data; 2092 memcpy(&card->token.cm_filter_r, 2093 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), 2094 QETH_MPC_TOKEN_LENGTH); 2095 return 0; 2096 } 2097 2098 static int qeth_cm_enable(struct qeth_card *card) 2099 { 2100 int rc; 2101 struct qeth_cmd_buffer *iob; 2102 2103 QETH_DBF_TEXT(SETUP, 2, "cmenable"); 2104 2105 iob = qeth_wait_for_buffer(&card->write); 2106 iob->finalize = qeth_mpc_finalize_cmd; 2107 memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE); 2108 2109 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), 2110 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2111 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), 2112 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); 2113 2114 rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob, 2115 qeth_cm_enable_cb, NULL); 2116 return rc; 2117 } 2118 2119 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2120 unsigned long data) 2121 { 2122 struct qeth_cmd_buffer *iob; 2123 2124 QETH_DBF_TEXT(SETUP, 2, "cmsetpcb"); 2125 2126 iob = (struct qeth_cmd_buffer *) data; 2127 memcpy(&card->token.cm_connection_r, 2128 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), 2129 QETH_MPC_TOKEN_LENGTH); 2130 return 0; 2131 } 2132 2133 static int qeth_cm_setup(struct qeth_card *card) 2134 { 2135 int rc; 2136 struct qeth_cmd_buffer *iob; 2137 2138 QETH_DBF_TEXT(SETUP, 2, "cmsetup"); 2139 2140 iob = qeth_wait_for_buffer(&card->write); 2141 iob->finalize = qeth_mpc_finalize_cmd; 2142 memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE); 2143 2144 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), 2145 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2146 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), 2147 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); 2148 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), 2149 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); 2150 rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob, 2151 qeth_cm_setup_cb, NULL); 2152 return rc; 2153 } 2154 2155 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) 2156 { 2157 struct net_device *dev = card->dev; 2158 unsigned int new_mtu; 2159 2160 if (!max_mtu) { 2161 /* IQD needs accurate max MTU to set up its RX buffers: */ 2162 if (IS_IQD(card)) 2163 return -EINVAL; 2164 /* tolerate quirky HW: */ 2165 max_mtu = ETH_MAX_MTU; 2166 } 2167 2168 rtnl_lock(); 2169 if (IS_IQD(card)) { 2170 /* move any device with default MTU to new max MTU: */ 2171 new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu; 2172 2173 /* adjust RX buffer size to new max MTU: */ 2174 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; 2175 if (dev->max_mtu && dev->max_mtu != max_mtu) 2176 qeth_free_qdio_queues(card); 2177 } else { 2178 if (dev->mtu) 2179 new_mtu = dev->mtu; 2180 /* default MTUs for first setup: */ 2181 else if (IS_LAYER2(card)) 2182 new_mtu = ETH_DATA_LEN; 2183 else 2184 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ 2185 } 2186 2187 dev->max_mtu = max_mtu; 2188 dev->mtu = min(new_mtu, max_mtu); 2189 rtnl_unlock(); 2190 return 0; 2191 } 2192 2193 static int qeth_get_mtu_outof_framesize(int framesize) 2194 { 2195 switch (framesize) { 2196 case 0x4000: 2197 return 8192; 2198 case 0x6000: 2199 return 16384; 2200 case 0xa000: 2201 return 32768; 2202 case 0xffff: 2203 return 57344; 2204 default: 2205 return 0; 2206 } 2207 } 2208 2209 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2210 unsigned long data) 2211 { 2212 __u16 mtu, framesize; 2213 __u16 len; 2214 __u8 link_type; 2215 struct qeth_cmd_buffer *iob; 2216 2217 QETH_DBF_TEXT(SETUP, 2, "ulpenacb"); 2218 2219 iob = (struct qeth_cmd_buffer *) data; 2220 memcpy(&card->token.ulp_filter_r, 2221 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), 2222 QETH_MPC_TOKEN_LENGTH); 2223 if (IS_IQD(card)) { 2224 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); 2225 mtu = qeth_get_mtu_outof_framesize(framesize); 2226 } else { 2227 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data); 2228 } 2229 *(u16 *)reply->param = mtu; 2230 2231 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); 2232 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { 2233 memcpy(&link_type, 2234 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); 2235 card->info.link_type = link_type; 2236 } else 2237 card->info.link_type = 0; 2238 QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type); 2239 return 0; 2240 } 2241 2242 static u8 qeth_mpc_select_prot_type(struct qeth_card *card) 2243 { 2244 if (IS_OSN(card)) 2245 return QETH_PROT_OSN2; 2246 return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP; 2247 } 2248 2249 static int qeth_ulp_enable(struct qeth_card *card) 2250 { 2251 u8 prot_type = qeth_mpc_select_prot_type(card); 2252 struct qeth_cmd_buffer *iob; 2253 u16 max_mtu; 2254 int rc; 2255 2256 /*FIXME: trace view callbacks*/ 2257 QETH_DBF_TEXT(SETUP, 2, "ulpenabl"); 2258 2259 iob = qeth_wait_for_buffer(&card->write); 2260 iob->finalize = qeth_mpc_finalize_cmd; 2261 memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE); 2262 2263 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; 2264 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); 2265 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), 2266 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2267 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), 2268 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); 2269 rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob, 2270 qeth_ulp_enable_cb, &max_mtu); 2271 if (rc) 2272 return rc; 2273 return qeth_update_max_mtu(card, max_mtu); 2274 } 2275 2276 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2277 unsigned long data) 2278 { 2279 struct qeth_cmd_buffer *iob; 2280 2281 QETH_DBF_TEXT(SETUP, 2, "ulpstpcb"); 2282 2283 iob = (struct qeth_cmd_buffer *) data; 2284 memcpy(&card->token.ulp_connection_r, 2285 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2286 QETH_MPC_TOKEN_LENGTH); 2287 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2288 3)) { 2289 QETH_DBF_TEXT(SETUP, 2, "olmlimit"); 2290 dev_err(&card->gdev->dev, "A connection could not be " 2291 "established because of an OLM limit\n"); 2292 return -EMLINK; 2293 } 2294 return 0; 2295 } 2296 2297 static int qeth_ulp_setup(struct qeth_card *card) 2298 { 2299 int rc; 2300 __u16 temp; 2301 struct qeth_cmd_buffer *iob; 2302 struct ccw_dev_id dev_id; 2303 2304 QETH_DBF_TEXT(SETUP, 2, "ulpsetup"); 2305 2306 iob = qeth_wait_for_buffer(&card->write); 2307 iob->finalize = qeth_mpc_finalize_cmd; 2308 memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE); 2309 2310 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), 2311 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2312 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), 2313 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH); 2314 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), 2315 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); 2316 2317 ccw_device_get_id(CARD_DDEV(card), &dev_id); 2318 memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2); 2319 temp = (card->info.cula << 8) + card->info.unit_addr2; 2320 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); 2321 rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob, 2322 qeth_ulp_setup_cb, NULL); 2323 return rc; 2324 } 2325 2326 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) 2327 { 2328 struct qeth_qdio_out_buffer *newbuf; 2329 2330 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC); 2331 if (!newbuf) 2332 return -ENOMEM; 2333 2334 newbuf->buffer = q->qdio_bufs[bidx]; 2335 skb_queue_head_init(&newbuf->skb_list); 2336 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); 2337 newbuf->q = q; 2338 newbuf->next_pending = q->bufs[bidx]; 2339 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); 2340 q->bufs[bidx] = newbuf; 2341 return 0; 2342 } 2343 2344 static void qeth_free_output_queue(struct qeth_qdio_out_q *q) 2345 { 2346 if (!q) 2347 return; 2348 2349 qeth_drain_output_queue(q, true); 2350 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2351 kfree(q); 2352 } 2353 2354 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) 2355 { 2356 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); 2357 2358 if (!q) 2359 return NULL; 2360 2361 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { 2362 kfree(q); 2363 return NULL; 2364 } 2365 return q; 2366 } 2367 2368 static int qeth_alloc_qdio_queues(struct qeth_card *card) 2369 { 2370 int i, j; 2371 2372 QETH_DBF_TEXT(SETUP, 2, "allcqdbf"); 2373 2374 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, 2375 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) 2376 return 0; 2377 2378 QETH_DBF_TEXT(SETUP, 2, "inq"); 2379 card->qdio.in_q = qeth_alloc_qdio_queue(); 2380 if (!card->qdio.in_q) 2381 goto out_nomem; 2382 2383 /* inbound buffer pool */ 2384 if (qeth_alloc_buffer_pool(card)) 2385 goto out_freeinq; 2386 2387 /* outbound */ 2388 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2389 card->qdio.out_qs[i] = qeth_alloc_output_queue(); 2390 if (!card->qdio.out_qs[i]) 2391 goto out_freeoutq; 2392 QETH_DBF_TEXT_(SETUP, 2, "outq %i", i); 2393 QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *)); 2394 card->qdio.out_qs[i]->card = card; 2395 card->qdio.out_qs[i]->queue_no = i; 2396 /* give outbound qeth_qdio_buffers their qdio_buffers */ 2397 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2398 WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL); 2399 if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j)) 2400 goto out_freeoutqbufs; 2401 } 2402 } 2403 2404 /* completion */ 2405 if (qeth_alloc_cq(card)) 2406 goto out_freeoutq; 2407 2408 return 0; 2409 2410 out_freeoutqbufs: 2411 while (j > 0) { 2412 --j; 2413 kmem_cache_free(qeth_qdio_outbuf_cache, 2414 card->qdio.out_qs[i]->bufs[j]); 2415 card->qdio.out_qs[i]->bufs[j] = NULL; 2416 } 2417 out_freeoutq: 2418 while (i > 0) { 2419 qeth_free_output_queue(card->qdio.out_qs[--i]); 2420 card->qdio.out_qs[i] = NULL; 2421 } 2422 qeth_free_buffer_pool(card); 2423 out_freeinq: 2424 qeth_free_qdio_queue(card->qdio.in_q); 2425 card->qdio.in_q = NULL; 2426 out_nomem: 2427 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 2428 return -ENOMEM; 2429 } 2430 2431 static void qeth_free_qdio_queues(struct qeth_card *card) 2432 { 2433 int i, j; 2434 2435 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == 2436 QETH_QDIO_UNINITIALIZED) 2437 return; 2438 2439 qeth_free_cq(card); 2440 cancel_delayed_work_sync(&card->buffer_reclaim_work); 2441 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2442 if (card->qdio.in_q->bufs[j].rx_skb) 2443 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); 2444 } 2445 qeth_free_qdio_queue(card->qdio.in_q); 2446 card->qdio.in_q = NULL; 2447 /* inbound buffer pool */ 2448 qeth_free_buffer_pool(card); 2449 /* free outbound qdio_qs */ 2450 for (i = 0; i < card->qdio.no_out_queues; i++) { 2451 qeth_free_output_queue(card->qdio.out_qs[i]); 2452 card->qdio.out_qs[i] = NULL; 2453 } 2454 } 2455 2456 static void qeth_create_qib_param_field(struct qeth_card *card, 2457 char *param_field) 2458 { 2459 2460 param_field[0] = _ascebc['P']; 2461 param_field[1] = _ascebc['C']; 2462 param_field[2] = _ascebc['I']; 2463 param_field[3] = _ascebc['T']; 2464 *((unsigned int *) (¶m_field[4])) = QETH_PCI_THRESHOLD_A(card); 2465 *((unsigned int *) (¶m_field[8])) = QETH_PCI_THRESHOLD_B(card); 2466 *((unsigned int *) (¶m_field[12])) = QETH_PCI_TIMER_VALUE(card); 2467 } 2468 2469 static void qeth_create_qib_param_field_blkt(struct qeth_card *card, 2470 char *param_field) 2471 { 2472 param_field[16] = _ascebc['B']; 2473 param_field[17] = _ascebc['L']; 2474 param_field[18] = _ascebc['K']; 2475 param_field[19] = _ascebc['T']; 2476 *((unsigned int *) (¶m_field[20])) = card->info.blkt.time_total; 2477 *((unsigned int *) (¶m_field[24])) = card->info.blkt.inter_packet; 2478 *((unsigned int *) (¶m_field[28])) = 2479 card->info.blkt.inter_packet_jumbo; 2480 } 2481 2482 static int qeth_qdio_activate(struct qeth_card *card) 2483 { 2484 QETH_DBF_TEXT(SETUP, 3, "qdioact"); 2485 return qdio_activate(CARD_DDEV(card)); 2486 } 2487 2488 static int qeth_dm_act(struct qeth_card *card) 2489 { 2490 int rc; 2491 struct qeth_cmd_buffer *iob; 2492 2493 QETH_DBF_TEXT(SETUP, 2, "dmact"); 2494 2495 iob = qeth_wait_for_buffer(&card->write); 2496 iob->finalize = qeth_mpc_finalize_cmd; 2497 memcpy(iob->data, DM_ACT, DM_ACT_SIZE); 2498 2499 memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), 2500 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2501 memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data), 2502 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2503 rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL); 2504 return rc; 2505 } 2506 2507 static int qeth_mpc_initialize(struct qeth_card *card) 2508 { 2509 int rc; 2510 2511 QETH_DBF_TEXT(SETUP, 2, "mpcinit"); 2512 2513 rc = qeth_issue_next_read(card); 2514 if (rc) { 2515 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 2516 return rc; 2517 } 2518 rc = qeth_cm_enable(card); 2519 if (rc) { 2520 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 2521 goto out_qdio; 2522 } 2523 rc = qeth_cm_setup(card); 2524 if (rc) { 2525 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 2526 goto out_qdio; 2527 } 2528 rc = qeth_ulp_enable(card); 2529 if (rc) { 2530 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); 2531 goto out_qdio; 2532 } 2533 rc = qeth_ulp_setup(card); 2534 if (rc) { 2535 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 2536 goto out_qdio; 2537 } 2538 rc = qeth_alloc_qdio_queues(card); 2539 if (rc) { 2540 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 2541 goto out_qdio; 2542 } 2543 rc = qeth_qdio_establish(card); 2544 if (rc) { 2545 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 2546 qeth_free_qdio_queues(card); 2547 goto out_qdio; 2548 } 2549 rc = qeth_qdio_activate(card); 2550 if (rc) { 2551 QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); 2552 goto out_qdio; 2553 } 2554 rc = qeth_dm_act(card); 2555 if (rc) { 2556 QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); 2557 goto out_qdio; 2558 } 2559 2560 return 0; 2561 out_qdio: 2562 qeth_qdio_clear_card(card, !IS_IQD(card)); 2563 qdio_free(CARD_DDEV(card)); 2564 return rc; 2565 } 2566 2567 void qeth_print_status_message(struct qeth_card *card) 2568 { 2569 switch (card->info.type) { 2570 case QETH_CARD_TYPE_OSD: 2571 case QETH_CARD_TYPE_OSM: 2572 case QETH_CARD_TYPE_OSX: 2573 /* VM will use a non-zero first character 2574 * to indicate a HiperSockets like reporting 2575 * of the level OSA sets the first character to zero 2576 * */ 2577 if (!card->info.mcl_level[0]) { 2578 sprintf(card->info.mcl_level, "%02x%02x", 2579 card->info.mcl_level[2], 2580 card->info.mcl_level[3]); 2581 break; 2582 } 2583 /* fallthrough */ 2584 case QETH_CARD_TYPE_IQD: 2585 if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) { 2586 card->info.mcl_level[0] = (char) _ebcasc[(__u8) 2587 card->info.mcl_level[0]]; 2588 card->info.mcl_level[1] = (char) _ebcasc[(__u8) 2589 card->info.mcl_level[1]]; 2590 card->info.mcl_level[2] = (char) _ebcasc[(__u8) 2591 card->info.mcl_level[2]]; 2592 card->info.mcl_level[3] = (char) _ebcasc[(__u8) 2593 card->info.mcl_level[3]]; 2594 card->info.mcl_level[QETH_MCL_LENGTH] = 0; 2595 } 2596 break; 2597 default: 2598 memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1); 2599 } 2600 dev_info(&card->gdev->dev, 2601 "Device is a%s card%s%s%s\nwith link type %s.\n", 2602 qeth_get_cardname(card), 2603 (card->info.mcl_level[0]) ? " (level: " : "", 2604 (card->info.mcl_level[0]) ? card->info.mcl_level : "", 2605 (card->info.mcl_level[0]) ? ")" : "", 2606 qeth_get_cardname_short(card)); 2607 } 2608 EXPORT_SYMBOL_GPL(qeth_print_status_message); 2609 2610 static void qeth_initialize_working_pool_list(struct qeth_card *card) 2611 { 2612 struct qeth_buffer_pool_entry *entry; 2613 2614 QETH_CARD_TEXT(card, 5, "inwrklst"); 2615 2616 list_for_each_entry(entry, 2617 &card->qdio.init_pool.entry_list, init_list) { 2618 qeth_put_buffer_pool_entry(card, entry); 2619 } 2620 } 2621 2622 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( 2623 struct qeth_card *card) 2624 { 2625 struct list_head *plh; 2626 struct qeth_buffer_pool_entry *entry; 2627 int i, free; 2628 struct page *page; 2629 2630 if (list_empty(&card->qdio.in_buf_pool.entry_list)) 2631 return NULL; 2632 2633 list_for_each(plh, &card->qdio.in_buf_pool.entry_list) { 2634 entry = list_entry(plh, struct qeth_buffer_pool_entry, list); 2635 free = 1; 2636 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2637 if (page_count(virt_to_page(entry->elements[i])) > 1) { 2638 free = 0; 2639 break; 2640 } 2641 } 2642 if (free) { 2643 list_del_init(&entry->list); 2644 return entry; 2645 } 2646 } 2647 2648 /* no free buffer in pool so take first one and swap pages */ 2649 entry = list_entry(card->qdio.in_buf_pool.entry_list.next, 2650 struct qeth_buffer_pool_entry, list); 2651 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2652 if (page_count(virt_to_page(entry->elements[i])) > 1) { 2653 page = alloc_page(GFP_ATOMIC); 2654 if (!page) { 2655 return NULL; 2656 } else { 2657 free_page((unsigned long)entry->elements[i]); 2658 entry->elements[i] = page_address(page); 2659 QETH_CARD_STAT_INC(card, rx_sg_alloc_page); 2660 } 2661 } 2662 } 2663 list_del_init(&entry->list); 2664 return entry; 2665 } 2666 2667 static int qeth_init_input_buffer(struct qeth_card *card, 2668 struct qeth_qdio_buffer *buf) 2669 { 2670 struct qeth_buffer_pool_entry *pool_entry; 2671 int i; 2672 2673 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { 2674 buf->rx_skb = netdev_alloc_skb(card->dev, 2675 QETH_RX_PULL_LEN + ETH_HLEN); 2676 if (!buf->rx_skb) 2677 return 1; 2678 } 2679 2680 pool_entry = qeth_find_free_buffer_pool_entry(card); 2681 if (!pool_entry) 2682 return 1; 2683 2684 /* 2685 * since the buffer is accessed only from the input_tasklet 2686 * there shouldn't be a need to synchronize; also, since we use 2687 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off 2688 * buffers 2689 */ 2690 2691 buf->pool_entry = pool_entry; 2692 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2693 buf->buffer->element[i].length = PAGE_SIZE; 2694 buf->buffer->element[i].addr = pool_entry->elements[i]; 2695 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) 2696 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY; 2697 else 2698 buf->buffer->element[i].eflags = 0; 2699 buf->buffer->element[i].sflags = 0; 2700 } 2701 return 0; 2702 } 2703 2704 int qeth_init_qdio_queues(struct qeth_card *card) 2705 { 2706 unsigned int i; 2707 int rc; 2708 2709 QETH_DBF_TEXT(SETUP, 2, "initqdqs"); 2710 2711 /* inbound queue */ 2712 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2713 memset(&card->rx, 0, sizeof(struct qeth_rx)); 2714 qeth_initialize_working_pool_list(card); 2715 /*give only as many buffers to hardware as we have buffer pool entries*/ 2716 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i) 2717 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); 2718 card->qdio.in_q->next_buf_to_init = 2719 card->qdio.in_buf_pool.buf_count - 1; 2720 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, 2721 card->qdio.in_buf_pool.buf_count - 1); 2722 if (rc) { 2723 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 2724 return rc; 2725 } 2726 2727 /* completion */ 2728 rc = qeth_cq_init(card); 2729 if (rc) { 2730 return rc; 2731 } 2732 2733 /* outbound queue */ 2734 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2735 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i]; 2736 2737 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2738 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card); 2739 queue->next_buf_to_fill = 0; 2740 queue->do_pack = 0; 2741 atomic_set(&queue->used_buffers, 0); 2742 atomic_set(&queue->set_pci_flags_count, 0); 2743 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 2744 } 2745 return 0; 2746 } 2747 EXPORT_SYMBOL_GPL(qeth_init_qdio_queues); 2748 2749 static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type) 2750 { 2751 switch (link_type) { 2752 case QETH_LINK_TYPE_HSTR: 2753 return 2; 2754 default: 2755 return 1; 2756 } 2757 } 2758 2759 static void qeth_fill_ipacmd_header(struct qeth_card *card, 2760 struct qeth_ipa_cmd *cmd, 2761 enum qeth_ipa_cmds command, 2762 enum qeth_prot_versions prot) 2763 { 2764 cmd->hdr.command = command; 2765 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; 2766 /* cmd->hdr.seqno is set by qeth_send_control_data() */ 2767 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); 2768 cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port; 2769 cmd->hdr.prim_version_no = IS_LAYER2(card) ? 2 : 1; 2770 cmd->hdr.param_count = 1; 2771 cmd->hdr.prot_version = prot; 2772 } 2773 2774 static void qeth_ipa_finalize_cmd(struct qeth_card *card, 2775 struct qeth_cmd_buffer *iob, 2776 unsigned int length) 2777 { 2778 qeth_mpc_finalize_cmd(card, iob, length); 2779 2780 /* override with IPA-specific values: */ 2781 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa; 2782 iob->reply->seqno = card->seqno.ipa++; 2783 } 2784 2785 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 2786 u16 cmd_length) 2787 { 2788 u16 total_length = IPA_PDU_HEADER_SIZE + cmd_length; 2789 u8 prot_type = qeth_mpc_select_prot_type(card); 2790 2791 iob->finalize = qeth_ipa_finalize_cmd; 2792 iob->timeout = QETH_IPA_TIMEOUT; 2793 2794 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); 2795 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); 2796 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); 2797 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2); 2798 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2); 2799 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), 2800 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2801 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2); 2802 } 2803 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); 2804 2805 struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, 2806 enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot) 2807 { 2808 struct qeth_cmd_buffer *iob; 2809 2810 iob = qeth_get_buffer(&card->write); 2811 if (iob) { 2812 qeth_prepare_ipa_cmd(card, iob, sizeof(struct qeth_ipa_cmd)); 2813 qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot); 2814 } else { 2815 dev_warn(&card->gdev->dev, 2816 "The qeth driver ran out of channel command buffers\n"); 2817 QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers", 2818 CARD_DEVID(card)); 2819 } 2820 2821 return iob; 2822 } 2823 EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer); 2824 2825 static int qeth_send_ipa_cmd_cb(struct qeth_card *card, 2826 struct qeth_reply *reply, unsigned long data) 2827 { 2828 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2829 2830 return (cmd->hdr.return_code) ? -EIO : 0; 2831 } 2832 2833 /** 2834 * qeth_send_ipa_cmd() - send an IPA command 2835 * 2836 * See qeth_send_control_data() for explanation of the arguments. 2837 */ 2838 2839 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 2840 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, 2841 unsigned long), 2842 void *reply_param) 2843 { 2844 u16 length; 2845 int rc; 2846 2847 QETH_CARD_TEXT(card, 4, "sendipa"); 2848 2849 if (card->read_or_write_problem) { 2850 qeth_release_buffer(iob->channel, iob); 2851 return -EIO; 2852 } 2853 2854 if (reply_cb == NULL) 2855 reply_cb = qeth_send_ipa_cmd_cb; 2856 memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2); 2857 rc = qeth_send_control_data(card, length, iob, reply_cb, reply_param); 2858 if (rc == -ETIME) { 2859 qeth_clear_ipacmd_list(card); 2860 qeth_schedule_recovery(card); 2861 } 2862 return rc; 2863 } 2864 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); 2865 2866 static int qeth_send_startlan_cb(struct qeth_card *card, 2867 struct qeth_reply *reply, unsigned long data) 2868 { 2869 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2870 2871 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE) 2872 return -ENETDOWN; 2873 2874 return (cmd->hdr.return_code) ? -EIO : 0; 2875 } 2876 2877 static int qeth_send_startlan(struct qeth_card *card) 2878 { 2879 struct qeth_cmd_buffer *iob; 2880 2881 QETH_DBF_TEXT(SETUP, 2, "strtlan"); 2882 2883 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); 2884 if (!iob) 2885 return -ENOMEM; 2886 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL); 2887 } 2888 2889 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) 2890 { 2891 if (!cmd->hdr.return_code) 2892 cmd->hdr.return_code = 2893 cmd->data.setadapterparms.hdr.return_code; 2894 return cmd->hdr.return_code; 2895 } 2896 2897 static int qeth_query_setadapterparms_cb(struct qeth_card *card, 2898 struct qeth_reply *reply, unsigned long data) 2899 { 2900 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2901 2902 QETH_CARD_TEXT(card, 3, "quyadpcb"); 2903 if (qeth_setadpparms_inspect_rc(cmd)) 2904 return -EIO; 2905 2906 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { 2907 card->info.link_type = 2908 cmd->data.setadapterparms.data.query_cmds_supp.lan_type; 2909 QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type); 2910 } 2911 card->options.adp.supported_funcs = 2912 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; 2913 return 0; 2914 } 2915 2916 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, 2917 __u32 command, __u32 cmdlen) 2918 { 2919 struct qeth_cmd_buffer *iob; 2920 struct qeth_ipa_cmd *cmd; 2921 2922 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, 2923 QETH_PROT_IPV4); 2924 if (iob) { 2925 cmd = __ipa_cmd(iob); 2926 cmd->data.setadapterparms.hdr.cmdlength = cmdlen; 2927 cmd->data.setadapterparms.hdr.command_code = command; 2928 cmd->data.setadapterparms.hdr.used_total = 1; 2929 cmd->data.setadapterparms.hdr.seq_no = 1; 2930 } 2931 2932 return iob; 2933 } 2934 2935 static int qeth_query_setadapterparms(struct qeth_card *card) 2936 { 2937 int rc; 2938 struct qeth_cmd_buffer *iob; 2939 2940 QETH_CARD_TEXT(card, 3, "queryadp"); 2941 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, 2942 sizeof(struct qeth_ipacmd_setadpparms)); 2943 if (!iob) 2944 return -ENOMEM; 2945 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); 2946 return rc; 2947 } 2948 2949 static int qeth_query_ipassists_cb(struct qeth_card *card, 2950 struct qeth_reply *reply, unsigned long data) 2951 { 2952 struct qeth_ipa_cmd *cmd; 2953 2954 QETH_DBF_TEXT(SETUP, 2, "qipasscb"); 2955 2956 cmd = (struct qeth_ipa_cmd *) data; 2957 2958 switch (cmd->hdr.return_code) { 2959 case IPA_RC_SUCCESS: 2960 break; 2961 case IPA_RC_NOTSUPP: 2962 case IPA_RC_L2_UNSUPPORTED_CMD: 2963 QETH_DBF_TEXT(SETUP, 2, "ipaunsup"); 2964 card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS; 2965 card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS; 2966 return -EOPNOTSUPP; 2967 default: 2968 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", 2969 CARD_DEVID(card), cmd->hdr.return_code); 2970 return -EIO; 2971 } 2972 2973 if (cmd->hdr.prot_version == QETH_PROT_IPV4) { 2974 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; 2975 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; 2976 } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) { 2977 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; 2978 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; 2979 } else 2980 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n", 2981 CARD_DEVID(card)); 2982 return 0; 2983 } 2984 2985 static int qeth_query_ipassists(struct qeth_card *card, 2986 enum qeth_prot_versions prot) 2987 { 2988 int rc; 2989 struct qeth_cmd_buffer *iob; 2990 2991 QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); 2992 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); 2993 if (!iob) 2994 return -ENOMEM; 2995 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); 2996 return rc; 2997 } 2998 2999 static int qeth_query_switch_attributes_cb(struct qeth_card *card, 3000 struct qeth_reply *reply, unsigned long data) 3001 { 3002 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3003 struct qeth_query_switch_attributes *attrs; 3004 struct qeth_switch_info *sw_info; 3005 3006 QETH_CARD_TEXT(card, 2, "qswiatcb"); 3007 if (qeth_setadpparms_inspect_rc(cmd)) 3008 return -EIO; 3009 3010 sw_info = (struct qeth_switch_info *)reply->param; 3011 attrs = &cmd->data.setadapterparms.data.query_switch_attributes; 3012 sw_info->capabilities = attrs->capabilities; 3013 sw_info->settings = attrs->settings; 3014 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, 3015 sw_info->settings); 3016 return 0; 3017 } 3018 3019 int qeth_query_switch_attributes(struct qeth_card *card, 3020 struct qeth_switch_info *sw_info) 3021 { 3022 struct qeth_cmd_buffer *iob; 3023 3024 QETH_CARD_TEXT(card, 2, "qswiattr"); 3025 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES)) 3026 return -EOPNOTSUPP; 3027 if (!netif_carrier_ok(card->dev)) 3028 return -ENOMEDIUM; 3029 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 3030 sizeof(struct qeth_ipacmd_setadpparms_hdr)); 3031 if (!iob) 3032 return -ENOMEM; 3033 return qeth_send_ipa_cmd(card, iob, 3034 qeth_query_switch_attributes_cb, sw_info); 3035 } 3036 3037 static int qeth_query_setdiagass_cb(struct qeth_card *card, 3038 struct qeth_reply *reply, unsigned long data) 3039 { 3040 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3041 u16 rc = cmd->hdr.return_code; 3042 3043 if (rc) { 3044 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); 3045 return -EIO; 3046 } 3047 3048 card->info.diagass_support = cmd->data.diagass.ext; 3049 return 0; 3050 } 3051 3052 static int qeth_query_setdiagass(struct qeth_card *card) 3053 { 3054 struct qeth_cmd_buffer *iob; 3055 struct qeth_ipa_cmd *cmd; 3056 3057 QETH_DBF_TEXT(SETUP, 2, "qdiagass"); 3058 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); 3059 if (!iob) 3060 return -ENOMEM; 3061 cmd = __ipa_cmd(iob); 3062 cmd->data.diagass.subcmd_len = 16; 3063 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; 3064 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); 3065 } 3066 3067 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) 3068 { 3069 unsigned long info = get_zeroed_page(GFP_KERNEL); 3070 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; 3071 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; 3072 struct ccw_dev_id ccwid; 3073 int level; 3074 3075 tid->chpid = card->info.chpid; 3076 ccw_device_get_id(CARD_RDEV(card), &ccwid); 3077 tid->ssid = ccwid.ssid; 3078 tid->devno = ccwid.devno; 3079 if (!info) 3080 return; 3081 level = stsi(NULL, 0, 0, 0); 3082 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0)) 3083 tid->lparnr = info222->lpar_number; 3084 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) { 3085 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name)); 3086 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname)); 3087 } 3088 free_page(info); 3089 return; 3090 } 3091 3092 static int qeth_hw_trap_cb(struct qeth_card *card, 3093 struct qeth_reply *reply, unsigned long data) 3094 { 3095 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3096 u16 rc = cmd->hdr.return_code; 3097 3098 if (rc) { 3099 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc); 3100 return -EIO; 3101 } 3102 return 0; 3103 } 3104 3105 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) 3106 { 3107 struct qeth_cmd_buffer *iob; 3108 struct qeth_ipa_cmd *cmd; 3109 3110 QETH_DBF_TEXT(SETUP, 2, "diagtrap"); 3111 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); 3112 if (!iob) 3113 return -ENOMEM; 3114 cmd = __ipa_cmd(iob); 3115 cmd->data.diagass.subcmd_len = 80; 3116 cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; 3117 cmd->data.diagass.type = 1; 3118 cmd->data.diagass.action = action; 3119 switch (action) { 3120 case QETH_DIAGS_TRAP_ARM: 3121 cmd->data.diagass.options = 0x0003; 3122 cmd->data.diagass.ext = 0x00010000 + 3123 sizeof(struct qeth_trap_id); 3124 qeth_get_trap_id(card, 3125 (struct qeth_trap_id *)cmd->data.diagass.cdata); 3126 break; 3127 case QETH_DIAGS_TRAP_DISARM: 3128 cmd->data.diagass.options = 0x0001; 3129 break; 3130 case QETH_DIAGS_TRAP_CAPTURE: 3131 break; 3132 } 3133 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL); 3134 } 3135 EXPORT_SYMBOL_GPL(qeth_hw_trap); 3136 3137 static int qeth_check_qdio_errors(struct qeth_card *card, 3138 struct qdio_buffer *buf, 3139 unsigned int qdio_error, 3140 const char *dbftext) 3141 { 3142 if (qdio_error) { 3143 QETH_CARD_TEXT(card, 2, dbftext); 3144 QETH_CARD_TEXT_(card, 2, " F15=%02X", 3145 buf->element[15].sflags); 3146 QETH_CARD_TEXT_(card, 2, " F14=%02X", 3147 buf->element[14].sflags); 3148 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); 3149 if ((buf->element[15].sflags) == 0x12) { 3150 QETH_CARD_STAT_INC(card, rx_dropped); 3151 return 0; 3152 } else 3153 return 1; 3154 } 3155 return 0; 3156 } 3157 3158 static void qeth_queue_input_buffer(struct qeth_card *card, int index) 3159 { 3160 struct qeth_qdio_q *queue = card->qdio.in_q; 3161 struct list_head *lh; 3162 int count; 3163 int i; 3164 int rc; 3165 int newcount = 0; 3166 3167 count = (index < queue->next_buf_to_init)? 3168 card->qdio.in_buf_pool.buf_count - 3169 (queue->next_buf_to_init - index) : 3170 card->qdio.in_buf_pool.buf_count - 3171 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index); 3172 /* only requeue at a certain threshold to avoid SIGAs */ 3173 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) { 3174 for (i = queue->next_buf_to_init; 3175 i < queue->next_buf_to_init + count; ++i) { 3176 if (qeth_init_input_buffer(card, 3177 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) { 3178 break; 3179 } else { 3180 newcount++; 3181 } 3182 } 3183 3184 if (newcount < count) { 3185 /* we are in memory shortage so we switch back to 3186 traditional skb allocation and drop packages */ 3187 atomic_set(&card->force_alloc_skb, 3); 3188 count = newcount; 3189 } else { 3190 atomic_add_unless(&card->force_alloc_skb, -1, 0); 3191 } 3192 3193 if (!count) { 3194 i = 0; 3195 list_for_each(lh, &card->qdio.in_buf_pool.entry_list) 3196 i++; 3197 if (i == card->qdio.in_buf_pool.buf_count) { 3198 QETH_CARD_TEXT(card, 2, "qsarbw"); 3199 card->reclaim_index = index; 3200 schedule_delayed_work( 3201 &card->buffer_reclaim_work, 3202 QETH_RECLAIM_WORK_TIME); 3203 } 3204 return; 3205 } 3206 3207 /* 3208 * according to old code it should be avoided to requeue all 3209 * 128 buffers in order to benefit from PCI avoidance. 3210 * this function keeps at least one buffer (the buffer at 3211 * 'index') un-requeued -> this buffer is the first buffer that 3212 * will be requeued the next time 3213 */ 3214 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 3215 queue->next_buf_to_init, count); 3216 if (rc) { 3217 QETH_CARD_TEXT(card, 2, "qinberr"); 3218 } 3219 queue->next_buf_to_init = (queue->next_buf_to_init + count) % 3220 QDIO_MAX_BUFFERS_PER_Q; 3221 } 3222 } 3223 3224 static void qeth_buffer_reclaim_work(struct work_struct *work) 3225 { 3226 struct qeth_card *card = container_of(work, struct qeth_card, 3227 buffer_reclaim_work.work); 3228 3229 QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index); 3230 qeth_queue_input_buffer(card, card->reclaim_index); 3231 } 3232 3233 static void qeth_handle_send_error(struct qeth_card *card, 3234 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) 3235 { 3236 int sbalf15 = buffer->buffer->element[15].sflags; 3237 3238 QETH_CARD_TEXT(card, 6, "hdsnderr"); 3239 if (IS_IQD(card)) { 3240 if (sbalf15 == 0) { 3241 qdio_err = 0; 3242 } else { 3243 qdio_err = 1; 3244 } 3245 } 3246 qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr"); 3247 3248 if (!qdio_err) 3249 return; 3250 3251 if ((sbalf15 >= 15) && (sbalf15 <= 31)) 3252 return; 3253 3254 QETH_CARD_TEXT(card, 1, "lnkfail"); 3255 QETH_CARD_TEXT_(card, 1, "%04x %02x", 3256 (u16)qdio_err, (u8)sbalf15); 3257 } 3258 3259 /** 3260 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer. 3261 * @queue: queue to check for packing buffer 3262 * 3263 * Returns number of buffers that were prepared for flush. 3264 */ 3265 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue) 3266 { 3267 struct qeth_qdio_out_buffer *buffer; 3268 3269 buffer = queue->bufs[queue->next_buf_to_fill]; 3270 if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && 3271 (buffer->next_element_to_fill > 0)) { 3272 /* it's a packing buffer */ 3273 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3274 queue->next_buf_to_fill = 3275 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q; 3276 return 1; 3277 } 3278 return 0; 3279 } 3280 3281 /* 3282 * Switched to packing state if the number of used buffers on a queue 3283 * reaches a certain limit. 3284 */ 3285 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) 3286 { 3287 if (!queue->do_pack) { 3288 if (atomic_read(&queue->used_buffers) 3289 >= QETH_HIGH_WATERMARK_PACK){ 3290 /* switch non-PACKING -> PACKING */ 3291 QETH_CARD_TEXT(queue->card, 6, "np->pack"); 3292 QETH_TXQ_STAT_INC(queue, packing_mode_switch); 3293 queue->do_pack = 1; 3294 } 3295 } 3296 } 3297 3298 /* 3299 * Switches from packing to non-packing mode. If there is a packing 3300 * buffer on the queue this buffer will be prepared to be flushed. 3301 * In that case 1 is returned to inform the caller. If no buffer 3302 * has to be flushed, zero is returned. 3303 */ 3304 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) 3305 { 3306 if (queue->do_pack) { 3307 if (atomic_read(&queue->used_buffers) 3308 <= QETH_LOW_WATERMARK_PACK) { 3309 /* switch PACKING -> non-PACKING */ 3310 QETH_CARD_TEXT(queue->card, 6, "pack->np"); 3311 QETH_TXQ_STAT_INC(queue, packing_mode_switch); 3312 queue->do_pack = 0; 3313 return qeth_prep_flush_pack_buffer(queue); 3314 } 3315 } 3316 return 0; 3317 } 3318 3319 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, 3320 int count) 3321 { 3322 struct qeth_qdio_out_buffer *buf; 3323 int rc; 3324 int i; 3325 unsigned int qdio_flags; 3326 3327 for (i = index; i < index + count; ++i) { 3328 int bidx = i % QDIO_MAX_BUFFERS_PER_Q; 3329 buf = queue->bufs[bidx]; 3330 buf->buffer->element[buf->next_element_to_fill - 1].eflags |= 3331 SBAL_EFLAGS_LAST_ENTRY; 3332 3333 if (queue->bufstates) 3334 queue->bufstates[bidx].user = buf; 3335 3336 if (IS_IQD(queue->card)) 3337 continue; 3338 3339 if (!queue->do_pack) { 3340 if ((atomic_read(&queue->used_buffers) >= 3341 (QETH_HIGH_WATERMARK_PACK - 3342 QETH_WATERMARK_PACK_FUZZ)) && 3343 !atomic_read(&queue->set_pci_flags_count)) { 3344 /* it's likely that we'll go to packing 3345 * mode soon */ 3346 atomic_inc(&queue->set_pci_flags_count); 3347 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; 3348 } 3349 } else { 3350 if (!atomic_read(&queue->set_pci_flags_count)) { 3351 /* 3352 * there's no outstanding PCI any more, so we 3353 * have to request a PCI to be sure the the PCI 3354 * will wake at some time in the future then we 3355 * can flush packed buffers that might still be 3356 * hanging around, which can happen if no 3357 * further send was requested by the stack 3358 */ 3359 atomic_inc(&queue->set_pci_flags_count); 3360 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; 3361 } 3362 } 3363 } 3364 3365 QETH_TXQ_STAT_ADD(queue, bufs, count); 3366 qdio_flags = QDIO_FLAG_SYNC_OUTPUT; 3367 if (atomic_read(&queue->set_pci_flags_count)) 3368 qdio_flags |= QDIO_FLAG_PCI_OUT; 3369 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, 3370 queue->queue_no, index, count); 3371 if (rc) { 3372 QETH_TXQ_STAT_ADD(queue, tx_errors, count); 3373 /* ignore temporary SIGA errors without busy condition */ 3374 if (rc == -ENOBUFS) 3375 return; 3376 QETH_CARD_TEXT(queue->card, 2, "flushbuf"); 3377 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no); 3378 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index); 3379 QETH_CARD_TEXT_(queue->card, 2, " c%d", count); 3380 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc); 3381 3382 /* this must not happen under normal circumstances. if it 3383 * happens something is really wrong -> recover */ 3384 qeth_schedule_recovery(queue->card); 3385 return; 3386 } 3387 } 3388 3389 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) 3390 { 3391 int index; 3392 int flush_cnt = 0; 3393 int q_was_packing = 0; 3394 3395 /* 3396 * check if weed have to switch to non-packing mode or if 3397 * we have to get a pci flag out on the queue 3398 */ 3399 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || 3400 !atomic_read(&queue->set_pci_flags_count)) { 3401 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) == 3402 QETH_OUT_Q_UNLOCKED) { 3403 /* 3404 * If we get in here, there was no action in 3405 * do_send_packet. So, we check if there is a 3406 * packing buffer to be flushed here. 3407 */ 3408 index = queue->next_buf_to_fill; 3409 q_was_packing = queue->do_pack; 3410 /* queue->do_pack may change */ 3411 barrier(); 3412 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue); 3413 if (!flush_cnt && 3414 !atomic_read(&queue->set_pci_flags_count)) 3415 flush_cnt += qeth_prep_flush_pack_buffer(queue); 3416 if (q_was_packing) 3417 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt); 3418 if (flush_cnt) 3419 qeth_flush_buffers(queue, index, flush_cnt); 3420 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3421 } 3422 } 3423 } 3424 3425 static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, 3426 unsigned long card_ptr) 3427 { 3428 struct qeth_card *card = (struct qeth_card *)card_ptr; 3429 3430 if (card->dev->flags & IFF_UP) 3431 napi_schedule(&card->napi); 3432 } 3433 3434 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) 3435 { 3436 int rc; 3437 3438 if (card->options.cq == QETH_CQ_NOTAVAILABLE) { 3439 rc = -1; 3440 goto out; 3441 } else { 3442 if (card->options.cq == cq) { 3443 rc = 0; 3444 goto out; 3445 } 3446 3447 if (card->state != CARD_STATE_DOWN) { 3448 rc = -1; 3449 goto out; 3450 } 3451 3452 qeth_free_qdio_queues(card); 3453 card->options.cq = cq; 3454 rc = 0; 3455 } 3456 out: 3457 return rc; 3458 3459 } 3460 EXPORT_SYMBOL_GPL(qeth_configure_cq); 3461 3462 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, 3463 unsigned int queue, int first_element, 3464 int count) 3465 { 3466 struct qeth_qdio_q *cq = card->qdio.c_q; 3467 int i; 3468 int rc; 3469 3470 if (!qeth_is_cq(card, queue)) 3471 return; 3472 3473 QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element); 3474 QETH_CARD_TEXT_(card, 5, "qcqhc%d", count); 3475 QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err); 3476 3477 if (qdio_err) { 3478 netif_tx_stop_all_queues(card->dev); 3479 qeth_schedule_recovery(card); 3480 return; 3481 } 3482 3483 for (i = first_element; i < first_element + count; ++i) { 3484 int bidx = i % QDIO_MAX_BUFFERS_PER_Q; 3485 struct qdio_buffer *buffer = cq->qdio_bufs[bidx]; 3486 int e = 0; 3487 3488 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && 3489 buffer->element[e].addr) { 3490 unsigned long phys_aob_addr; 3491 3492 phys_aob_addr = (unsigned long) buffer->element[e].addr; 3493 qeth_qdio_handle_aob(card, phys_aob_addr); 3494 ++e; 3495 } 3496 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); 3497 } 3498 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, 3499 card->qdio.c_q->next_buf_to_init, 3500 count); 3501 if (rc) { 3502 dev_warn(&card->gdev->dev, 3503 "QDIO reported an error, rc=%i\n", rc); 3504 QETH_CARD_TEXT(card, 2, "qcqherr"); 3505 } 3506 card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init 3507 + count) % QDIO_MAX_BUFFERS_PER_Q; 3508 } 3509 3510 static void qeth_qdio_input_handler(struct ccw_device *ccwdev, 3511 unsigned int qdio_err, int queue, 3512 int first_elem, int count, 3513 unsigned long card_ptr) 3514 { 3515 struct qeth_card *card = (struct qeth_card *)card_ptr; 3516 3517 QETH_CARD_TEXT_(card, 2, "qihq%d", queue); 3518 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err); 3519 3520 if (qeth_is_cq(card, queue)) 3521 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count); 3522 else if (qdio_err) 3523 qeth_schedule_recovery(card); 3524 } 3525 3526 static void qeth_qdio_output_handler(struct ccw_device *ccwdev, 3527 unsigned int qdio_error, int __queue, 3528 int first_element, int count, 3529 unsigned long card_ptr) 3530 { 3531 struct qeth_card *card = (struct qeth_card *) card_ptr; 3532 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; 3533 struct qeth_qdio_out_buffer *buffer; 3534 struct net_device *dev = card->dev; 3535 struct netdev_queue *txq; 3536 int i; 3537 3538 QETH_CARD_TEXT(card, 6, "qdouhdl"); 3539 if (qdio_error & QDIO_ERROR_FATAL) { 3540 QETH_CARD_TEXT(card, 2, "achkcond"); 3541 netif_tx_stop_all_queues(dev); 3542 qeth_schedule_recovery(card); 3543 return; 3544 } 3545 3546 for (i = first_element; i < (first_element + count); ++i) { 3547 int bidx = i % QDIO_MAX_BUFFERS_PER_Q; 3548 buffer = queue->bufs[bidx]; 3549 qeth_handle_send_error(card, buffer, qdio_error); 3550 3551 if (queue->bufstates && 3552 (queue->bufstates[bidx].flags & 3553 QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) { 3554 WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); 3555 3556 if (atomic_cmpxchg(&buffer->state, 3557 QETH_QDIO_BUF_PRIMED, 3558 QETH_QDIO_BUF_PENDING) == 3559 QETH_QDIO_BUF_PRIMED) { 3560 qeth_notify_skbs(queue, buffer, 3561 TX_NOTIFY_PENDING); 3562 } 3563 QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx); 3564 3565 /* prepare the queue slot for re-use: */ 3566 qeth_scrub_qdio_buffer(buffer->buffer, 3567 queue->max_elements); 3568 if (qeth_init_qdio_out_buf(queue, bidx)) { 3569 QETH_CARD_TEXT(card, 2, "outofbuf"); 3570 qeth_schedule_recovery(card); 3571 } 3572 } else { 3573 if (card->options.cq == QETH_CQ_ENABLED) { 3574 enum iucv_tx_notify n; 3575 3576 n = qeth_compute_cq_notification( 3577 buffer->buffer->element[15].sflags, 0); 3578 qeth_notify_skbs(queue, buffer, n); 3579 } 3580 3581 qeth_clear_output_buffer(queue, buffer); 3582 } 3583 qeth_cleanup_handled_pending(queue, bidx, 0); 3584 } 3585 atomic_sub(count, &queue->used_buffers); 3586 /* check if we need to do something on this outbound queue */ 3587 if (!IS_IQD(card)) 3588 qeth_check_outbound_queue(queue); 3589 3590 if (IS_IQD(card)) 3591 __queue = qeth_iqd_translate_txq(dev, __queue); 3592 txq = netdev_get_tx_queue(dev, __queue); 3593 /* xmit may have observed the full-condition, but not yet stopped the 3594 * txq. In which case the code below won't trigger. So before returning, 3595 * xmit will re-check the txq's fill level and wake it up if needed. 3596 */ 3597 if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue)) 3598 netif_tx_wake_queue(txq); 3599 } 3600 3601 /** 3602 * Note: Function assumes that we have 4 outbound queues. 3603 */ 3604 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb) 3605 { 3606 struct vlan_ethhdr *veth = vlan_eth_hdr(skb); 3607 u8 tos; 3608 3609 switch (card->qdio.do_prio_queueing) { 3610 case QETH_PRIO_Q_ING_TOS: 3611 case QETH_PRIO_Q_ING_PREC: 3612 switch (qeth_get_ip_version(skb)) { 3613 case 4: 3614 tos = ipv4_get_dsfield(ip_hdr(skb)); 3615 break; 3616 case 6: 3617 tos = ipv6_get_dsfield(ipv6_hdr(skb)); 3618 break; 3619 default: 3620 return card->qdio.default_out_queue; 3621 } 3622 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) 3623 return ~tos >> 6 & 3; 3624 if (tos & IPTOS_MINCOST) 3625 return 3; 3626 if (tos & IPTOS_RELIABILITY) 3627 return 2; 3628 if (tos & IPTOS_THROUGHPUT) 3629 return 1; 3630 if (tos & IPTOS_LOWDELAY) 3631 return 0; 3632 break; 3633 case QETH_PRIO_Q_ING_SKB: 3634 if (skb->priority > 5) 3635 return 0; 3636 return ~skb->priority >> 1 & 3; 3637 case QETH_PRIO_Q_ING_VLAN: 3638 if (veth->h_vlan_proto == htons(ETH_P_8021Q)) 3639 return ~ntohs(veth->h_vlan_TCI) >> 3640 (VLAN_PRIO_SHIFT + 1) & 3; 3641 break; 3642 default: 3643 break; 3644 } 3645 return card->qdio.default_out_queue; 3646 } 3647 EXPORT_SYMBOL_GPL(qeth_get_priority_queue); 3648 3649 /** 3650 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags. 3651 * @skb: SKB address 3652 * 3653 * Returns the number of pages, and thus QDIO buffer elements, needed to cover 3654 * fragmented part of the SKB. Returns zero for linear SKB. 3655 */ 3656 static int qeth_get_elements_for_frags(struct sk_buff *skb) 3657 { 3658 int cnt, elements = 0; 3659 3660 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3661 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt]; 3662 3663 elements += qeth_get_elements_for_range( 3664 (addr_t)skb_frag_address(frag), 3665 (addr_t)skb_frag_address(frag) + skb_frag_size(frag)); 3666 } 3667 return elements; 3668 } 3669 3670 /** 3671 * qeth_count_elements() - Counts the number of QDIO buffer elements needed 3672 * to transmit an skb. 3673 * @skb: the skb to operate on. 3674 * @data_offset: skip this part of the skb's linear data 3675 * 3676 * Returns the number of pages, and thus QDIO buffer elements, needed to map the 3677 * skb's data (both its linear part and paged fragments). 3678 */ 3679 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset) 3680 { 3681 unsigned int elements = qeth_get_elements_for_frags(skb); 3682 addr_t end = (addr_t)skb->data + skb_headlen(skb); 3683 addr_t start = (addr_t)skb->data + data_offset; 3684 3685 if (start != end) 3686 elements += qeth_get_elements_for_range(start, end); 3687 return elements; 3688 } 3689 EXPORT_SYMBOL_GPL(qeth_count_elements); 3690 3691 #define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \ 3692 MAX_TCP_HEADER) 3693 3694 /** 3695 * qeth_add_hw_header() - add a HW header to an skb. 3696 * @skb: skb that the HW header should be added to. 3697 * @hdr: double pointer to a qeth_hdr. When returning with >= 0, 3698 * it contains a valid pointer to a qeth_hdr. 3699 * @hdr_len: length of the HW header. 3700 * @proto_len: length of protocol headers that need to be in same page as the 3701 * HW header. 3702 * 3703 * Returns the pushed length. If the header can't be pushed on 3704 * (eg. because it would cross a page boundary), it is allocated from 3705 * the cache instead and 0 is returned. 3706 * The number of needed buffer elements is returned in @elements. 3707 * Error to create the hdr is indicated by returning with < 0. 3708 */ 3709 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue, 3710 struct sk_buff *skb, struct qeth_hdr **hdr, 3711 unsigned int hdr_len, unsigned int proto_len, 3712 unsigned int *elements) 3713 { 3714 const unsigned int contiguous = proto_len ? proto_len : 1; 3715 const unsigned int max_elements = queue->max_elements; 3716 unsigned int __elements; 3717 addr_t start, end; 3718 bool push_ok; 3719 int rc; 3720 3721 check_layout: 3722 start = (addr_t)skb->data - hdr_len; 3723 end = (addr_t)skb->data; 3724 3725 if (qeth_get_elements_for_range(start, end + contiguous) == 1) { 3726 /* Push HW header into same page as first protocol header. */ 3727 push_ok = true; 3728 /* ... but TSO always needs a separate element for headers: */ 3729 if (skb_is_gso(skb)) 3730 __elements = 1 + qeth_count_elements(skb, proto_len); 3731 else 3732 __elements = qeth_count_elements(skb, 0); 3733 } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) { 3734 /* Push HW header into a new page. */ 3735 push_ok = true; 3736 __elements = 1 + qeth_count_elements(skb, 0); 3737 } else { 3738 /* Use header cache, copy protocol headers up. */ 3739 push_ok = false; 3740 __elements = 1 + qeth_count_elements(skb, proto_len); 3741 } 3742 3743 /* Compress skb to fit into one IO buffer: */ 3744 if (__elements > max_elements) { 3745 if (!skb_is_nonlinear(skb)) { 3746 /* Drop it, no easy way of shrinking it further. */ 3747 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n", 3748 max_elements, __elements, skb->len); 3749 return -E2BIG; 3750 } 3751 3752 rc = skb_linearize(skb); 3753 if (rc) { 3754 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail); 3755 return rc; 3756 } 3757 3758 QETH_TXQ_STAT_INC(queue, skbs_linearized); 3759 /* Linearization changed the layout, re-evaluate: */ 3760 goto check_layout; 3761 } 3762 3763 *elements = __elements; 3764 /* Add the header: */ 3765 if (push_ok) { 3766 *hdr = skb_push(skb, hdr_len); 3767 return hdr_len; 3768 } 3769 /* fall back */ 3770 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE) 3771 return -E2BIG; 3772 *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); 3773 if (!*hdr) 3774 return -ENOMEM; 3775 /* Copy protocol headers behind HW header: */ 3776 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len); 3777 return 0; 3778 } 3779 3780 static void __qeth_fill_buffer(struct sk_buff *skb, 3781 struct qeth_qdio_out_buffer *buf, 3782 bool is_first_elem, unsigned int offset) 3783 { 3784 struct qdio_buffer *buffer = buf->buffer; 3785 int element = buf->next_element_to_fill; 3786 int length = skb_headlen(skb) - offset; 3787 char *data = skb->data + offset; 3788 int length_here, cnt; 3789 3790 /* map linear part into buffer element(s) */ 3791 while (length > 0) { 3792 /* length_here is the remaining amount of data in this page */ 3793 length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE); 3794 if (length < length_here) 3795 length_here = length; 3796 3797 buffer->element[element].addr = data; 3798 buffer->element[element].length = length_here; 3799 length -= length_here; 3800 if (is_first_elem) { 3801 is_first_elem = false; 3802 if (length || skb_is_nonlinear(skb)) 3803 /* skb needs additional elements */ 3804 buffer->element[element].eflags = 3805 SBAL_EFLAGS_FIRST_FRAG; 3806 else 3807 buffer->element[element].eflags = 0; 3808 } else { 3809 buffer->element[element].eflags = 3810 SBAL_EFLAGS_MIDDLE_FRAG; 3811 } 3812 data += length_here; 3813 element++; 3814 } 3815 3816 /* map page frags into buffer element(s) */ 3817 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3818 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 3819 3820 data = skb_frag_address(frag); 3821 length = skb_frag_size(frag); 3822 while (length > 0) { 3823 length_here = PAGE_SIZE - 3824 ((unsigned long) data % PAGE_SIZE); 3825 if (length < length_here) 3826 length_here = length; 3827 3828 buffer->element[element].addr = data; 3829 buffer->element[element].length = length_here; 3830 buffer->element[element].eflags = 3831 SBAL_EFLAGS_MIDDLE_FRAG; 3832 length -= length_here; 3833 data += length_here; 3834 element++; 3835 } 3836 } 3837 3838 if (buffer->element[element - 1].eflags) 3839 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; 3840 buf->next_element_to_fill = element; 3841 } 3842 3843 /** 3844 * qeth_fill_buffer() - map skb into an output buffer 3845 * @queue: QDIO queue to submit the buffer on 3846 * @buf: buffer to transport the skb 3847 * @skb: skb to map into the buffer 3848 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated 3849 * from qeth_core_header_cache. 3850 * @offset: when mapping the skb, start at skb->data + offset 3851 * @hd_len: if > 0, build a dedicated header element of this size 3852 * flush: Prepare the buffer to be flushed, regardless of its fill level. 3853 */ 3854 static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, 3855 struct qeth_qdio_out_buffer *buf, 3856 struct sk_buff *skb, struct qeth_hdr *hdr, 3857 unsigned int offset, unsigned int hd_len, 3858 bool flush) 3859 { 3860 struct qdio_buffer *buffer = buf->buffer; 3861 bool is_first_elem = true; 3862 3863 __skb_queue_tail(&buf->skb_list, skb); 3864 3865 /* build dedicated header element */ 3866 if (hd_len) { 3867 int element = buf->next_element_to_fill; 3868 is_first_elem = false; 3869 3870 buffer->element[element].addr = hdr; 3871 buffer->element[element].length = hd_len; 3872 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; 3873 /* remember to free cache-allocated qeth_hdr: */ 3874 buf->is_header[element] = ((void *)hdr != skb->data); 3875 buf->next_element_to_fill++; 3876 } 3877 3878 __qeth_fill_buffer(skb, buf, is_first_elem, offset); 3879 3880 if (!queue->do_pack) { 3881 QETH_CARD_TEXT(queue->card, 6, "fillbfnp"); 3882 } else { 3883 QETH_CARD_TEXT(queue->card, 6, "fillbfpa"); 3884 3885 QETH_TXQ_STAT_INC(queue, skbs_pack); 3886 /* If the buffer still has free elements, keep using it. */ 3887 if (!flush && 3888 buf->next_element_to_fill < queue->max_elements) 3889 return 0; 3890 } 3891 3892 /* flush out the buffer */ 3893 atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); 3894 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % 3895 QDIO_MAX_BUFFERS_PER_Q; 3896 return 1; 3897 } 3898 3899 static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, 3900 struct sk_buff *skb, struct qeth_hdr *hdr, 3901 unsigned int offset, unsigned int hd_len) 3902 { 3903 int index = queue->next_buf_to_fill; 3904 struct qeth_qdio_out_buffer *buffer = queue->bufs[index]; 3905 struct netdev_queue *txq; 3906 bool stopped = false; 3907 3908 /* Just a sanity check, the wake/stop logic should ensure that we always 3909 * get a free buffer. 3910 */ 3911 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) 3912 return -EBUSY; 3913 3914 txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb)); 3915 3916 if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 3917 /* If a TX completion happens right _here_ and misses to wake 3918 * the txq, then our re-check below will catch the race. 3919 */ 3920 QETH_TXQ_STAT_INC(queue, stopped); 3921 netif_tx_stop_queue(txq); 3922 stopped = true; 3923 } 3924 3925 qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped); 3926 qeth_flush_buffers(queue, index, 1); 3927 3928 if (stopped && !qeth_out_queue_is_full(queue)) 3929 netif_tx_start_queue(txq); 3930 return 0; 3931 } 3932 3933 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, 3934 struct sk_buff *skb, struct qeth_hdr *hdr, 3935 unsigned int offset, unsigned int hd_len, 3936 int elements_needed) 3937 { 3938 struct qeth_qdio_out_buffer *buffer; 3939 struct netdev_queue *txq; 3940 bool stopped = false; 3941 int start_index; 3942 int flush_count = 0; 3943 int do_pack = 0; 3944 int tmp; 3945 int rc = 0; 3946 3947 /* spin until we get the queue ... */ 3948 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, 3949 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); 3950 start_index = queue->next_buf_to_fill; 3951 buffer = queue->bufs[queue->next_buf_to_fill]; 3952 3953 /* Just a sanity check, the wake/stop logic should ensure that we always 3954 * get a free buffer. 3955 */ 3956 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { 3957 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3958 return -EBUSY; 3959 } 3960 3961 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 3962 3963 /* check if we need to switch packing state of this queue */ 3964 qeth_switch_to_packing_if_needed(queue); 3965 if (queue->do_pack) { 3966 do_pack = 1; 3967 /* does packet fit in current buffer? */ 3968 if (buffer->next_element_to_fill + elements_needed > 3969 queue->max_elements) { 3970 /* ... no -> set state PRIMED */ 3971 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3972 flush_count++; 3973 queue->next_buf_to_fill = 3974 (queue->next_buf_to_fill + 1) % 3975 QDIO_MAX_BUFFERS_PER_Q; 3976 buffer = queue->bufs[queue->next_buf_to_fill]; 3977 3978 /* We stepped forward, so sanity-check again: */ 3979 if (atomic_read(&buffer->state) != 3980 QETH_QDIO_BUF_EMPTY) { 3981 qeth_flush_buffers(queue, start_index, 3982 flush_count); 3983 atomic_set(&queue->state, 3984 QETH_OUT_Q_UNLOCKED); 3985 rc = -EBUSY; 3986 goto out; 3987 } 3988 } 3989 } 3990 3991 if (buffer->next_element_to_fill == 0 && 3992 atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { 3993 /* If a TX completion happens right _here_ and misses to wake 3994 * the txq, then our re-check below will catch the race. 3995 */ 3996 QETH_TXQ_STAT_INC(queue, stopped); 3997 netif_tx_stop_queue(txq); 3998 stopped = true; 3999 } 4000 4001 flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, 4002 stopped); 4003 if (flush_count) 4004 qeth_flush_buffers(queue, start_index, flush_count); 4005 else if (!atomic_read(&queue->set_pci_flags_count)) 4006 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); 4007 /* 4008 * queue->state will go from LOCKED -> UNLOCKED or from 4009 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us 4010 * (switch packing state or flush buffer to get another pci flag out). 4011 * In that case we will enter this loop 4012 */ 4013 while (atomic_dec_return(&queue->state)) { 4014 start_index = queue->next_buf_to_fill; 4015 /* check if we can go back to non-packing state */ 4016 tmp = qeth_switch_to_nonpacking_if_needed(queue); 4017 /* 4018 * check if we need to flush a packing buffer to get a pci 4019 * flag out on the queue 4020 */ 4021 if (!tmp && !atomic_read(&queue->set_pci_flags_count)) 4022 tmp = qeth_prep_flush_pack_buffer(queue); 4023 if (tmp) { 4024 qeth_flush_buffers(queue, start_index, tmp); 4025 flush_count += tmp; 4026 } 4027 } 4028 out: 4029 /* at this point the queue is UNLOCKED again */ 4030 if (do_pack) 4031 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); 4032 4033 if (stopped && !qeth_out_queue_is_full(queue)) 4034 netif_tx_start_queue(txq); 4035 return rc; 4036 } 4037 EXPORT_SYMBOL_GPL(qeth_do_send_packet); 4038 4039 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, 4040 unsigned int payload_len, struct sk_buff *skb, 4041 unsigned int proto_len) 4042 { 4043 struct qeth_hdr_ext_tso *ext = &hdr->ext; 4044 4045 ext->hdr_tot_len = sizeof(*ext); 4046 ext->imb_hdr_no = 1; 4047 ext->hdr_type = 1; 4048 ext->hdr_version = 1; 4049 ext->hdr_len = 28; 4050 ext->payload_len = payload_len; 4051 ext->mss = skb_shinfo(skb)->gso_size; 4052 ext->dg_hdr_len = proto_len; 4053 } 4054 4055 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, 4056 struct qeth_qdio_out_q *queue, int ipv, int cast_type, 4057 void (*fill_header)(struct qeth_qdio_out_q *queue, 4058 struct qeth_hdr *hdr, struct sk_buff *skb, 4059 int ipv, int cast_type, 4060 unsigned int data_len)) 4061 { 4062 unsigned int proto_len, hw_hdr_len; 4063 unsigned int frame_len = skb->len; 4064 bool is_tso = skb_is_gso(skb); 4065 unsigned int data_offset = 0; 4066 struct qeth_hdr *hdr = NULL; 4067 unsigned int hd_len = 0; 4068 unsigned int elements; 4069 int push_len, rc; 4070 bool is_sg; 4071 4072 if (is_tso) { 4073 hw_hdr_len = sizeof(struct qeth_hdr_tso); 4074 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4075 } else { 4076 hw_hdr_len = sizeof(struct qeth_hdr); 4077 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0; 4078 } 4079 4080 rc = skb_cow_head(skb, hw_hdr_len); 4081 if (rc) 4082 return rc; 4083 4084 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len, 4085 &elements); 4086 if (push_len < 0) 4087 return push_len; 4088 if (is_tso || !push_len) { 4089 /* HW header needs its own buffer element. */ 4090 hd_len = hw_hdr_len + proto_len; 4091 data_offset = push_len + proto_len; 4092 } 4093 memset(hdr, 0, hw_hdr_len); 4094 fill_header(queue, hdr, skb, ipv, cast_type, frame_len); 4095 if (is_tso) 4096 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, 4097 frame_len - proto_len, skb, proto_len); 4098 4099 is_sg = skb_is_nonlinear(skb); 4100 if (IS_IQD(card)) { 4101 rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset, 4102 hd_len); 4103 } else { 4104 /* TODO: drop skb_orphan() once TX completion is fast enough */ 4105 skb_orphan(skb); 4106 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, 4107 hd_len, elements); 4108 } 4109 4110 if (!rc) { 4111 QETH_TXQ_STAT_ADD(queue, buf_elements, elements); 4112 if (is_sg) 4113 QETH_TXQ_STAT_INC(queue, skbs_sg); 4114 if (is_tso) { 4115 QETH_TXQ_STAT_INC(queue, skbs_tso); 4116 QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len); 4117 } 4118 } else { 4119 if (!push_len) 4120 kmem_cache_free(qeth_core_header_cache, hdr); 4121 } 4122 return rc; 4123 } 4124 EXPORT_SYMBOL_GPL(qeth_xmit); 4125 4126 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, 4127 struct qeth_reply *reply, unsigned long data) 4128 { 4129 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4130 struct qeth_ipacmd_setadpparms *setparms; 4131 4132 QETH_CARD_TEXT(card, 4, "prmadpcb"); 4133 4134 setparms = &(cmd->data.setadapterparms); 4135 if (qeth_setadpparms_inspect_rc(cmd)) { 4136 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); 4137 setparms->data.mode = SET_PROMISC_MODE_OFF; 4138 } 4139 card->info.promisc_mode = setparms->data.mode; 4140 return (cmd->hdr.return_code) ? -EIO : 0; 4141 } 4142 4143 void qeth_setadp_promisc_mode(struct qeth_card *card) 4144 { 4145 enum qeth_ipa_promisc_modes mode; 4146 struct net_device *dev = card->dev; 4147 struct qeth_cmd_buffer *iob; 4148 struct qeth_ipa_cmd *cmd; 4149 4150 QETH_CARD_TEXT(card, 4, "setprom"); 4151 4152 if (((dev->flags & IFF_PROMISC) && 4153 (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || 4154 (!(dev->flags & IFF_PROMISC) && 4155 (card->info.promisc_mode == SET_PROMISC_MODE_OFF))) 4156 return; 4157 mode = SET_PROMISC_MODE_OFF; 4158 if (dev->flags & IFF_PROMISC) 4159 mode = SET_PROMISC_MODE_ON; 4160 QETH_CARD_TEXT_(card, 4, "mode:%x", mode); 4161 4162 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, 4163 sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8); 4164 if (!iob) 4165 return; 4166 cmd = __ipa_cmd(iob); 4167 cmd->data.setadapterparms.data.mode = mode; 4168 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); 4169 } 4170 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); 4171 4172 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, 4173 struct qeth_reply *reply, unsigned long data) 4174 { 4175 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4176 struct qeth_ipacmd_setadpparms *adp_cmd; 4177 4178 QETH_CARD_TEXT(card, 4, "chgmaccb"); 4179 if (qeth_setadpparms_inspect_rc(cmd)) 4180 return -EIO; 4181 4182 adp_cmd = &cmd->data.setadapterparms; 4183 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr)) 4184 return -EADDRNOTAVAIL; 4185 4186 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) && 4187 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) 4188 return -EADDRNOTAVAIL; 4189 4190 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr); 4191 return 0; 4192 } 4193 4194 int qeth_setadpparms_change_macaddr(struct qeth_card *card) 4195 { 4196 int rc; 4197 struct qeth_cmd_buffer *iob; 4198 struct qeth_ipa_cmd *cmd; 4199 4200 QETH_CARD_TEXT(card, 4, "chgmac"); 4201 4202 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, 4203 sizeof(struct qeth_ipacmd_setadpparms_hdr) + 4204 sizeof(struct qeth_change_addr)); 4205 if (!iob) 4206 return -ENOMEM; 4207 cmd = __ipa_cmd(iob); 4208 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; 4209 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN; 4210 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr, 4211 card->dev->dev_addr); 4212 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, 4213 NULL); 4214 return rc; 4215 } 4216 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 4217 4218 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, 4219 struct qeth_reply *reply, unsigned long data) 4220 { 4221 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4222 struct qeth_set_access_ctrl *access_ctrl_req; 4223 int fallback = *(int *)reply->param; 4224 4225 QETH_CARD_TEXT(card, 4, "setaccb"); 4226 if (cmd->hdr.return_code) 4227 return -EIO; 4228 qeth_setadpparms_inspect_rc(cmd); 4229 4230 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4231 QETH_DBF_TEXT_(SETUP, 2, "setaccb"); 4232 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); 4233 QETH_DBF_TEXT_(SETUP, 2, "rc=%d", 4234 cmd->data.setadapterparms.hdr.return_code); 4235 if (cmd->data.setadapterparms.hdr.return_code != 4236 SET_ACCESS_CTRL_RC_SUCCESS) 4237 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", 4238 access_ctrl_req->subcmd_code, CARD_DEVID(card), 4239 cmd->data.setadapterparms.hdr.return_code); 4240 switch (cmd->data.setadapterparms.hdr.return_code) { 4241 case SET_ACCESS_CTRL_RC_SUCCESS: 4242 if (card->options.isolation == ISOLATION_MODE_NONE) { 4243 dev_info(&card->gdev->dev, 4244 "QDIO data connection isolation is deactivated\n"); 4245 } else { 4246 dev_info(&card->gdev->dev, 4247 "QDIO data connection isolation is activated\n"); 4248 } 4249 break; 4250 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: 4251 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n", 4252 CARD_DEVID(card)); 4253 if (fallback) 4254 card->options.isolation = card->options.prev_isolation; 4255 break; 4256 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: 4257 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n", 4258 CARD_DEVID(card)); 4259 if (fallback) 4260 card->options.isolation = card->options.prev_isolation; 4261 break; 4262 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: 4263 dev_err(&card->gdev->dev, "Adapter does not " 4264 "support QDIO data connection isolation\n"); 4265 break; 4266 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: 4267 dev_err(&card->gdev->dev, 4268 "Adapter is dedicated. " 4269 "QDIO data connection isolation not supported\n"); 4270 if (fallback) 4271 card->options.isolation = card->options.prev_isolation; 4272 break; 4273 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: 4274 dev_err(&card->gdev->dev, 4275 "TSO does not permit QDIO data connection isolation\n"); 4276 if (fallback) 4277 card->options.isolation = card->options.prev_isolation; 4278 break; 4279 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: 4280 dev_err(&card->gdev->dev, "The adjacent switch port does not " 4281 "support reflective relay mode\n"); 4282 if (fallback) 4283 card->options.isolation = card->options.prev_isolation; 4284 break; 4285 case SET_ACCESS_CTRL_RC_REFLREL_FAILED: 4286 dev_err(&card->gdev->dev, "The reflective relay mode cannot be " 4287 "enabled at the adjacent switch port"); 4288 if (fallback) 4289 card->options.isolation = card->options.prev_isolation; 4290 break; 4291 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: 4292 dev_warn(&card->gdev->dev, "Turning off reflective relay mode " 4293 "at the adjacent switch failed\n"); 4294 break; 4295 default: 4296 /* this should never happen */ 4297 if (fallback) 4298 card->options.isolation = card->options.prev_isolation; 4299 break; 4300 } 4301 return (cmd->hdr.return_code) ? -EIO : 0; 4302 } 4303 4304 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, 4305 enum qeth_ipa_isolation_modes isolation, int fallback) 4306 { 4307 int rc; 4308 struct qeth_cmd_buffer *iob; 4309 struct qeth_ipa_cmd *cmd; 4310 struct qeth_set_access_ctrl *access_ctrl_req; 4311 4312 QETH_CARD_TEXT(card, 4, "setacctl"); 4313 4314 QETH_DBF_TEXT_(SETUP, 2, "setacctl"); 4315 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); 4316 4317 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, 4318 sizeof(struct qeth_ipacmd_setadpparms_hdr) + 4319 sizeof(struct qeth_set_access_ctrl)); 4320 if (!iob) 4321 return -ENOMEM; 4322 cmd = __ipa_cmd(iob); 4323 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4324 access_ctrl_req->subcmd_code = isolation; 4325 4326 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, 4327 &fallback); 4328 QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc); 4329 return rc; 4330 } 4331 4332 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) 4333 { 4334 int rc = 0; 4335 4336 QETH_CARD_TEXT(card, 4, "setactlo"); 4337 4338 if ((IS_OSD(card) || IS_OSX(card)) && 4339 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { 4340 rc = qeth_setadpparms_set_access_ctrl(card, 4341 card->options.isolation, fallback); 4342 if (rc) { 4343 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", 4344 rc, CARD_DEVID(card)); 4345 rc = -EOPNOTSUPP; 4346 } 4347 } else if (card->options.isolation != ISOLATION_MODE_NONE) { 4348 card->options.isolation = ISOLATION_MODE_NONE; 4349 4350 dev_err(&card->gdev->dev, "Adapter does not " 4351 "support QDIO data connection isolation\n"); 4352 rc = -EOPNOTSUPP; 4353 } 4354 return rc; 4355 } 4356 EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online); 4357 4358 void qeth_tx_timeout(struct net_device *dev) 4359 { 4360 struct qeth_card *card; 4361 4362 card = dev->ml_priv; 4363 QETH_CARD_TEXT(card, 4, "txtimeo"); 4364 qeth_schedule_recovery(card); 4365 } 4366 EXPORT_SYMBOL_GPL(qeth_tx_timeout); 4367 4368 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) 4369 { 4370 struct qeth_card *card = dev->ml_priv; 4371 int rc = 0; 4372 4373 switch (regnum) { 4374 case MII_BMCR: /* Basic mode control register */ 4375 rc = BMCR_FULLDPLX; 4376 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && 4377 (card->info.link_type != QETH_LINK_TYPE_OSN) && 4378 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) && 4379 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH)) 4380 rc |= BMCR_SPEED100; 4381 break; 4382 case MII_BMSR: /* Basic mode status register */ 4383 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS | 4384 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | 4385 BMSR_100BASE4; 4386 break; 4387 case MII_PHYSID1: /* PHYS ID 1 */ 4388 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | 4389 dev->dev_addr[2]; 4390 rc = (rc >> 5) & 0xFFFF; 4391 break; 4392 case MII_PHYSID2: /* PHYS ID 2 */ 4393 rc = (dev->dev_addr[2] << 10) & 0xFFFF; 4394 break; 4395 case MII_ADVERTISE: /* Advertisement control reg */ 4396 rc = ADVERTISE_ALL; 4397 break; 4398 case MII_LPA: /* Link partner ability reg */ 4399 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | 4400 LPA_100BASE4 | LPA_LPACK; 4401 break; 4402 case MII_EXPANSION: /* Expansion register */ 4403 break; 4404 case MII_DCOUNTER: /* disconnect counter */ 4405 break; 4406 case MII_FCSCOUNTER: /* false carrier counter */ 4407 break; 4408 case MII_NWAYTEST: /* N-way auto-neg test register */ 4409 break; 4410 case MII_RERRCOUNTER: /* rx error counter */ 4411 rc = card->stats.rx_errors; 4412 break; 4413 case MII_SREVISION: /* silicon revision */ 4414 break; 4415 case MII_RESV1: /* reserved 1 */ 4416 break; 4417 case MII_LBRERROR: /* loopback, rx, bypass error */ 4418 break; 4419 case MII_PHYADDR: /* physical address */ 4420 break; 4421 case MII_RESV2: /* reserved 2 */ 4422 break; 4423 case MII_TPISTATUS: /* TPI status for 10mbps */ 4424 break; 4425 case MII_NCONFIG: /* network interface config */ 4426 break; 4427 default: 4428 break; 4429 } 4430 return rc; 4431 } 4432 4433 static int qeth_snmp_command_cb(struct qeth_card *card, 4434 struct qeth_reply *reply, unsigned long sdata) 4435 { 4436 struct qeth_ipa_cmd *cmd; 4437 struct qeth_arp_query_info *qinfo; 4438 unsigned char *data; 4439 void *snmp_data; 4440 __u16 data_len; 4441 4442 QETH_CARD_TEXT(card, 3, "snpcmdcb"); 4443 4444 cmd = (struct qeth_ipa_cmd *) sdata; 4445 data = (unsigned char *)((char *)cmd - reply->offset); 4446 qinfo = (struct qeth_arp_query_info *) reply->param; 4447 4448 if (cmd->hdr.return_code) { 4449 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); 4450 return -EIO; 4451 } 4452 if (cmd->data.setadapterparms.hdr.return_code) { 4453 cmd->hdr.return_code = 4454 cmd->data.setadapterparms.hdr.return_code; 4455 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); 4456 return -EIO; 4457 } 4458 data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); 4459 if (cmd->data.setadapterparms.hdr.seq_no == 1) { 4460 snmp_data = &cmd->data.setadapterparms.data.snmp; 4461 data_len -= offsetof(struct qeth_ipa_cmd, 4462 data.setadapterparms.data.snmp); 4463 } else { 4464 snmp_data = &cmd->data.setadapterparms.data.snmp.request; 4465 data_len -= offsetof(struct qeth_ipa_cmd, 4466 data.setadapterparms.data.snmp.request); 4467 } 4468 4469 /* check if there is enough room in userspace */ 4470 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { 4471 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC); 4472 return -ENOSPC; 4473 } 4474 QETH_CARD_TEXT_(card, 4, "snore%i", 4475 cmd->data.setadapterparms.hdr.used_total); 4476 QETH_CARD_TEXT_(card, 4, "sseqn%i", 4477 cmd->data.setadapterparms.hdr.seq_no); 4478 /*copy entries to user buffer*/ 4479 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); 4480 qinfo->udata_offset += data_len; 4481 4482 /* check if all replies received ... */ 4483 QETH_CARD_TEXT_(card, 4, "srtot%i", 4484 cmd->data.setadapterparms.hdr.used_total); 4485 QETH_CARD_TEXT_(card, 4, "srseq%i", 4486 cmd->data.setadapterparms.hdr.seq_no); 4487 if (cmd->data.setadapterparms.hdr.seq_no < 4488 cmd->data.setadapterparms.hdr.used_total) 4489 return 1; 4490 return 0; 4491 } 4492 4493 static int qeth_snmp_command(struct qeth_card *card, char __user *udata) 4494 { 4495 struct qeth_cmd_buffer *iob; 4496 struct qeth_ipa_cmd *cmd; 4497 struct qeth_snmp_ureq *ureq; 4498 unsigned int req_len; 4499 struct qeth_arp_query_info qinfo = {0, }; 4500 int rc = 0; 4501 4502 QETH_CARD_TEXT(card, 3, "snmpcmd"); 4503 4504 if (IS_VM_NIC(card)) 4505 return -EOPNOTSUPP; 4506 4507 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && 4508 IS_LAYER3(card)) 4509 return -EOPNOTSUPP; 4510 4511 /* skip 4 bytes (data_len struct member) to get req_len */ 4512 if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) 4513 return -EFAULT; 4514 if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE - 4515 sizeof(struct qeth_ipacmd_hdr) - 4516 sizeof(struct qeth_ipacmd_setadpparms_hdr))) 4517 return -EINVAL; 4518 ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr)); 4519 if (IS_ERR(ureq)) { 4520 QETH_CARD_TEXT(card, 2, "snmpnome"); 4521 return PTR_ERR(ureq); 4522 } 4523 qinfo.udata_len = ureq->hdr.data_len; 4524 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 4525 if (!qinfo.udata) { 4526 kfree(ureq); 4527 return -ENOMEM; 4528 } 4529 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); 4530 4531 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, 4532 QETH_SNMP_SETADP_CMDLENGTH + req_len); 4533 if (!iob) { 4534 rc = -ENOMEM; 4535 goto out; 4536 } 4537 4538 /* for large requests, fix-up the length fields: */ 4539 qeth_prepare_ipa_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len); 4540 4541 cmd = __ipa_cmd(iob); 4542 memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); 4543 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo); 4544 if (rc) 4545 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", 4546 CARD_DEVID(card), rc); 4547 else { 4548 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 4549 rc = -EFAULT; 4550 } 4551 out: 4552 kfree(ureq); 4553 kfree(qinfo.udata); 4554 return rc; 4555 } 4556 4557 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, 4558 struct qeth_reply *reply, unsigned long data) 4559 { 4560 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4561 struct qeth_qoat_priv *priv; 4562 char *resdata; 4563 int resdatalen; 4564 4565 QETH_CARD_TEXT(card, 3, "qoatcb"); 4566 if (qeth_setadpparms_inspect_rc(cmd)) 4567 return -EIO; 4568 4569 priv = (struct qeth_qoat_priv *)reply->param; 4570 resdatalen = cmd->data.setadapterparms.hdr.cmdlength; 4571 resdata = (char *)data + 28; 4572 4573 if (resdatalen > (priv->buffer_len - priv->response_len)) 4574 return -ENOSPC; 4575 4576 memcpy((priv->buffer + priv->response_len), resdata, 4577 resdatalen); 4578 priv->response_len += resdatalen; 4579 4580 if (cmd->data.setadapterparms.hdr.seq_no < 4581 cmd->data.setadapterparms.hdr.used_total) 4582 return 1; 4583 return 0; 4584 } 4585 4586 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) 4587 { 4588 int rc = 0; 4589 struct qeth_cmd_buffer *iob; 4590 struct qeth_ipa_cmd *cmd; 4591 struct qeth_query_oat *oat_req; 4592 struct qeth_query_oat_data oat_data; 4593 struct qeth_qoat_priv priv; 4594 void __user *tmp; 4595 4596 QETH_CARD_TEXT(card, 3, "qoatcmd"); 4597 4598 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { 4599 rc = -EOPNOTSUPP; 4600 goto out; 4601 } 4602 4603 if (copy_from_user(&oat_data, udata, 4604 sizeof(struct qeth_query_oat_data))) { 4605 rc = -EFAULT; 4606 goto out; 4607 } 4608 4609 priv.buffer_len = oat_data.buffer_len; 4610 priv.response_len = 0; 4611 priv.buffer = vzalloc(oat_data.buffer_len); 4612 if (!priv.buffer) { 4613 rc = -ENOMEM; 4614 goto out; 4615 } 4616 4617 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4618 sizeof(struct qeth_ipacmd_setadpparms_hdr) + 4619 sizeof(struct qeth_query_oat)); 4620 if (!iob) { 4621 rc = -ENOMEM; 4622 goto out_free; 4623 } 4624 cmd = __ipa_cmd(iob); 4625 oat_req = &cmd->data.setadapterparms.data.query_oat; 4626 oat_req->subcmd_code = oat_data.command; 4627 4628 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, 4629 &priv); 4630 if (!rc) { 4631 if (is_compat_task()) 4632 tmp = compat_ptr(oat_data.ptr); 4633 else 4634 tmp = (void __user *)(unsigned long)oat_data.ptr; 4635 4636 if (copy_to_user(tmp, priv.buffer, 4637 priv.response_len)) { 4638 rc = -EFAULT; 4639 goto out_free; 4640 } 4641 4642 oat_data.response_len = priv.response_len; 4643 4644 if (copy_to_user(udata, &oat_data, 4645 sizeof(struct qeth_query_oat_data))) 4646 rc = -EFAULT; 4647 } 4648 4649 out_free: 4650 vfree(priv.buffer); 4651 out: 4652 return rc; 4653 } 4654 4655 static int qeth_query_card_info_cb(struct qeth_card *card, 4656 struct qeth_reply *reply, unsigned long data) 4657 { 4658 struct carrier_info *carrier_info = (struct carrier_info *)reply->param; 4659 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4660 struct qeth_query_card_info *card_info; 4661 4662 QETH_CARD_TEXT(card, 2, "qcrdincb"); 4663 if (qeth_setadpparms_inspect_rc(cmd)) 4664 return -EIO; 4665 4666 card_info = &cmd->data.setadapterparms.data.card_info; 4667 carrier_info->card_type = card_info->card_type; 4668 carrier_info->port_mode = card_info->port_mode; 4669 carrier_info->port_speed = card_info->port_speed; 4670 return 0; 4671 } 4672 4673 int qeth_query_card_info(struct qeth_card *card, 4674 struct carrier_info *carrier_info) 4675 { 4676 struct qeth_cmd_buffer *iob; 4677 4678 QETH_CARD_TEXT(card, 2, "qcrdinfo"); 4679 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO)) 4680 return -EOPNOTSUPP; 4681 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 4682 sizeof(struct qeth_ipacmd_setadpparms_hdr)); 4683 if (!iob) 4684 return -ENOMEM; 4685 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, 4686 (void *)carrier_info); 4687 } 4688 4689 /** 4690 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address 4691 * @card: pointer to a qeth_card 4692 * 4693 * Returns 4694 * 0, if a MAC address has been set for the card's netdevice 4695 * a return code, for various error conditions 4696 */ 4697 int qeth_vm_request_mac(struct qeth_card *card) 4698 { 4699 struct diag26c_mac_resp *response; 4700 struct diag26c_mac_req *request; 4701 struct ccw_dev_id id; 4702 int rc; 4703 4704 QETH_DBF_TEXT(SETUP, 2, "vmreqmac"); 4705 4706 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 4707 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 4708 if (!request || !response) { 4709 rc = -ENOMEM; 4710 goto out; 4711 } 4712 4713 ccw_device_get_id(CARD_DDEV(card), &id); 4714 request->resp_buf_len = sizeof(*response); 4715 request->resp_version = DIAG26C_VERSION2; 4716 request->op_code = DIAG26C_GET_MAC; 4717 request->devno = id.devno; 4718 4719 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 4720 rc = diag26c(request, response, DIAG26C_MAC_SERVICES); 4721 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 4722 if (rc) 4723 goto out; 4724 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 4725 4726 if (request->resp_buf_len < sizeof(*response) || 4727 response->version != request->resp_version) { 4728 rc = -EIO; 4729 QETH_DBF_TEXT(SETUP, 2, "badresp"); 4730 QETH_DBF_HEX(SETUP, 2, &request->resp_buf_len, 4731 sizeof(request->resp_buf_len)); 4732 } else if (!is_valid_ether_addr(response->mac)) { 4733 rc = -EINVAL; 4734 QETH_DBF_TEXT(SETUP, 2, "badmac"); 4735 QETH_DBF_HEX(SETUP, 2, response->mac, ETH_ALEN); 4736 } else { 4737 ether_addr_copy(card->dev->dev_addr, response->mac); 4738 } 4739 4740 out: 4741 kfree(response); 4742 kfree(request); 4743 return rc; 4744 } 4745 EXPORT_SYMBOL_GPL(qeth_vm_request_mac); 4746 4747 static void qeth_determine_capabilities(struct qeth_card *card) 4748 { 4749 int rc; 4750 int length; 4751 char *prcd; 4752 struct ccw_device *ddev; 4753 int ddev_offline = 0; 4754 4755 QETH_DBF_TEXT(SETUP, 2, "detcapab"); 4756 ddev = CARD_DDEV(card); 4757 if (!ddev->online) { 4758 ddev_offline = 1; 4759 rc = ccw_device_set_online(ddev); 4760 if (rc) { 4761 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 4762 goto out; 4763 } 4764 } 4765 4766 rc = qeth_read_conf_data(card, (void **) &prcd, &length); 4767 if (rc) { 4768 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n", 4769 CARD_DEVID(card), rc); 4770 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 4771 goto out_offline; 4772 } 4773 qeth_configure_unitaddr(card, prcd); 4774 if (ddev_offline) 4775 qeth_configure_blkt_default(card, prcd); 4776 kfree(prcd); 4777 4778 rc = qdio_get_ssqd_desc(ddev, &card->ssqd); 4779 if (rc) 4780 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 4781 4782 QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt); 4783 QETH_DBF_TEXT_(SETUP, 2, "ac1:%02x", card->ssqd.qdioac1); 4784 QETH_DBF_TEXT_(SETUP, 2, "ac2:%04x", card->ssqd.qdioac2); 4785 QETH_DBF_TEXT_(SETUP, 2, "ac3:%04x", card->ssqd.qdioac3); 4786 QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt); 4787 if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) || 4788 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || 4789 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { 4790 dev_info(&card->gdev->dev, 4791 "Completion Queueing supported\n"); 4792 } else { 4793 card->options.cq = QETH_CQ_NOTAVAILABLE; 4794 } 4795 4796 4797 out_offline: 4798 if (ddev_offline == 1) 4799 ccw_device_set_offline(ddev); 4800 out: 4801 return; 4802 } 4803 4804 static void qeth_qdio_establish_cq(struct qeth_card *card, 4805 struct qdio_buffer **in_sbal_ptrs, 4806 void (**queue_start_poll) 4807 (struct ccw_device *, int, 4808 unsigned long)) 4809 { 4810 int i; 4811 4812 if (card->options.cq == QETH_CQ_ENABLED) { 4813 int offset = QDIO_MAX_BUFFERS_PER_Q * 4814 (card->qdio.no_in_queues - 1); 4815 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { 4816 in_sbal_ptrs[offset + i] = (struct qdio_buffer *) 4817 virt_to_phys(card->qdio.c_q->bufs[i].buffer); 4818 } 4819 4820 queue_start_poll[card->qdio.no_in_queues - 1] = NULL; 4821 } 4822 } 4823 4824 static int qeth_qdio_establish(struct qeth_card *card) 4825 { 4826 struct qdio_initialize init_data; 4827 char *qib_param_field; 4828 struct qdio_buffer **in_sbal_ptrs; 4829 void (**queue_start_poll) (struct ccw_device *, int, unsigned long); 4830 struct qdio_buffer **out_sbal_ptrs; 4831 int i, j, k; 4832 int rc = 0; 4833 4834 QETH_DBF_TEXT(SETUP, 2, "qdioest"); 4835 4836 qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q, 4837 GFP_KERNEL); 4838 if (!qib_param_field) { 4839 rc = -ENOMEM; 4840 goto out_free_nothing; 4841 } 4842 4843 qeth_create_qib_param_field(card, qib_param_field); 4844 qeth_create_qib_param_field_blkt(card, qib_param_field); 4845 4846 in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q, 4847 sizeof(void *), 4848 GFP_KERNEL); 4849 if (!in_sbal_ptrs) { 4850 rc = -ENOMEM; 4851 goto out_free_qib_param; 4852 } 4853 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { 4854 in_sbal_ptrs[i] = (struct qdio_buffer *) 4855 virt_to_phys(card->qdio.in_q->bufs[i].buffer); 4856 } 4857 4858 queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *), 4859 GFP_KERNEL); 4860 if (!queue_start_poll) { 4861 rc = -ENOMEM; 4862 goto out_free_in_sbals; 4863 } 4864 for (i = 0; i < card->qdio.no_in_queues; ++i) 4865 queue_start_poll[i] = qeth_qdio_start_poll; 4866 4867 qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll); 4868 4869 out_sbal_ptrs = 4870 kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q, 4871 sizeof(void *), 4872 GFP_KERNEL); 4873 if (!out_sbal_ptrs) { 4874 rc = -ENOMEM; 4875 goto out_free_queue_start_poll; 4876 } 4877 for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i) 4878 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) { 4879 out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys( 4880 card->qdio.out_qs[i]->bufs[j]->buffer); 4881 } 4882 4883 memset(&init_data, 0, sizeof(struct qdio_initialize)); 4884 init_data.cdev = CARD_DDEV(card); 4885 init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT : 4886 QDIO_QETH_QFMT; 4887 init_data.qib_param_field_format = 0; 4888 init_data.qib_param_field = qib_param_field; 4889 init_data.no_input_qs = card->qdio.no_in_queues; 4890 init_data.no_output_qs = card->qdio.no_out_queues; 4891 init_data.input_handler = qeth_qdio_input_handler; 4892 init_data.output_handler = qeth_qdio_output_handler; 4893 init_data.queue_start_poll_array = queue_start_poll; 4894 init_data.int_parm = (unsigned long) card; 4895 init_data.input_sbal_addr_array = in_sbal_ptrs; 4896 init_data.output_sbal_addr_array = out_sbal_ptrs; 4897 init_data.output_sbal_state_array = card->qdio.out_bufstates; 4898 init_data.scan_threshold = IS_IQD(card) ? 1 : 32; 4899 4900 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, 4901 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { 4902 rc = qdio_allocate(&init_data); 4903 if (rc) { 4904 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 4905 goto out; 4906 } 4907 rc = qdio_establish(&init_data); 4908 if (rc) { 4909 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 4910 qdio_free(CARD_DDEV(card)); 4911 } 4912 } 4913 4914 switch (card->options.cq) { 4915 case QETH_CQ_ENABLED: 4916 dev_info(&card->gdev->dev, "Completion Queue support enabled"); 4917 break; 4918 case QETH_CQ_DISABLED: 4919 dev_info(&card->gdev->dev, "Completion Queue support disabled"); 4920 break; 4921 default: 4922 break; 4923 } 4924 out: 4925 kfree(out_sbal_ptrs); 4926 out_free_queue_start_poll: 4927 kfree(queue_start_poll); 4928 out_free_in_sbals: 4929 kfree(in_sbal_ptrs); 4930 out_free_qib_param: 4931 kfree(qib_param_field); 4932 out_free_nothing: 4933 return rc; 4934 } 4935 4936 static void qeth_core_free_card(struct qeth_card *card) 4937 { 4938 QETH_DBF_TEXT(SETUP, 2, "freecrd"); 4939 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 4940 qeth_clean_channel(&card->read); 4941 qeth_clean_channel(&card->write); 4942 qeth_clean_channel(&card->data); 4943 destroy_workqueue(card->event_wq); 4944 qeth_free_qdio_queues(card); 4945 unregister_service_level(&card->qeth_service_level); 4946 dev_set_drvdata(&card->gdev->dev, NULL); 4947 kfree(card); 4948 } 4949 4950 void qeth_trace_features(struct qeth_card *card) 4951 { 4952 QETH_CARD_TEXT(card, 2, "features"); 4953 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4)); 4954 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6)); 4955 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp)); 4956 QETH_CARD_HEX(card, 2, &card->info.diagass_support, 4957 sizeof(card->info.diagass_support)); 4958 } 4959 EXPORT_SYMBOL_GPL(qeth_trace_features); 4960 4961 static struct ccw_device_id qeth_ids[] = { 4962 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), 4963 .driver_info = QETH_CARD_TYPE_OSD}, 4964 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), 4965 .driver_info = QETH_CARD_TYPE_IQD}, 4966 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06), 4967 .driver_info = QETH_CARD_TYPE_OSN}, 4968 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), 4969 .driver_info = QETH_CARD_TYPE_OSM}, 4970 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), 4971 .driver_info = QETH_CARD_TYPE_OSX}, 4972 {}, 4973 }; 4974 MODULE_DEVICE_TABLE(ccw, qeth_ids); 4975 4976 static struct ccw_driver qeth_ccw_driver = { 4977 .driver = { 4978 .owner = THIS_MODULE, 4979 .name = "qeth", 4980 }, 4981 .ids = qeth_ids, 4982 .probe = ccwgroup_probe_ccwdev, 4983 .remove = ccwgroup_remove_ccwdev, 4984 }; 4985 4986 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok) 4987 { 4988 int retries = 3; 4989 int rc; 4990 4991 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 4992 atomic_set(&card->force_alloc_skb, 0); 4993 rc = qeth_update_from_chp_desc(card); 4994 if (rc) 4995 return rc; 4996 retry: 4997 if (retries < 3) 4998 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", 4999 CARD_DEVID(card)); 5000 rc = qeth_qdio_clear_card(card, !IS_IQD(card)); 5001 ccw_device_set_offline(CARD_DDEV(card)); 5002 ccw_device_set_offline(CARD_WDEV(card)); 5003 ccw_device_set_offline(CARD_RDEV(card)); 5004 qdio_free(CARD_DDEV(card)); 5005 rc = ccw_device_set_online(CARD_RDEV(card)); 5006 if (rc) 5007 goto retriable; 5008 rc = ccw_device_set_online(CARD_WDEV(card)); 5009 if (rc) 5010 goto retriable; 5011 rc = ccw_device_set_online(CARD_DDEV(card)); 5012 if (rc) 5013 goto retriable; 5014 retriable: 5015 if (rc == -ERESTARTSYS) { 5016 QETH_DBF_TEXT(SETUP, 2, "break1"); 5017 return rc; 5018 } else if (rc) { 5019 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 5020 if (--retries < 0) 5021 goto out; 5022 else 5023 goto retry; 5024 } 5025 qeth_determine_capabilities(card); 5026 qeth_init_tokens(card); 5027 qeth_init_func_level(card); 5028 5029 rc = qeth_idx_activate_read_channel(card); 5030 if (rc == -EINTR) { 5031 QETH_DBF_TEXT(SETUP, 2, "break2"); 5032 return rc; 5033 } else if (rc) { 5034 QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); 5035 if (--retries < 0) 5036 goto out; 5037 else 5038 goto retry; 5039 } 5040 5041 rc = qeth_idx_activate_write_channel(card); 5042 if (rc == -EINTR) { 5043 QETH_DBF_TEXT(SETUP, 2, "break3"); 5044 return rc; 5045 } else if (rc) { 5046 QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); 5047 if (--retries < 0) 5048 goto out; 5049 else 5050 goto retry; 5051 } 5052 card->read_or_write_problem = 0; 5053 rc = qeth_mpc_initialize(card); 5054 if (rc) { 5055 QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); 5056 goto out; 5057 } 5058 5059 rc = qeth_send_startlan(card); 5060 if (rc) { 5061 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 5062 if (rc == -ENETDOWN) { 5063 dev_warn(&card->gdev->dev, "The LAN is offline\n"); 5064 *carrier_ok = false; 5065 } else { 5066 goto out; 5067 } 5068 } else { 5069 *carrier_ok = true; 5070 } 5071 5072 card->options.ipa4.supported_funcs = 0; 5073 card->options.ipa6.supported_funcs = 0; 5074 card->options.adp.supported_funcs = 0; 5075 card->options.sbp.supported_funcs = 0; 5076 card->info.diagass_support = 0; 5077 rc = qeth_query_ipassists(card, QETH_PROT_IPV4); 5078 if (rc == -ENOMEM) 5079 goto out; 5080 if (qeth_is_supported(card, IPA_IPV6)) { 5081 rc = qeth_query_ipassists(card, QETH_PROT_IPV6); 5082 if (rc == -ENOMEM) 5083 goto out; 5084 } 5085 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { 5086 rc = qeth_query_setadapterparms(card); 5087 if (rc < 0) { 5088 QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); 5089 goto out; 5090 } 5091 } 5092 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { 5093 rc = qeth_query_setdiagass(card); 5094 if (rc < 0) { 5095 QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); 5096 goto out; 5097 } 5098 } 5099 return 0; 5100 out: 5101 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " 5102 "an error on the device\n"); 5103 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n", 5104 CARD_DEVID(card), rc); 5105 return rc; 5106 } 5107 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); 5108 5109 static void qeth_create_skb_frag(struct qdio_buffer_element *element, 5110 struct sk_buff *skb, int offset, int data_len) 5111 { 5112 struct page *page = virt_to_page(element->addr); 5113 unsigned int next_frag; 5114 5115 /* first fill the linear space */ 5116 if (!skb->len) { 5117 unsigned int linear = min(data_len, skb_tailroom(skb)); 5118 5119 skb_put_data(skb, element->addr + offset, linear); 5120 data_len -= linear; 5121 if (!data_len) 5122 return; 5123 offset += linear; 5124 /* fall through to add page frag for remaining data */ 5125 } 5126 5127 next_frag = skb_shinfo(skb)->nr_frags; 5128 get_page(page); 5129 skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len); 5130 } 5131 5132 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 5133 { 5134 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); 5135 } 5136 5137 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, 5138 struct qeth_qdio_buffer *qethbuffer, 5139 struct qdio_buffer_element **__element, int *__offset, 5140 struct qeth_hdr **hdr) 5141 { 5142 struct qdio_buffer_element *element = *__element; 5143 struct qdio_buffer *buffer = qethbuffer->buffer; 5144 int offset = *__offset; 5145 struct sk_buff *skb; 5146 int skb_len = 0; 5147 void *data_ptr; 5148 int data_len; 5149 int headroom = 0; 5150 int use_rx_sg = 0; 5151 5152 /* qeth_hdr must not cross element boundaries */ 5153 while (element->length < offset + sizeof(struct qeth_hdr)) { 5154 if (qeth_is_last_sbale(element)) 5155 return NULL; 5156 element++; 5157 offset = 0; 5158 } 5159 *hdr = element->addr + offset; 5160 5161 offset += sizeof(struct qeth_hdr); 5162 switch ((*hdr)->hdr.l2.id) { 5163 case QETH_HEADER_TYPE_LAYER2: 5164 skb_len = (*hdr)->hdr.l2.pkt_length; 5165 break; 5166 case QETH_HEADER_TYPE_LAYER3: 5167 skb_len = (*hdr)->hdr.l3.length; 5168 headroom = ETH_HLEN; 5169 break; 5170 case QETH_HEADER_TYPE_OSN: 5171 skb_len = (*hdr)->hdr.osn.pdu_length; 5172 headroom = sizeof(struct qeth_hdr); 5173 break; 5174 default: 5175 break; 5176 } 5177 5178 if (!skb_len) 5179 return NULL; 5180 5181 if (((skb_len >= card->options.rx_sg_cb) && 5182 !IS_OSN(card) && 5183 (!atomic_read(&card->force_alloc_skb))) || 5184 (card->options.cq == QETH_CQ_ENABLED)) 5185 use_rx_sg = 1; 5186 5187 if (use_rx_sg && qethbuffer->rx_skb) { 5188 /* QETH_CQ_ENABLED only: */ 5189 skb = qethbuffer->rx_skb; 5190 qethbuffer->rx_skb = NULL; 5191 } else { 5192 unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len; 5193 5194 skb = napi_alloc_skb(&card->napi, linear + headroom); 5195 } 5196 if (!skb) 5197 goto no_mem; 5198 if (headroom) 5199 skb_reserve(skb, headroom); 5200 5201 data_ptr = element->addr + offset; 5202 while (skb_len) { 5203 data_len = min(skb_len, (int)(element->length - offset)); 5204 if (data_len) { 5205 if (use_rx_sg) 5206 qeth_create_skb_frag(element, skb, offset, 5207 data_len); 5208 else 5209 skb_put_data(skb, data_ptr, data_len); 5210 } 5211 skb_len -= data_len; 5212 if (skb_len) { 5213 if (qeth_is_last_sbale(element)) { 5214 QETH_CARD_TEXT(card, 4, "unexeob"); 5215 QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); 5216 dev_kfree_skb_any(skb); 5217 QETH_CARD_STAT_INC(card, rx_errors); 5218 return NULL; 5219 } 5220 element++; 5221 offset = 0; 5222 data_ptr = element->addr; 5223 } else { 5224 offset += data_len; 5225 } 5226 } 5227 *__element = element; 5228 *__offset = offset; 5229 if (use_rx_sg) { 5230 QETH_CARD_STAT_INC(card, rx_sg_skbs); 5231 QETH_CARD_STAT_ADD(card, rx_sg_frags, 5232 skb_shinfo(skb)->nr_frags); 5233 } 5234 return skb; 5235 no_mem: 5236 if (net_ratelimit()) { 5237 QETH_CARD_TEXT(card, 2, "noskbmem"); 5238 } 5239 QETH_CARD_STAT_INC(card, rx_dropped); 5240 return NULL; 5241 } 5242 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb); 5243 5244 int qeth_poll(struct napi_struct *napi, int budget) 5245 { 5246 struct qeth_card *card = container_of(napi, struct qeth_card, napi); 5247 int work_done = 0; 5248 struct qeth_qdio_buffer *buffer; 5249 int done; 5250 int new_budget = budget; 5251 5252 while (1) { 5253 if (!card->rx.b_count) { 5254 card->rx.qdio_err = 0; 5255 card->rx.b_count = qdio_get_next_buffers( 5256 card->data.ccwdev, 0, &card->rx.b_index, 5257 &card->rx.qdio_err); 5258 if (card->rx.b_count <= 0) { 5259 card->rx.b_count = 0; 5260 break; 5261 } 5262 card->rx.b_element = 5263 &card->qdio.in_q->bufs[card->rx.b_index] 5264 .buffer->element[0]; 5265 card->rx.e_offset = 0; 5266 } 5267 5268 while (card->rx.b_count) { 5269 buffer = &card->qdio.in_q->bufs[card->rx.b_index]; 5270 if (!(card->rx.qdio_err && 5271 qeth_check_qdio_errors(card, buffer->buffer, 5272 card->rx.qdio_err, "qinerr"))) 5273 work_done += 5274 card->discipline->process_rx_buffer( 5275 card, new_budget, &done); 5276 else 5277 done = 1; 5278 5279 if (done) { 5280 QETH_CARD_STAT_INC(card, rx_bufs); 5281 qeth_put_buffer_pool_entry(card, 5282 buffer->pool_entry); 5283 qeth_queue_input_buffer(card, card->rx.b_index); 5284 card->rx.b_count--; 5285 if (card->rx.b_count) { 5286 card->rx.b_index = 5287 (card->rx.b_index + 1) % 5288 QDIO_MAX_BUFFERS_PER_Q; 5289 card->rx.b_element = 5290 &card->qdio.in_q 5291 ->bufs[card->rx.b_index] 5292 .buffer->element[0]; 5293 card->rx.e_offset = 0; 5294 } 5295 } 5296 5297 if (work_done >= budget) 5298 goto out; 5299 else 5300 new_budget = budget - work_done; 5301 } 5302 } 5303 5304 napi_complete_done(napi, work_done); 5305 if (qdio_start_irq(card->data.ccwdev, 0)) 5306 napi_schedule(&card->napi); 5307 out: 5308 return work_done; 5309 } 5310 EXPORT_SYMBOL_GPL(qeth_poll); 5311 5312 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) 5313 { 5314 if (!cmd->hdr.return_code) 5315 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 5316 return cmd->hdr.return_code; 5317 } 5318 5319 static int qeth_setassparms_get_caps_cb(struct qeth_card *card, 5320 struct qeth_reply *reply, 5321 unsigned long data) 5322 { 5323 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 5324 struct qeth_ipa_caps *caps = reply->param; 5325 5326 if (qeth_setassparms_inspect_rc(cmd)) 5327 return -EIO; 5328 5329 caps->supported = cmd->data.setassparms.data.caps.supported; 5330 caps->enabled = cmd->data.setassparms.data.caps.enabled; 5331 return 0; 5332 } 5333 5334 int qeth_setassparms_cb(struct qeth_card *card, 5335 struct qeth_reply *reply, unsigned long data) 5336 { 5337 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 5338 5339 QETH_CARD_TEXT(card, 4, "defadpcb"); 5340 5341 if (cmd->hdr.return_code) 5342 return -EIO; 5343 5344 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 5345 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 5346 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; 5347 if (cmd->hdr.prot_version == QETH_PROT_IPV6) 5348 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; 5349 return 0; 5350 } 5351 EXPORT_SYMBOL_GPL(qeth_setassparms_cb); 5352 5353 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, 5354 enum qeth_ipa_funcs ipa_func, 5355 __u16 cmd_code, __u16 len, 5356 enum qeth_prot_versions prot) 5357 { 5358 struct qeth_cmd_buffer *iob; 5359 struct qeth_ipa_cmd *cmd; 5360 5361 QETH_CARD_TEXT(card, 4, "getasscm"); 5362 iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); 5363 5364 if (iob) { 5365 cmd = __ipa_cmd(iob); 5366 cmd->data.setassparms.hdr.assist_no = ipa_func; 5367 cmd->data.setassparms.hdr.length = 8 + len; 5368 cmd->data.setassparms.hdr.command_code = cmd_code; 5369 } 5370 5371 return iob; 5372 } 5373 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd); 5374 5375 int qeth_send_simple_setassparms_prot(struct qeth_card *card, 5376 enum qeth_ipa_funcs ipa_func, 5377 u16 cmd_code, long data, 5378 enum qeth_prot_versions prot) 5379 { 5380 int length = 0; 5381 struct qeth_cmd_buffer *iob; 5382 5383 QETH_CARD_TEXT_(card, 4, "simassp%i", prot); 5384 if (data) 5385 length = sizeof(__u32); 5386 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot); 5387 if (!iob) 5388 return -ENOMEM; 5389 5390 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data; 5391 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL); 5392 } 5393 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot); 5394 5395 static void qeth_unregister_dbf_views(void) 5396 { 5397 int x; 5398 for (x = 0; x < QETH_DBF_INFOS; x++) { 5399 debug_unregister(qeth_dbf[x].id); 5400 qeth_dbf[x].id = NULL; 5401 } 5402 } 5403 5404 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) 5405 { 5406 char dbf_txt_buf[32]; 5407 va_list args; 5408 5409 if (!debug_level_enabled(id, level)) 5410 return; 5411 va_start(args, fmt); 5412 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); 5413 va_end(args); 5414 debug_text_event(id, level, dbf_txt_buf); 5415 } 5416 EXPORT_SYMBOL_GPL(qeth_dbf_longtext); 5417 5418 static int qeth_register_dbf_views(void) 5419 { 5420 int ret; 5421 int x; 5422 5423 for (x = 0; x < QETH_DBF_INFOS; x++) { 5424 /* register the areas */ 5425 qeth_dbf[x].id = debug_register(qeth_dbf[x].name, 5426 qeth_dbf[x].pages, 5427 qeth_dbf[x].areas, 5428 qeth_dbf[x].len); 5429 if (qeth_dbf[x].id == NULL) { 5430 qeth_unregister_dbf_views(); 5431 return -ENOMEM; 5432 } 5433 5434 /* register a view */ 5435 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view); 5436 if (ret) { 5437 qeth_unregister_dbf_views(); 5438 return ret; 5439 } 5440 5441 /* set a passing level */ 5442 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level); 5443 } 5444 5445 return 0; 5446 } 5447 5448 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */ 5449 5450 int qeth_core_load_discipline(struct qeth_card *card, 5451 enum qeth_discipline_id discipline) 5452 { 5453 mutex_lock(&qeth_mod_mutex); 5454 switch (discipline) { 5455 case QETH_DISCIPLINE_LAYER3: 5456 card->discipline = try_then_request_module( 5457 symbol_get(qeth_l3_discipline), "qeth_l3"); 5458 break; 5459 case QETH_DISCIPLINE_LAYER2: 5460 card->discipline = try_then_request_module( 5461 symbol_get(qeth_l2_discipline), "qeth_l2"); 5462 break; 5463 default: 5464 break; 5465 } 5466 mutex_unlock(&qeth_mod_mutex); 5467 5468 if (!card->discipline) { 5469 dev_err(&card->gdev->dev, "There is no kernel module to " 5470 "support discipline %d\n", discipline); 5471 return -EINVAL; 5472 } 5473 5474 card->options.layer = discipline; 5475 return 0; 5476 } 5477 5478 void qeth_core_free_discipline(struct qeth_card *card) 5479 { 5480 if (IS_LAYER2(card)) 5481 symbol_put(qeth_l2_discipline); 5482 else 5483 symbol_put(qeth_l3_discipline); 5484 card->options.layer = QETH_DISCIPLINE_UNDETERMINED; 5485 card->discipline = NULL; 5486 } 5487 5488 const struct device_type qeth_generic_devtype = { 5489 .name = "qeth_generic", 5490 .groups = qeth_generic_attr_groups, 5491 }; 5492 EXPORT_SYMBOL_GPL(qeth_generic_devtype); 5493 5494 static const struct device_type qeth_osn_devtype = { 5495 .name = "qeth_osn", 5496 .groups = qeth_osn_attr_groups, 5497 }; 5498 5499 #define DBF_NAME_LEN 20 5500 5501 struct qeth_dbf_entry { 5502 char dbf_name[DBF_NAME_LEN]; 5503 debug_info_t *dbf_info; 5504 struct list_head dbf_list; 5505 }; 5506 5507 static LIST_HEAD(qeth_dbf_list); 5508 static DEFINE_MUTEX(qeth_dbf_list_mutex); 5509 5510 static debug_info_t *qeth_get_dbf_entry(char *name) 5511 { 5512 struct qeth_dbf_entry *entry; 5513 debug_info_t *rc = NULL; 5514 5515 mutex_lock(&qeth_dbf_list_mutex); 5516 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) { 5517 if (strcmp(entry->dbf_name, name) == 0) { 5518 rc = entry->dbf_info; 5519 break; 5520 } 5521 } 5522 mutex_unlock(&qeth_dbf_list_mutex); 5523 return rc; 5524 } 5525 5526 static int qeth_add_dbf_entry(struct qeth_card *card, char *name) 5527 { 5528 struct qeth_dbf_entry *new_entry; 5529 5530 card->debug = debug_register(name, 2, 1, 8); 5531 if (!card->debug) { 5532 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); 5533 goto err; 5534 } 5535 if (debug_register_view(card->debug, &debug_hex_ascii_view)) 5536 goto err_dbg; 5537 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL); 5538 if (!new_entry) 5539 goto err_dbg; 5540 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN); 5541 new_entry->dbf_info = card->debug; 5542 mutex_lock(&qeth_dbf_list_mutex); 5543 list_add(&new_entry->dbf_list, &qeth_dbf_list); 5544 mutex_unlock(&qeth_dbf_list_mutex); 5545 5546 return 0; 5547 5548 err_dbg: 5549 debug_unregister(card->debug); 5550 err: 5551 return -ENOMEM; 5552 } 5553 5554 static void qeth_clear_dbf_list(void) 5555 { 5556 struct qeth_dbf_entry *entry, *tmp; 5557 5558 mutex_lock(&qeth_dbf_list_mutex); 5559 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) { 5560 list_del(&entry->dbf_list); 5561 debug_unregister(entry->dbf_info); 5562 kfree(entry); 5563 } 5564 mutex_unlock(&qeth_dbf_list_mutex); 5565 } 5566 5567 static struct net_device *qeth_alloc_netdev(struct qeth_card *card) 5568 { 5569 struct net_device *dev; 5570 5571 switch (card->info.type) { 5572 case QETH_CARD_TYPE_IQD: 5573 dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN, 5574 ether_setup, QETH_MAX_QUEUES, 1); 5575 break; 5576 case QETH_CARD_TYPE_OSM: 5577 dev = alloc_etherdev(0); 5578 break; 5579 case QETH_CARD_TYPE_OSN: 5580 dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup); 5581 break; 5582 default: 5583 dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1); 5584 } 5585 5586 if (!dev) 5587 return NULL; 5588 5589 dev->ml_priv = card; 5590 dev->watchdog_timeo = QETH_TX_TIMEOUT; 5591 dev->min_mtu = IS_OSN(card) ? 64 : 576; 5592 /* initialized when device first goes online: */ 5593 dev->max_mtu = 0; 5594 dev->mtu = 0; 5595 SET_NETDEV_DEV(dev, &card->gdev->dev); 5596 netif_carrier_off(dev); 5597 5598 if (IS_OSN(card)) { 5599 dev->ethtool_ops = &qeth_osn_ethtool_ops; 5600 } else { 5601 dev->ethtool_ops = &qeth_ethtool_ops; 5602 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 5603 dev->hw_features |= NETIF_F_SG; 5604 dev->vlan_features |= NETIF_F_SG; 5605 if (IS_IQD(card)) { 5606 dev->features |= NETIF_F_SG; 5607 if (netif_set_real_num_tx_queues(dev, 5608 QETH_IQD_MIN_TXQ)) { 5609 free_netdev(dev); 5610 return NULL; 5611 } 5612 } 5613 } 5614 5615 return dev; 5616 } 5617 5618 struct net_device *qeth_clone_netdev(struct net_device *orig) 5619 { 5620 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv); 5621 5622 if (!clone) 5623 return NULL; 5624 5625 clone->dev_port = orig->dev_port; 5626 return clone; 5627 } 5628 5629 static int qeth_core_probe_device(struct ccwgroup_device *gdev) 5630 { 5631 struct qeth_card *card; 5632 struct device *dev; 5633 int rc; 5634 enum qeth_discipline_id enforced_disc; 5635 char dbf_name[DBF_NAME_LEN]; 5636 5637 QETH_DBF_TEXT(SETUP, 2, "probedev"); 5638 5639 dev = &gdev->dev; 5640 if (!get_device(dev)) 5641 return -ENODEV; 5642 5643 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); 5644 5645 card = qeth_alloc_card(gdev); 5646 if (!card) { 5647 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); 5648 rc = -ENOMEM; 5649 goto err_dev; 5650 } 5651 5652 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", 5653 dev_name(&gdev->dev)); 5654 card->debug = qeth_get_dbf_entry(dbf_name); 5655 if (!card->debug) { 5656 rc = qeth_add_dbf_entry(card, dbf_name); 5657 if (rc) 5658 goto err_card; 5659 } 5660 5661 qeth_setup_card(card); 5662 card->dev = qeth_alloc_netdev(card); 5663 if (!card->dev) { 5664 rc = -ENOMEM; 5665 goto err_card; 5666 } 5667 5668 card->qdio.no_out_queues = card->dev->num_tx_queues; 5669 rc = qeth_update_from_chp_desc(card); 5670 if (rc) 5671 goto err_chp_desc; 5672 qeth_determine_capabilities(card); 5673 enforced_disc = qeth_enforce_discipline(card); 5674 switch (enforced_disc) { 5675 case QETH_DISCIPLINE_UNDETERMINED: 5676 gdev->dev.type = &qeth_generic_devtype; 5677 break; 5678 default: 5679 card->info.layer_enforced = true; 5680 rc = qeth_core_load_discipline(card, enforced_disc); 5681 if (rc) 5682 goto err_load; 5683 5684 gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype : 5685 card->discipline->devtype; 5686 rc = card->discipline->setup(card->gdev); 5687 if (rc) 5688 goto err_disc; 5689 break; 5690 } 5691 5692 return 0; 5693 5694 err_disc: 5695 qeth_core_free_discipline(card); 5696 err_load: 5697 err_chp_desc: 5698 free_netdev(card->dev); 5699 err_card: 5700 qeth_core_free_card(card); 5701 err_dev: 5702 put_device(dev); 5703 return rc; 5704 } 5705 5706 static void qeth_core_remove_device(struct ccwgroup_device *gdev) 5707 { 5708 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5709 5710 QETH_DBF_TEXT(SETUP, 2, "removedv"); 5711 5712 if (card->discipline) { 5713 card->discipline->remove(gdev); 5714 qeth_core_free_discipline(card); 5715 } 5716 5717 free_netdev(card->dev); 5718 qeth_core_free_card(card); 5719 put_device(&gdev->dev); 5720 } 5721 5722 static int qeth_core_set_online(struct ccwgroup_device *gdev) 5723 { 5724 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5725 int rc = 0; 5726 enum qeth_discipline_id def_discipline; 5727 5728 if (!card->discipline) { 5729 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : 5730 QETH_DISCIPLINE_LAYER2; 5731 rc = qeth_core_load_discipline(card, def_discipline); 5732 if (rc) 5733 goto err; 5734 rc = card->discipline->setup(card->gdev); 5735 if (rc) { 5736 qeth_core_free_discipline(card); 5737 goto err; 5738 } 5739 } 5740 rc = card->discipline->set_online(gdev); 5741 err: 5742 return rc; 5743 } 5744 5745 static int qeth_core_set_offline(struct ccwgroup_device *gdev) 5746 { 5747 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5748 return card->discipline->set_offline(gdev); 5749 } 5750 5751 static void qeth_core_shutdown(struct ccwgroup_device *gdev) 5752 { 5753 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5754 qeth_set_allowed_threads(card, 0, 1); 5755 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) 5756 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 5757 qeth_qdio_clear_card(card, 0); 5758 qeth_drain_output_queues(card); 5759 qdio_free(CARD_DDEV(card)); 5760 } 5761 5762 static int qeth_core_freeze(struct ccwgroup_device *gdev) 5763 { 5764 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5765 if (card->discipline && card->discipline->freeze) 5766 return card->discipline->freeze(gdev); 5767 return 0; 5768 } 5769 5770 static int qeth_core_thaw(struct ccwgroup_device *gdev) 5771 { 5772 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5773 if (card->discipline && card->discipline->thaw) 5774 return card->discipline->thaw(gdev); 5775 return 0; 5776 } 5777 5778 static int qeth_core_restore(struct ccwgroup_device *gdev) 5779 { 5780 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5781 if (card->discipline && card->discipline->restore) 5782 return card->discipline->restore(gdev); 5783 return 0; 5784 } 5785 5786 static ssize_t group_store(struct device_driver *ddrv, const char *buf, 5787 size_t count) 5788 { 5789 int err; 5790 5791 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3, 5792 buf); 5793 5794 return err ? err : count; 5795 } 5796 static DRIVER_ATTR_WO(group); 5797 5798 static struct attribute *qeth_drv_attrs[] = { 5799 &driver_attr_group.attr, 5800 NULL, 5801 }; 5802 static struct attribute_group qeth_drv_attr_group = { 5803 .attrs = qeth_drv_attrs, 5804 }; 5805 static const struct attribute_group *qeth_drv_attr_groups[] = { 5806 &qeth_drv_attr_group, 5807 NULL, 5808 }; 5809 5810 static struct ccwgroup_driver qeth_core_ccwgroup_driver = { 5811 .driver = { 5812 .groups = qeth_drv_attr_groups, 5813 .owner = THIS_MODULE, 5814 .name = "qeth", 5815 }, 5816 .ccw_driver = &qeth_ccw_driver, 5817 .setup = qeth_core_probe_device, 5818 .remove = qeth_core_remove_device, 5819 .set_online = qeth_core_set_online, 5820 .set_offline = qeth_core_set_offline, 5821 .shutdown = qeth_core_shutdown, 5822 .prepare = NULL, 5823 .complete = NULL, 5824 .freeze = qeth_core_freeze, 5825 .thaw = qeth_core_thaw, 5826 .restore = qeth_core_restore, 5827 }; 5828 5829 struct qeth_card *qeth_get_card_by_busid(char *bus_id) 5830 { 5831 struct ccwgroup_device *gdev; 5832 struct qeth_card *card; 5833 5834 gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id); 5835 if (!gdev) 5836 return NULL; 5837 5838 card = dev_get_drvdata(&gdev->dev); 5839 put_device(&gdev->dev); 5840 return card; 5841 } 5842 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid); 5843 5844 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 5845 { 5846 struct qeth_card *card = dev->ml_priv; 5847 struct mii_ioctl_data *mii_data; 5848 int rc = 0; 5849 5850 if (!card) 5851 return -ENODEV; 5852 5853 switch (cmd) { 5854 case SIOC_QETH_ADP_SET_SNMP_CONTROL: 5855 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); 5856 break; 5857 case SIOC_QETH_GET_CARD_TYPE: 5858 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) && 5859 !IS_VM_NIC(card)) 5860 return 1; 5861 return 0; 5862 case SIOCGMIIPHY: 5863 mii_data = if_mii(rq); 5864 mii_data->phy_id = 0; 5865 break; 5866 case SIOCGMIIREG: 5867 mii_data = if_mii(rq); 5868 if (mii_data->phy_id != 0) 5869 rc = -EINVAL; 5870 else 5871 mii_data->val_out = qeth_mdio_read(dev, 5872 mii_data->phy_id, mii_data->reg_num); 5873 break; 5874 case SIOC_QETH_QUERY_OAT: 5875 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); 5876 break; 5877 default: 5878 if (card->discipline->do_ioctl) 5879 rc = card->discipline->do_ioctl(dev, rq, cmd); 5880 else 5881 rc = -EOPNOTSUPP; 5882 } 5883 if (rc) 5884 QETH_CARD_TEXT_(card, 2, "ioce%x", rc); 5885 return rc; 5886 } 5887 EXPORT_SYMBOL_GPL(qeth_do_ioctl); 5888 5889 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply, 5890 unsigned long data) 5891 { 5892 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 5893 u32 *features = reply->param; 5894 5895 if (qeth_setassparms_inspect_rc(cmd)) 5896 return -EIO; 5897 5898 *features = cmd->data.setassparms.data.flags_32bit; 5899 return 0; 5900 } 5901 5902 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype, 5903 enum qeth_prot_versions prot) 5904 { 5905 return qeth_send_simple_setassparms_prot(card, cstype, 5906 IPA_CMD_ASS_STOP, 0, prot); 5907 } 5908 5909 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype, 5910 enum qeth_prot_versions prot) 5911 { 5912 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP; 5913 struct qeth_cmd_buffer *iob; 5914 struct qeth_ipa_caps caps; 5915 u32 features; 5916 int rc; 5917 5918 /* some L3 HW requires combined L3+L4 csum offload: */ 5919 if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 && 5920 cstype == IPA_OUTBOUND_CHECKSUM) 5921 required_features |= QETH_IPA_CHECKSUM_IP_HDR; 5922 5923 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0, 5924 prot); 5925 if (!iob) 5926 return -ENOMEM; 5927 5928 rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features); 5929 if (rc) 5930 return rc; 5931 5932 if ((required_features & features) != required_features) { 5933 qeth_set_csum_off(card, cstype, prot); 5934 return -EOPNOTSUPP; 5935 } 5936 5937 iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 4, 5938 prot); 5939 if (!iob) { 5940 qeth_set_csum_off(card, cstype, prot); 5941 return -ENOMEM; 5942 } 5943 5944 if (features & QETH_IPA_CHECKSUM_LP2LP) 5945 required_features |= QETH_IPA_CHECKSUM_LP2LP; 5946 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features; 5947 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); 5948 if (rc) { 5949 qeth_set_csum_off(card, cstype, prot); 5950 return rc; 5951 } 5952 5953 if (!qeth_ipa_caps_supported(&caps, required_features) || 5954 !qeth_ipa_caps_enabled(&caps, required_features)) { 5955 qeth_set_csum_off(card, cstype, prot); 5956 return -EOPNOTSUPP; 5957 } 5958 5959 dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n", 5960 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot); 5961 if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) && 5962 cstype == IPA_OUTBOUND_CHECKSUM) 5963 dev_warn(&card->gdev->dev, 5964 "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n", 5965 QETH_CARD_IFNAME(card)); 5966 return 0; 5967 } 5968 5969 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype, 5970 enum qeth_prot_versions prot) 5971 { 5972 return on ? qeth_set_csum_on(card, cstype, prot) : 5973 qeth_set_csum_off(card, cstype, prot); 5974 } 5975 5976 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply, 5977 unsigned long data) 5978 { 5979 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 5980 struct qeth_tso_start_data *tso_data = reply->param; 5981 5982 if (qeth_setassparms_inspect_rc(cmd)) 5983 return -EIO; 5984 5985 tso_data->mss = cmd->data.setassparms.data.tso.mss; 5986 tso_data->supported = cmd->data.setassparms.data.tso.supported; 5987 return 0; 5988 } 5989 5990 static int qeth_set_tso_off(struct qeth_card *card, 5991 enum qeth_prot_versions prot) 5992 { 5993 return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO, 5994 IPA_CMD_ASS_STOP, 0, prot); 5995 } 5996 5997 static int qeth_set_tso_on(struct qeth_card *card, 5998 enum qeth_prot_versions prot) 5999 { 6000 struct qeth_tso_start_data tso_data; 6001 struct qeth_cmd_buffer *iob; 6002 struct qeth_ipa_caps caps; 6003 int rc; 6004 6005 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, 6006 IPA_CMD_ASS_START, 0, prot); 6007 if (!iob) 6008 return -ENOMEM; 6009 6010 rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data); 6011 if (rc) 6012 return rc; 6013 6014 if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) { 6015 qeth_set_tso_off(card, prot); 6016 return -EOPNOTSUPP; 6017 } 6018 6019 iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, 6020 IPA_CMD_ASS_ENABLE, sizeof(caps), prot); 6021 if (!iob) { 6022 qeth_set_tso_off(card, prot); 6023 return -ENOMEM; 6024 } 6025 6026 /* enable TSO capability */ 6027 __ipa_cmd(iob)->data.setassparms.data.caps.enabled = 6028 QETH_IPA_LARGE_SEND_TCP; 6029 rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); 6030 if (rc) { 6031 qeth_set_tso_off(card, prot); 6032 return rc; 6033 } 6034 6035 if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) || 6036 !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) { 6037 qeth_set_tso_off(card, prot); 6038 return -EOPNOTSUPP; 6039 } 6040 6041 dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot, 6042 tso_data.mss); 6043 return 0; 6044 } 6045 6046 static int qeth_set_ipa_tso(struct qeth_card *card, bool on, 6047 enum qeth_prot_versions prot) 6048 { 6049 return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot); 6050 } 6051 6052 static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) 6053 { 6054 int rc_ipv4 = (on) ? -EOPNOTSUPP : 0; 6055 int rc_ipv6; 6056 6057 if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) 6058 rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, 6059 QETH_PROT_IPV4); 6060 if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) 6061 /* no/one Offload Assist available, so the rc is trivial */ 6062 return rc_ipv4; 6063 6064 rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, 6065 QETH_PROT_IPV6); 6066 6067 if (on) 6068 /* enable: success if any Assist is active */ 6069 return (rc_ipv6) ? rc_ipv4 : 0; 6070 6071 /* disable: failure if any Assist is still active */ 6072 return (rc_ipv6) ? rc_ipv6 : rc_ipv4; 6073 } 6074 6075 /** 6076 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features 6077 * @dev: a net_device 6078 */ 6079 void qeth_enable_hw_features(struct net_device *dev) 6080 { 6081 struct qeth_card *card = dev->ml_priv; 6082 netdev_features_t features; 6083 6084 features = dev->features; 6085 /* force-off any feature that might need an IPA sequence. 6086 * netdev_update_features() will restart them. 6087 */ 6088 dev->features &= ~dev->hw_features; 6089 /* toggle VLAN filter, so that VIDs are re-programmed: */ 6090 if (IS_LAYER2(card) && IS_VM_NIC(card)) { 6091 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 6092 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER; 6093 } 6094 netdev_update_features(dev); 6095 if (features != dev->features) 6096 dev_warn(&card->gdev->dev, 6097 "Device recovery failed to restore all offload features\n"); 6098 } 6099 EXPORT_SYMBOL_GPL(qeth_enable_hw_features); 6100 6101 int qeth_set_features(struct net_device *dev, netdev_features_t features) 6102 { 6103 struct qeth_card *card = dev->ml_priv; 6104 netdev_features_t changed = dev->features ^ features; 6105 int rc = 0; 6106 6107 QETH_DBF_TEXT(SETUP, 2, "setfeat"); 6108 QETH_DBF_HEX(SETUP, 2, &features, sizeof(features)); 6109 6110 if ((changed & NETIF_F_IP_CSUM)) { 6111 rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM, 6112 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4); 6113 if (rc) 6114 changed ^= NETIF_F_IP_CSUM; 6115 } 6116 if (changed & NETIF_F_IPV6_CSUM) { 6117 rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM, 6118 IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6); 6119 if (rc) 6120 changed ^= NETIF_F_IPV6_CSUM; 6121 } 6122 if (changed & NETIF_F_RXCSUM) { 6123 rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM); 6124 if (rc) 6125 changed ^= NETIF_F_RXCSUM; 6126 } 6127 if (changed & NETIF_F_TSO) { 6128 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO, 6129 QETH_PROT_IPV4); 6130 if (rc) 6131 changed ^= NETIF_F_TSO; 6132 } 6133 if (changed & NETIF_F_TSO6) { 6134 rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6, 6135 QETH_PROT_IPV6); 6136 if (rc) 6137 changed ^= NETIF_F_TSO6; 6138 } 6139 6140 /* everything changed successfully? */ 6141 if ((dev->features ^ features) == changed) 6142 return 0; 6143 /* something went wrong. save changed features and return error */ 6144 dev->features ^= changed; 6145 return -EIO; 6146 } 6147 EXPORT_SYMBOL_GPL(qeth_set_features); 6148 6149 netdev_features_t qeth_fix_features(struct net_device *dev, 6150 netdev_features_t features) 6151 { 6152 struct qeth_card *card = dev->ml_priv; 6153 6154 QETH_DBF_TEXT(SETUP, 2, "fixfeat"); 6155 if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) 6156 features &= ~NETIF_F_IP_CSUM; 6157 if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) 6158 features &= ~NETIF_F_IPV6_CSUM; 6159 if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) && 6160 !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) 6161 features &= ~NETIF_F_RXCSUM; 6162 if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) 6163 features &= ~NETIF_F_TSO; 6164 if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO)) 6165 features &= ~NETIF_F_TSO6; 6166 6167 QETH_DBF_HEX(SETUP, 2, &features, sizeof(features)); 6168 return features; 6169 } 6170 EXPORT_SYMBOL_GPL(qeth_fix_features); 6171 6172 netdev_features_t qeth_features_check(struct sk_buff *skb, 6173 struct net_device *dev, 6174 netdev_features_t features) 6175 { 6176 /* GSO segmentation builds skbs with 6177 * a (small) linear part for the headers, and 6178 * page frags for the data. 6179 * Compared to a linear skb, the header-only part consumes an 6180 * additional buffer element. This reduces buffer utilization, and 6181 * hurts throughput. So compress small segments into one element. 6182 */ 6183 if (netif_needs_gso(skb, features)) { 6184 /* match skb_segment(): */ 6185 unsigned int doffset = skb->data - skb_mac_header(skb); 6186 unsigned int hsize = skb_shinfo(skb)->gso_size; 6187 unsigned int hroom = skb_headroom(skb); 6188 6189 /* linearize only if resulting skb allocations are order-0: */ 6190 if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0)) 6191 features &= ~NETIF_F_SG; 6192 } 6193 6194 return vlan_features_check(skb, features); 6195 } 6196 EXPORT_SYMBOL_GPL(qeth_features_check); 6197 6198 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 6199 { 6200 struct qeth_card *card = dev->ml_priv; 6201 struct qeth_qdio_out_q *queue; 6202 unsigned int i; 6203 6204 QETH_CARD_TEXT(card, 5, "getstat"); 6205 6206 stats->rx_packets = card->stats.rx_packets; 6207 stats->rx_bytes = card->stats.rx_bytes; 6208 stats->rx_errors = card->stats.rx_errors; 6209 stats->rx_dropped = card->stats.rx_dropped; 6210 stats->multicast = card->stats.rx_multicast; 6211 6212 for (i = 0; i < card->qdio.no_out_queues; i++) { 6213 queue = card->qdio.out_qs[i]; 6214 6215 stats->tx_packets += queue->stats.tx_packets; 6216 stats->tx_bytes += queue->stats.tx_bytes; 6217 stats->tx_errors += queue->stats.tx_errors; 6218 stats->tx_dropped += queue->stats.tx_dropped; 6219 } 6220 } 6221 EXPORT_SYMBOL_GPL(qeth_get_stats64); 6222 6223 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, 6224 u8 cast_type, struct net_device *sb_dev) 6225 { 6226 if (cast_type != RTN_UNICAST) 6227 return QETH_IQD_MCAST_TXQ; 6228 return QETH_IQD_MIN_UCAST_TXQ; 6229 } 6230 EXPORT_SYMBOL_GPL(qeth_iqd_select_queue); 6231 6232 int qeth_open(struct net_device *dev) 6233 { 6234 struct qeth_card *card = dev->ml_priv; 6235 6236 QETH_CARD_TEXT(card, 4, "qethopen"); 6237 6238 if (qdio_stop_irq(CARD_DDEV(card), 0) < 0) 6239 return -EIO; 6240 6241 card->data.state = CH_STATE_UP; 6242 netif_tx_start_all_queues(dev); 6243 6244 napi_enable(&card->napi); 6245 local_bh_disable(); 6246 napi_schedule(&card->napi); 6247 /* kick-start the NAPI softirq: */ 6248 local_bh_enable(); 6249 return 0; 6250 } 6251 EXPORT_SYMBOL_GPL(qeth_open); 6252 6253 int qeth_stop(struct net_device *dev) 6254 { 6255 struct qeth_card *card = dev->ml_priv; 6256 6257 QETH_CARD_TEXT(card, 4, "qethstop"); 6258 netif_tx_disable(dev); 6259 napi_disable(&card->napi); 6260 return 0; 6261 } 6262 EXPORT_SYMBOL_GPL(qeth_stop); 6263 6264 static int __init qeth_core_init(void) 6265 { 6266 int rc; 6267 6268 pr_info("loading core functions\n"); 6269 6270 rc = qeth_register_dbf_views(); 6271 if (rc) 6272 goto dbf_err; 6273 qeth_core_root_dev = root_device_register("qeth"); 6274 rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); 6275 if (rc) 6276 goto register_err; 6277 qeth_core_header_cache = 6278 kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE, 6279 roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE), 6280 0, NULL); 6281 if (!qeth_core_header_cache) { 6282 rc = -ENOMEM; 6283 goto slab_err; 6284 } 6285 qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf", 6286 sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL); 6287 if (!qeth_qdio_outbuf_cache) { 6288 rc = -ENOMEM; 6289 goto cqslab_err; 6290 } 6291 rc = ccw_driver_register(&qeth_ccw_driver); 6292 if (rc) 6293 goto ccw_err; 6294 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); 6295 if (rc) 6296 goto ccwgroup_err; 6297 6298 return 0; 6299 6300 ccwgroup_err: 6301 ccw_driver_unregister(&qeth_ccw_driver); 6302 ccw_err: 6303 kmem_cache_destroy(qeth_qdio_outbuf_cache); 6304 cqslab_err: 6305 kmem_cache_destroy(qeth_core_header_cache); 6306 slab_err: 6307 root_device_unregister(qeth_core_root_dev); 6308 register_err: 6309 qeth_unregister_dbf_views(); 6310 dbf_err: 6311 pr_err("Initializing the qeth device driver failed\n"); 6312 return rc; 6313 } 6314 6315 static void __exit qeth_core_exit(void) 6316 { 6317 qeth_clear_dbf_list(); 6318 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); 6319 ccw_driver_unregister(&qeth_ccw_driver); 6320 kmem_cache_destroy(qeth_qdio_outbuf_cache); 6321 kmem_cache_destroy(qeth_core_header_cache); 6322 root_device_unregister(qeth_core_root_dev); 6323 qeth_unregister_dbf_views(); 6324 pr_info("core functions removed\n"); 6325 } 6326 6327 module_init(qeth_core_init); 6328 module_exit(qeth_core_exit); 6329 MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); 6330 MODULE_DESCRIPTION("qeth core functions"); 6331 MODULE_LICENSE("GPL"); 6332