// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *	      Frank Pavlic <fpavlic@de.ibm.com>,
 *	      Thomas Spatzier <tspat@de.ibm.com>,
 *	      Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/io.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
				&debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;

static struct device *qeth_core_root_dev;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification);
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);

static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return "OSN";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			      int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list) {
		list_del(&pool_entry->list);
	}
}
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry;
	void *ptr;
	int i, j;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
		if (!pool_entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}
		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
			ptr = (void *) __get_free_page(GFP_KERNEL);
			if (!ptr) {
				while (j > 0)
					free_page((unsigned long)
						  pool_entry->elements[--j]);
				kfree(pool_entry);
				qeth_free_buffer_pool(card);
				return -ENOMEM;
			}
			pool_entry->elements[j] = ptr;
		}
		list_add(&pool_entry->init_list,
			 &card->qdio.init_pool.entry_list);
	}
	return 0;
}
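
/* Resize the RX buffer pool. This is only permitted while the card is
 * offline, since the pool is torn down and re-allocated from scratch.
 */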
int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
	QETH_CARD_TEXT(card, 2, "realcbp");

	if (card->state != CARD_STATE_DOWN)
		return -EPERM;

	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
	qeth_clear_working_pool_list(card);
	qeth_free_buffer_pool(card);
	card->qdio.in_buf_pool.buf_count = bufcnt;
	card->qdio.init_pool.buf_count = bufcnt;
	return qeth_alloc_buffer_pool(card);
}
EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0,
			     127);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		int i;
		struct qdio_outbuf_state *outbuf_states;

		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			rc = -1;
			goto kmsg_out;
		}
		card->qdio.no_in_queues = 2;
		card->qdio.out_bufstates =
			kcalloc(card->qdio.no_out_queues *
					QDIO_MAX_BUFFERS_PER_Q,
				sizeof(struct qdio_outbuf_state),
				GFP_KERNEL);
		outbuf_states = card->qdio.out_bufstates;
		if (outbuf_states == NULL) {
			rc = -1;
			goto free_cq_out;
		}
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			card->qdio.out_qs[i]->bufstates = outbuf_states;
			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
	rc = 0;
out:
	return rc;
free_cq_out:
	qeth_free_qdio_queue(card->qdio.c_q);
	card->qdio.c_q = NULL;
kmsg_out:
	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
	goto out;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
	kfree(card->qdio.out_bufstates);
	card->qdio.out_bufstates = NULL;
}
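
/* Map an SBALF 15 error code to the af_iucv TX notification passed to
 * the socket, distinguishing immediate completions from delayed ones
 * that are reported via the completion queue.
 */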
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			      TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			      TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
					 int forced_cleanup)
{
	if (q->card->options.cq != QETH_CQ_ENABLED)
		return;

	if (q->bufs[bidx]->next_pending != NULL) {
		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;

		while (c) {
			if (forced_cleanup ||
			    atomic_read(&c->state) ==
			      QETH_QDIO_BUF_HANDLED_DELAYED) {
				struct qeth_qdio_out_buffer *f = c;

				QETH_CARD_TEXT(f->q->card, 5, "fp");
				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
				/* release here to avoid interleaving between
				   outbound tasklet and inbound tasklet
				   regarding notifications and lifecycle */
				qeth_tx_complete_buf(c, forced_cleanup, 0);

				c = f->next_pending;
				WARN_ON_ONCE(head->next_pending != f);
				head->next_pending = c;
				kmem_cache_free(qeth_qdio_outbuf_cache, f);
			} else {
				head = c;
				c = c->next_pending;
			}
		}
	}
	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
					QETH_QDIO_BUF_HANDLED_DELAYED)) {
		/* for recovery situations */
		qeth_init_qdio_out_buf(q, bidx);
		QETH_CARD_TEXT(q->card, 2, "clprecov");
	}
}
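
/* Handle the asynchronous completion (AOB) of a TX buffer whose
 * delivery was reported as pending: notify the attached sockets and
 * mark the buffer QETH_QDIO_BUF_HANDLED_DELAYED so that
 * qeth_cleanup_handled_pending() can release it.
 */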
static void qeth_qdio_handle_aob(struct qeth_card *card,
				 unsigned long phys_aob_addr)
{
	struct qaob *aob;
	struct qeth_qdio_out_buffer *buffer;
	enum iucv_tx_notify notification;
	unsigned int i;

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
		notification = TX_NOTIFY_OK;
	} else {
		WARN_ON_ONCE(atomic_read(&buffer->state) !=
			     QETH_QDIO_BUF_PENDING);
		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
		notification = TX_NOTIFY_DELAYED_OK;
	}

	if (aob->aorc != 0) {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		notification = qeth_compute_cq_notification(aob->aorc, 1);
	}
	qeth_notify_skbs(buffer->q, buffer, notification);

	/* Free dangling allocations. The attached skbs are handled by
	 * qeth_cleanup_handled_pending().
	 */
	for (i = 0;
	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
	     i++) {
		if (aob->sba[i] && buffer->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
					(void *) aob->sba[i]);
	}
	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);

	qdio_release_aob(aob);
}

static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
{
	return card->options.cq == QETH_CQ_ENABLED &&
	       card->qdio.c_q != NULL &&
	       queue != 0 &&
	       queue == card->qdio.no_in_queues - 1;
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}

static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (rc) {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		atomic_set(&channel->irq_pending, 0);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
		wake_up(&card->wait_q);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list);
	spin_unlock_irq(&card->lock);
}

void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
			       struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}
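
/* Distinguish solicited IPA replies from unsolicited events such as
 * STOPLAN/STARTLAN link notifications. Returns NULL when the message
 * has been fully consumed here.
 */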
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				QETH_CARD_IFNAME(card));
			schedule_work(&card->close_dev_work);
		} else {
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 QETH_CARD_IFNAME(card), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 QETH_CARD_IFNAME(card), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_MODCCID:
		return cmd;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list)
		qeth_notify_cmd(iob, -EIO);
	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);

static int qeth_check_idx_response(struct qeth_card *card,
				   unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & 0xc0) == 0xc0) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		if (buffer[4] == 0xf6) {
			dev_err(&card->gdev->dev,
				"The qeth device is not configured "
				"for the OSI layer required by z/VM\n");
			return -EPERM;
		}
		return -EIO;
	}
	return 0;
}

void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
EXPORT_SYMBOL_GPL(qeth_put_cmd);

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}
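
/* Allocate a command buffer with an initial reference. Additional
 * references are taken with qeth_get_cmd() (e.g. to keep the iob alive
 * across IO completion) and dropped with qeth_put_cmd().
 */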
struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
				       unsigned int length, unsigned int ccws,
				       long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	INIT_LIST_HEAD(&iob->list);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
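
/* Completion callback for the READ channel: validate the IDX response,
 * filter out unsolicited IPA events and match replies to their waiting
 * command by IPA sequence number.
 */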
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
		/* fall through */
	default:
		goto out;
	}

	if (IS_IPA(iob->data)) {
		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
	} else {
		/* non-IPA commands should only flow during initialization */
		if (card->state != CARD_STATE_DOWN)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
		if (!IS_IPA(tmp->data) ||
		    __ipa_cmd(tmp)->hdr.seqno == cmd->hdr.seqno) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
	       QETH_PDU_HEADER_SEQ_NO(iob->data),
	       QETH_SEQ_NO_LENGTH);
	qeth_put_cmd(iob);
	__qeth_issue_next_read(card);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
				     unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread) ||
	    (card->thread_start_mask & thread)) {
		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
		return -EPERM;
	}
	card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return 0;
}

void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);

void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_run_thread);

void qeth_schedule_recovery(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "startrec");
	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
EXPORT_SYMBOL_GPL(qeth_schedule_recovery);

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			 "failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			       16, 1, irb, 64, 1);
		return 1;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return 1;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return 1;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return 1;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return 1;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			 " on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, " rc???");
		return PTR_ERR(irb);
	}
}
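
/* Common interrupt handler for the read, write and data channels of a
 * card; dispatches IO completion to the iob's callback.
 */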
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		     struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);
	if (!card)
		return;

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (qeth_intparm_is_iob(intparm))
		iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		if (iob)
			qeth_cancel_cmd(iob, rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return;
	}

	atomic_set(&channel->irq_pending, 0);

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
		channel->state = CH_STATE_STOPPED;

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
		channel->state = CH_STATE_HALTED;

	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
		QETH_CARD_TEXT(card, 6, "clrchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if (intparm == QETH_HALT_CHANNEL_PARM) {
		QETH_CARD_TEXT(card, 6, "hltchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				 "The qeth device driver failed to recover "
				 "an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				       DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			goto out;
		}
	}

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			goto out;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}

out:
	wake_up(&card->wait_q);
	return;
}

static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
			     struct qeth_qdio_out_buffer *buf,
			     enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct qeth_qdio_out_q *queue = buf->q;
	struct sk_buff *skb;

	/* release may never happen from within CQ tasklet scope */
	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);

	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (error) {
			QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
		} else {
			QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
			QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);

	qeth_tx_complete_buf(buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		if (buf->buffer->element[i].addr && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
					buf->buffer->element[i].addr);
		buf->is_header[i] = 0;
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;
		qeth_cleanup_handled_pending(q, j, 1);
		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}
EXPORT_SYMBOL_GPL(qeth_drain_output_queues);

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	int i = 0;

	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.init_pool.entry_list, init_list) {
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
			free_page((unsigned long)pool_entry->elements[i]);
		list_del(&pool_entry->init_list);
		kfree(pool_entry);
	}
}

static void qeth_clean_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;

	QETH_DBF_TEXT(SETUP, 2, "freech");

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
}

static void qeth_setup_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;

	QETH_DBF_TEXT(SETUP, 2, "setupch");

	channel->state = CH_STATE_DOWN;
	atomic_set(&channel->irq_pending, 0);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));
}

static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int count = single ? 1 : card->dev->num_tx_queues;
	int rc;

	rtnl_lock();
	rc = netif_set_real_num_tx_queues(card->dev, count);
	rtnl_unlock();

	if (rc)
		return rc;

	if (card->qdio.no_out_queues == count)
		return 0;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (count == 1)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
	card->qdio.no_out_queues = count;
	return 0;
}

static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return rc;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.no_in_queues = 1;
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.rx_sg_cb = QETH_RX_SG_CB;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					      kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(card->discipline->recover, (void *)card,
				 "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
						      QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			   CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	qeth_setup_channel(&card->read);
	qeth_setup_channel(&card->write);
	qeth_setup_channel(&card->data);
	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}
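
/* Terminate any pending IO on the channel and wait until the channel
 * reports CH_STATE_HALTED.
 */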
static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
					   QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
					   QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	card->state = CARD_STATE_DOWN;
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
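
/* For z/VM NICs, query the hypervisor via DIAG 0x26c to learn whether
 * the device is coupled in layer-2 or layer-3 mode.
 */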
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
	struct diag26c_vnic_resp *response = NULL;
	struct diag26c_vnic_req *request = NULL;
	struct ccw_dev_id id;
	char userid[80];
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "vmlayer");

	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
	if (rc)
		goto out;

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	ccw_device_get_id(CARD_RDEV(card), &id);
	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION6_VM65918;
	request->req_format = DIAG26C_VNIC_INFO;
	ASCEBC(userid, 8);
	memcpy(&request->sys_name, userid, 8);
	request->devno = id.devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		goto out;
	}

	if (response->protocol == VNIC_INFO_PROT_L2)
		disc = QETH_DISCIPLINE_LAYER2;
	else if (response->protocol == VNIC_INFO_PROT_L3)
		disc = QETH_DISCIPLINE_LAYER3;

out:
	kfree(response);
	kfree(request);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "err%x", rc);
	return disc;
}

/* Determine whether the device requires a specific layer discipline */
static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;

	if (IS_OSM(card) || IS_OSN(card))
		disc = QETH_DISCIPLINE_LAYER2;
	else if (IS_VM_NIC(card))
		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
				      qeth_vm_detect_layer(card);

	switch (disc) {
	case QETH_DISCIPLINE_LAYER2:
		QETH_CARD_TEXT(card, 3, "force l2");
		break;
	case QETH_DISCIPLINE_LAYER3:
		QETH_CARD_TEXT(card, 3, "force l3");
		break;
	default:
		QETH_CARD_TEXT(card, 3, "force no");
	}

	return disc;
}

static void qeth_set_blkt_defaults(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "cfgblkt");

	if (card->info.use_v1_blkt) {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	} else {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	}
}

static void qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}

static void qeth_init_func_level(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
		break;
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSN:
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
		break;
	default:
		break;
	}
}

static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}

static int qeth_peer_func_level(int level)
{
	if ((level & 0xff) == 8)
		return (level & 0xff) + 0x400;
	if (((level >> 8) & 3) == 1)
		return (level & 0xff) + 0x200;
	return level;
}

static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	iob->callback = qeth_release_buffer_cb;
}

static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	iob->finalize = qeth_mpc_finalize_cmd;
	return iob;
}

/**
 * qeth_send_control_data() - send control command to the card
 * @card:			qeth_card structure pointer
 * @iob:			qeth_cmd_buffer pointer
 * @reply_cb:			callback function pointer
 * @cb_card:			pointer to the qeth_card structure
 * @cb_reply:			pointer to the qeth_reply structure
 * @cb_cmd:			pointer to the original iob for non-IPA
 *				commands, or to the qeth_ipa_cmd structure
 *				for the IPA commands.
 * @reply_param:		private pointer passed to the callback
 *
 * Callback function gets called one or more times, with cb_cmd
 * pointing to the response returned by the hardware. Callback
 * function must return
 *   > 0 if more reply blocks are expected,
 *     0 if the last or only reply block is received, and
 *   < 0 on error.
 * Callback function can get the value of the reply_param pointer from the
 * field 'param' of the structure qeth_reply.
 */
static int qeth_send_control_data(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob,
				  int (*reply_cb)(struct qeth_card *cb_card,
						  struct qeth_reply *cb_reply,
						  unsigned long cb_cmd),
				  void *reply_param)
{
	struct qeth_channel *channel = iob->channel;
	struct qeth_reply *reply = &iob->reply;
	long timeout = iob->timeout;
	int rc;

	QETH_CARD_TEXT(card, 2, "sendctl");

	reply->callback = reply_cb;
	reply->param = reply_param;

	timeout = wait_event_interruptible_timeout(card->wait_q,
						   qeth_trylock_channel(channel),
						   timeout);
	if (timeout <= 0) {
		qeth_put_cmd(iob);
		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
	}

	if (iob->finalize)
		iob->finalize(card, iob);
	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));

	qeth_enqueue_cmd(card, iob);

	/* This pairs with iob->callback, and keeps the iob alive after IO: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
				      (addr_t) iob, 0, 0, timeout);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		qeth_dequeue_cmd(card, iob);
		qeth_put_cmd(iob);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		goto out;
	}

	timeout = wait_for_completion_interruptible_timeout(&iob->done,
							    timeout);
	if (timeout <= 0)
		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;

	qeth_dequeue_cmd(card, iob);

	if (reply_cb) {
		/* Wait until the callback for a late reply has completed: */
		spin_lock_irq(&iob->lock);
		if (rc)
			/* Zap any callback that's still pending: */
			iob->rc = rc;
		spin_unlock_irq(&iob->lock);
	}

	if (!rc)
		rc = iob->rc;

out:
	qeth_put_cmd(iob);
	return rc;
}

struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};

static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}
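
/* IDX activation replies carry the peer's function level, which must
 * match the level we advertised (the write channel additionally masks
 * out the 0x0100 bit).
 */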
static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	struct ccw_dev_id dev_id;

	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	iob->finalize = qeth_idx_finalize_cmd;

	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}

static int qeth_idx_activate_read_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->read;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxread");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
	iob->callback = qeth_idx_activate_read_channel_cb;

	rc = qeth_send_control_data(card, iob, NULL, NULL);
	if (rc)
		return rc;

	channel->state = CH_STATE_UP;
	return 0;
}

static int qeth_idx_activate_write_channel(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->write;
	struct qeth_cmd_buffer *iob;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrite");

	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);
iob->callback = qeth_idx_activate_write_channel_cb; 2008 2009 rc = qeth_send_control_data(card, iob, NULL, NULL); 2010 if (rc) 2011 return rc; 2012 2013 channel->state = CH_STATE_UP; 2014 return 0; 2015 } 2016 2017 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2018 unsigned long data) 2019 { 2020 struct qeth_cmd_buffer *iob; 2021 2022 QETH_CARD_TEXT(card, 2, "cmenblcb"); 2023 2024 iob = (struct qeth_cmd_buffer *) data; 2025 memcpy(&card->token.cm_filter_r, 2026 QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), 2027 QETH_MPC_TOKEN_LENGTH); 2028 return 0; 2029 } 2030 2031 static int qeth_cm_enable(struct qeth_card *card) 2032 { 2033 struct qeth_cmd_buffer *iob; 2034 2035 QETH_CARD_TEXT(card, 2, "cmenable"); 2036 2037 iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE); 2038 if (!iob) 2039 return -ENOMEM; 2040 2041 memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), 2042 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2043 memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), 2044 &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); 2045 2046 return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL); 2047 } 2048 2049 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2050 unsigned long data) 2051 { 2052 struct qeth_cmd_buffer *iob; 2053 2054 QETH_CARD_TEXT(card, 2, "cmsetpcb"); 2055 2056 iob = (struct qeth_cmd_buffer *) data; 2057 memcpy(&card->token.cm_connection_r, 2058 QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), 2059 QETH_MPC_TOKEN_LENGTH); 2060 return 0; 2061 } 2062 2063 static int qeth_cm_setup(struct qeth_card *card) 2064 { 2065 struct qeth_cmd_buffer *iob; 2066 2067 QETH_CARD_TEXT(card, 2, "cmsetup"); 2068 2069 iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE); 2070 if (!iob) 2071 return -ENOMEM; 2072 2073 memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), 2074 &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); 2075 memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), 2076 &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); 2077 memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), 2078 &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); 2079 return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL); 2080 } 2081 2082 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) 2083 { 2084 struct net_device *dev = card->dev; 2085 unsigned int new_mtu; 2086 2087 if (!max_mtu) { 2088 /* IQD needs accurate max MTU to set up its RX buffers: */ 2089 if (IS_IQD(card)) 2090 return -EINVAL; 2091 /* tolerate quirky HW: */ 2092 max_mtu = ETH_MAX_MTU; 2093 } 2094 2095 rtnl_lock(); 2096 if (IS_IQD(card)) { 2097 /* move any device with default MTU to new max MTU: */ 2098 new_mtu = (dev->mtu == dev->max_mtu) ? 
max_mtu : dev->mtu; 2099 2100 /* adjust RX buffer size to new max MTU: */ 2101 card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; 2102 if (dev->max_mtu && dev->max_mtu != max_mtu) 2103 qeth_free_qdio_queues(card); 2104 } else { 2105 if (dev->mtu) 2106 new_mtu = dev->mtu; 2107 /* default MTUs for first setup: */ 2108 else if (IS_LAYER2(card)) 2109 new_mtu = ETH_DATA_LEN; 2110 else 2111 new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ 2112 } 2113 2114 dev->max_mtu = max_mtu; 2115 dev->mtu = min(new_mtu, max_mtu); 2116 rtnl_unlock(); 2117 return 0; 2118 } 2119 2120 static int qeth_get_mtu_outof_framesize(int framesize) 2121 { 2122 switch (framesize) { 2123 case 0x4000: 2124 return 8192; 2125 case 0x6000: 2126 return 16384; 2127 case 0xa000: 2128 return 32768; 2129 case 0xffff: 2130 return 57344; 2131 default: 2132 return 0; 2133 } 2134 } 2135 2136 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, 2137 unsigned long data) 2138 { 2139 __u16 mtu, framesize; 2140 __u16 len; 2141 __u8 link_type; 2142 struct qeth_cmd_buffer *iob; 2143 2144 QETH_CARD_TEXT(card, 2, "ulpenacb"); 2145 2146 iob = (struct qeth_cmd_buffer *) data; 2147 memcpy(&card->token.ulp_filter_r, 2148 QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), 2149 QETH_MPC_TOKEN_LENGTH); 2150 if (IS_IQD(card)) { 2151 memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); 2152 mtu = qeth_get_mtu_outof_framesize(framesize); 2153 } else { 2154 mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data); 2155 } 2156 *(u16 *)reply->param = mtu; 2157 2158 memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); 2159 if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { 2160 memcpy(&link_type, 2161 QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); 2162 card->info.link_type = link_type; 2163 } else 2164 card->info.link_type = 0; 2165 QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type); 2166 return 0; 2167 } 2168 2169 static u8 qeth_mpc_select_prot_type(struct qeth_card *card) 2170 { 2171 if (IS_OSN(card)) 2172 return QETH_PROT_OSN2; 2173 return IS_LAYER2(card) ? 
QETH_PROT_LAYER2 : QETH_PROT_TCPIP; 2174 } 2175 2176 static int qeth_ulp_enable(struct qeth_card *card) 2177 { 2178 u8 prot_type = qeth_mpc_select_prot_type(card); 2179 struct qeth_cmd_buffer *iob; 2180 u16 max_mtu; 2181 int rc; 2182 2183 QETH_CARD_TEXT(card, 2, "ulpenabl"); 2184 2185 iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE); 2186 if (!iob) 2187 return -ENOMEM; 2188 2189 *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; 2190 memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); 2191 memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), 2192 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2193 memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), 2194 &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); 2195 rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu); 2196 if (rc) 2197 return rc; 2198 return qeth_update_max_mtu(card, max_mtu); 2199 } 2200 2201 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, 2202 unsigned long data) 2203 { 2204 struct qeth_cmd_buffer *iob; 2205 2206 QETH_CARD_TEXT(card, 2, "ulpstpcb"); 2207 2208 iob = (struct qeth_cmd_buffer *) data; 2209 memcpy(&card->token.ulp_connection_r, 2210 QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2211 QETH_MPC_TOKEN_LENGTH); 2212 if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), 2213 3)) { 2214 QETH_CARD_TEXT(card, 2, "olmlimit"); 2215 dev_err(&card->gdev->dev, "A connection could not be " 2216 "established because of an OLM limit\n"); 2217 return -EMLINK; 2218 } 2219 return 0; 2220 } 2221 2222 static int qeth_ulp_setup(struct qeth_card *card) 2223 { 2224 __u16 temp; 2225 struct qeth_cmd_buffer *iob; 2226 struct ccw_dev_id dev_id; 2227 2228 QETH_CARD_TEXT(card, 2, "ulpsetup"); 2229 2230 iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE); 2231 if (!iob) 2232 return -ENOMEM; 2233 2234 memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), 2235 &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); 2236 memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), 2237 &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH); 2238 memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), 2239 &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); 2240 2241 ccw_device_get_id(CARD_DDEV(card), &dev_id); 2242 memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2); 2243 temp = (card->info.cula << 8) + card->info.unit_addr2; 2244 memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); 2245 return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL); 2246 } 2247 2248 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) 2249 { 2250 struct qeth_qdio_out_buffer *newbuf; 2251 2252 newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC); 2253 if (!newbuf) 2254 return -ENOMEM; 2255 2256 newbuf->buffer = q->qdio_bufs[bidx]; 2257 skb_queue_head_init(&newbuf->skb_list); 2258 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); 2259 newbuf->q = q; 2260 newbuf->next_pending = q->bufs[bidx]; 2261 atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); 2262 q->bufs[bidx] = newbuf; 2263 return 0; 2264 } 2265 2266 static void qeth_free_output_queue(struct qeth_qdio_out_q *q) 2267 { 2268 if (!q) 2269 return; 2270 2271 qeth_drain_output_queue(q, true); 2272 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2273 kfree(q); 2274 } 2275 2276 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) 2277 { 2278 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); 2279 2280 if (!q) 2281 return NULL; 2282 2283 if 
(qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}
	return q;
}

static void qeth_tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

	napi_schedule(&queue->napi);
	QETH_TXQ_STAT_INC(queue, completion_timer);
}

static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
	int i, j;

	QETH_CARD_TEXT(card, 2, "allcqdbf");

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

	QETH_CARD_TEXT(card, 2, "inq");
	card->qdio.in_q = qeth_alloc_qdio_queue();
	if (!card->qdio.in_q)
		goto out_nomem;

	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_freeinq;

	/* outbound */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		if (!queue)
			goto out_freeoutq;
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->card = card;
		queue->queue_no = i;
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);

		/* give outbound qeth_qdio_buffers their qdio_buffers */
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			WARN_ON(queue->bufs[j]);
			if (qeth_init_qdio_out_buf(queue, j))
				goto out_freeoutqbufs;
		}
	}

	/* completion */
	if (qeth_alloc_cq(card))
		goto out_freeoutq;

	return 0;

out_freeoutqbufs:
	while (j > 0) {
		--j;
		kmem_cache_free(qeth_qdio_outbuf_cache,
				card->qdio.out_qs[i]->bufs[j]);
		card->qdio.out_qs[i]->bufs[j] = NULL;
	}
out_freeoutq:
	while (i > 0) {
		qeth_free_output_queue(card->qdio.out_qs[--i]);
		card->qdio.out_qs[i] = NULL;
	}
	qeth_free_buffer_pool(card);
out_freeinq:
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
out_nomem:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	return -ENOMEM;
}

static void qeth_free_qdio_queues(struct qeth_card *card)
{
	int i, j;

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;

	qeth_free_cq(card);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (card->qdio.in_q->bufs[j].rx_skb)
			dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
	}
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	for (i = 0; i < card->qdio.no_out_queues; i++) {
		qeth_free_output_queue(card->qdio.out_qs[i]);
		card->qdio.out_qs[i] = NULL;
	}
}

static void qeth_create_qib_param_field(struct qeth_card *card,
					char *param_field)
{

	param_field[0] = _ascebc['P'];
	param_field[1] = _ascebc['C'];
	param_field[2] = _ascebc['I'];
	param_field[3] = _ascebc['T'];
	*((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
	*((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
	*((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
}

static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
					     char *param_field)
{
	param_field[16] = _ascebc['B'];
	param_field[17] = _ascebc['L'];
	param_field[18] = _ascebc['K'];
	param_field[19] = _ascebc['T'];
	*((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
	*((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
	*((unsigned int *) (&param_field[28])) =
		card->info.blkt.inter_packet_jumbo;
}

static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}

static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "dmact");

	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, NULL, NULL);
}

static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "mpcinit");

	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
		goto out_qdio;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		goto out_qdio;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		goto out_qdio;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out_qdio;
	}
	rc = qeth_alloc_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out_qdio;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		qeth_free_qdio_queues(card);
		goto out_qdio;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
		goto out_qdio;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
		goto out_qdio;
	}

	return 0;
out_qdio:
	qeth_qdio_clear_card(card, !IS_IQD(card));
	qdio_free(CARD_DDEV(card));
	return rc;
}

void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM will use a non-zero first character to indicate a
		 * HiperSockets-like reporting of the level; OSA sets the
		 * first character to zero.
		 */
		if (!card->info.mcl_level[0]) {
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);
			break;
		}
		/* fallthrough */
	case QETH_CARD_TYPE_IQD:
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
}
EXPORT_SYMBOL_GPL(qeth_print_status_message);

static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry;

	QETH_CARD_TEXT(card, 5, "inwrklst");

	list_for_each_entry(entry,
			    &card->qdio.init_pool.entry_list, init_list) {
		qeth_put_buffer_pool_entry(card, entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
	struct qeth_card *card)
{
	struct list_head *plh;
	struct qeth_buffer_pool_entry *entry;
	int i, free;
	struct page *page;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

	list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
		entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
			if (page_count(virt_to_page(entry->elements[i])) > 1) {
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
	entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
			   struct qeth_buffer_pool_entry, list);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		if (page_count(virt_to_page(entry->elements[i])) > 1) {
			page = alloc_page(GFP_ATOMIC);
			if (!page) {
				return NULL;
			} else {
				free_page((unsigned long)entry->elements[i]);
				entry->elements[i] = page_address(page);
				QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
			}
		}
	}
	list_del_init(&entry->list);
	return entry;
}

static int qeth_init_input_buffer(struct qeth_card *card,
				  struct qeth_qdio_buffer *buf)
{
	struct qeth_buffer_pool_entry *pool_entry;
	int i;

	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
		buf->rx_skb = netdev_alloc_skb(card->dev,
					       QETH_RX_PULL_LEN + ETH_HLEN);
		if (!buf->rx_skb)
			return 1;
	}

	pool_entry = qeth_find_free_buffer_pool_entry(card);
	if (!pool_entry)
		return 1;

	/*
	 * Since the buffer is accessed only from the input_tasklet,
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
	 * buffers.
	 */

	buf->pool_entry = pool_entry;
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
		buf->buffer->element[i].addr = pool_entry->elements[i];
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
		else
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
	}
	return 0;
}

int qeth_init_qdio_queues(struct qeth_card *card)
{
	unsigned int i;
	int rc;

	QETH_CARD_TEXT(card, 2, "initqdqs");

	/* inbound queue */
	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	memset(&card->rx, 0, sizeof(struct qeth_rx));
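	/*
	 * Prime the input queue from the buffer pool. With a pool of N
	 * entries, only N - 1 buffers are handed to the hardware below, and
	 * next_buf_to_init points at the one buffer that stays back; as an
	 * illustration, with N = 64 buffers 0..62 go to the hardware and
	 * buffer 63 is the first one qeth_queue_input_buffer() requeues
	 * later.
	 */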
qeth_initialize_working_pool_list(card); 2648 /*give only as many buffers to hardware as we have buffer pool entries*/ 2649 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i) 2650 qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); 2651 card->qdio.in_q->next_buf_to_init = 2652 card->qdio.in_buf_pool.buf_count - 1; 2653 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, 2654 card->qdio.in_buf_pool.buf_count - 1); 2655 if (rc) { 2656 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 2657 return rc; 2658 } 2659 2660 /* completion */ 2661 rc = qeth_cq_init(card); 2662 if (rc) { 2663 return rc; 2664 } 2665 2666 /* outbound queue */ 2667 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2668 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i]; 2669 2670 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); 2671 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card); 2672 queue->next_buf_to_fill = 0; 2673 queue->do_pack = 0; 2674 queue->prev_hdr = NULL; 2675 queue->bulk_start = 0; 2676 atomic_set(&queue->used_buffers, 0); 2677 atomic_set(&queue->set_pci_flags_count, 0); 2678 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 2679 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i)); 2680 } 2681 return 0; 2682 } 2683 EXPORT_SYMBOL_GPL(qeth_init_qdio_queues); 2684 2685 static void qeth_ipa_finalize_cmd(struct qeth_card *card, 2686 struct qeth_cmd_buffer *iob) 2687 { 2688 qeth_mpc_finalize_cmd(card, iob); 2689 2690 /* override with IPA-specific values: */ 2691 __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++; 2692 } 2693 2694 void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 2695 u16 cmd_length) 2696 { 2697 u8 prot_type = qeth_mpc_select_prot_type(card); 2698 u16 total_length = iob->length; 2699 2700 qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length, 2701 iob->data); 2702 iob->finalize = qeth_ipa_finalize_cmd; 2703 2704 memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); 2705 memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); 2706 memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); 2707 memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2); 2708 memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2); 2709 memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), 2710 &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); 2711 memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2); 2712 } 2713 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); 2714 2715 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, 2716 enum qeth_ipa_cmds cmd_code, 2717 enum qeth_prot_versions prot, 2718 unsigned int data_length) 2719 { 2720 enum qeth_link_types link_type = card->info.link_type; 2721 struct qeth_cmd_buffer *iob; 2722 struct qeth_ipacmd_hdr *hdr; 2723 2724 data_length += offsetof(struct qeth_ipa_cmd, data); 2725 iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1, 2726 QETH_IPA_TIMEOUT); 2727 if (!iob) 2728 return NULL; 2729 2730 qeth_prepare_ipa_cmd(card, iob, data_length); 2731 2732 hdr = &__ipa_cmd(iob)->hdr; 2733 hdr->command = cmd_code; 2734 hdr->initiator = IPA_CMD_INITIATOR_HOST; 2735 /* hdr->seqno is set by qeth_send_control_data() */ 2736 hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1; 2737 hdr->rel_adapter_no = (u8) card->dev->dev_port; 2738 hdr->prim_version_no = IS_LAYER2(card) ? 
2 : 1; 2739 hdr->param_count = 1; 2740 hdr->prot_version = prot; 2741 return iob; 2742 } 2743 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd); 2744 2745 static int qeth_send_ipa_cmd_cb(struct qeth_card *card, 2746 struct qeth_reply *reply, unsigned long data) 2747 { 2748 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2749 2750 return (cmd->hdr.return_code) ? -EIO : 0; 2751 } 2752 2753 /** 2754 * qeth_send_ipa_cmd() - send an IPA command 2755 * 2756 * See qeth_send_control_data() for explanation of the arguments. 2757 */ 2758 2759 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, 2760 int (*reply_cb)(struct qeth_card *, struct qeth_reply*, 2761 unsigned long), 2762 void *reply_param) 2763 { 2764 int rc; 2765 2766 QETH_CARD_TEXT(card, 4, "sendipa"); 2767 2768 if (card->read_or_write_problem) { 2769 qeth_put_cmd(iob); 2770 return -EIO; 2771 } 2772 2773 if (reply_cb == NULL) 2774 reply_cb = qeth_send_ipa_cmd_cb; 2775 rc = qeth_send_control_data(card, iob, reply_cb, reply_param); 2776 if (rc == -ETIME) { 2777 qeth_clear_ipacmd_list(card); 2778 qeth_schedule_recovery(card); 2779 } 2780 return rc; 2781 } 2782 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); 2783 2784 static int qeth_send_startlan_cb(struct qeth_card *card, 2785 struct qeth_reply *reply, unsigned long data) 2786 { 2787 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2788 2789 if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE) 2790 return -ENETDOWN; 2791 2792 return (cmd->hdr.return_code) ? -EIO : 0; 2793 } 2794 2795 static int qeth_send_startlan(struct qeth_card *card) 2796 { 2797 struct qeth_cmd_buffer *iob; 2798 2799 QETH_CARD_TEXT(card, 2, "strtlan"); 2800 2801 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0); 2802 if (!iob) 2803 return -ENOMEM; 2804 return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL); 2805 } 2806 2807 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) 2808 { 2809 if (!cmd->hdr.return_code) 2810 cmd->hdr.return_code = 2811 cmd->data.setadapterparms.hdr.return_code; 2812 return cmd->hdr.return_code; 2813 } 2814 2815 static int qeth_query_setadapterparms_cb(struct qeth_card *card, 2816 struct qeth_reply *reply, unsigned long data) 2817 { 2818 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2819 2820 QETH_CARD_TEXT(card, 3, "quyadpcb"); 2821 if (qeth_setadpparms_inspect_rc(cmd)) 2822 return -EIO; 2823 2824 if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { 2825 card->info.link_type = 2826 cmd->data.setadapterparms.data.query_cmds_supp.lan_type; 2827 QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type); 2828 } 2829 card->options.adp.supported_funcs = 2830 cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; 2831 return 0; 2832 } 2833 2834 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, 2835 enum qeth_ipa_setadp_cmd adp_cmd, 2836 unsigned int data_length) 2837 { 2838 struct qeth_ipacmd_setadpparms_hdr *hdr; 2839 struct qeth_cmd_buffer *iob; 2840 2841 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4, 2842 data_length + 2843 offsetof(struct qeth_ipacmd_setadpparms, 2844 data)); 2845 if (!iob) 2846 return NULL; 2847 2848 hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr; 2849 hdr->cmdlength = sizeof(*hdr) + data_length; 2850 hdr->command_code = adp_cmd; 2851 hdr->used_total = 1; 2852 hdr->seq_no = 1; 2853 return iob; 2854 } 2855 2856 static int qeth_query_setadapterparms(struct qeth_card *card) 2857 { 2858 int rc; 2859 struct qeth_cmd_buffer *iob; 2860 2861 
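	/*
	 * Ask the adapter which SETADAPTERPARMS subcommands it implements;
	 * the callback above caches the supported command mask and, when
	 * reported, the LAN type in card->info.link_type.
	 */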
QETH_CARD_TEXT(card, 3, "queryadp"); 2862 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, 2863 SETADP_DATA_SIZEOF(query_cmds_supp)); 2864 if (!iob) 2865 return -ENOMEM; 2866 rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); 2867 return rc; 2868 } 2869 2870 static int qeth_query_ipassists_cb(struct qeth_card *card, 2871 struct qeth_reply *reply, unsigned long data) 2872 { 2873 struct qeth_ipa_cmd *cmd; 2874 2875 QETH_CARD_TEXT(card, 2, "qipasscb"); 2876 2877 cmd = (struct qeth_ipa_cmd *) data; 2878 2879 switch (cmd->hdr.return_code) { 2880 case IPA_RC_SUCCESS: 2881 break; 2882 case IPA_RC_NOTSUPP: 2883 case IPA_RC_L2_UNSUPPORTED_CMD: 2884 QETH_CARD_TEXT(card, 2, "ipaunsup"); 2885 card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS; 2886 card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS; 2887 return -EOPNOTSUPP; 2888 default: 2889 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", 2890 CARD_DEVID(card), cmd->hdr.return_code); 2891 return -EIO; 2892 } 2893 2894 if (cmd->hdr.prot_version == QETH_PROT_IPV4) { 2895 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; 2896 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; 2897 } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) { 2898 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; 2899 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; 2900 } else 2901 QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n", 2902 CARD_DEVID(card)); 2903 return 0; 2904 } 2905 2906 static int qeth_query_ipassists(struct qeth_card *card, 2907 enum qeth_prot_versions prot) 2908 { 2909 int rc; 2910 struct qeth_cmd_buffer *iob; 2911 2912 QETH_CARD_TEXT_(card, 2, "qipassi%i", prot); 2913 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0); 2914 if (!iob) 2915 return -ENOMEM; 2916 rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); 2917 return rc; 2918 } 2919 2920 static int qeth_query_switch_attributes_cb(struct qeth_card *card, 2921 struct qeth_reply *reply, unsigned long data) 2922 { 2923 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2924 struct qeth_query_switch_attributes *attrs; 2925 struct qeth_switch_info *sw_info; 2926 2927 QETH_CARD_TEXT(card, 2, "qswiatcb"); 2928 if (qeth_setadpparms_inspect_rc(cmd)) 2929 return -EIO; 2930 2931 sw_info = (struct qeth_switch_info *)reply->param; 2932 attrs = &cmd->data.setadapterparms.data.query_switch_attributes; 2933 sw_info->capabilities = attrs->capabilities; 2934 sw_info->settings = attrs->settings; 2935 QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, 2936 sw_info->settings); 2937 return 0; 2938 } 2939 2940 int qeth_query_switch_attributes(struct qeth_card *card, 2941 struct qeth_switch_info *sw_info) 2942 { 2943 struct qeth_cmd_buffer *iob; 2944 2945 QETH_CARD_TEXT(card, 2, "qswiattr"); 2946 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES)) 2947 return -EOPNOTSUPP; 2948 if (!netif_carrier_ok(card->dev)) 2949 return -ENOMEDIUM; 2950 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0); 2951 if (!iob) 2952 return -ENOMEM; 2953 return qeth_send_ipa_cmd(card, iob, 2954 qeth_query_switch_attributes_cb, sw_info); 2955 } 2956 2957 struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card, 2958 enum qeth_diags_cmds sub_cmd, 2959 unsigned int data_length) 2960 { 2961 struct qeth_ipacmd_diagass *cmd; 2962 struct qeth_cmd_buffer *iob; 2963 2964 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE, 
2965 DIAG_HDR_LEN + data_length); 2966 if (!iob) 2967 return NULL; 2968 2969 cmd = &__ipa_cmd(iob)->data.diagass; 2970 cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length; 2971 cmd->subcmd = sub_cmd; 2972 return iob; 2973 } 2974 EXPORT_SYMBOL_GPL(qeth_get_diag_cmd); 2975 2976 static int qeth_query_setdiagass_cb(struct qeth_card *card, 2977 struct qeth_reply *reply, unsigned long data) 2978 { 2979 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 2980 u16 rc = cmd->hdr.return_code; 2981 2982 if (rc) { 2983 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); 2984 return -EIO; 2985 } 2986 2987 card->info.diagass_support = cmd->data.diagass.ext; 2988 return 0; 2989 } 2990 2991 static int qeth_query_setdiagass(struct qeth_card *card) 2992 { 2993 struct qeth_cmd_buffer *iob; 2994 2995 QETH_CARD_TEXT(card, 2, "qdiagass"); 2996 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0); 2997 if (!iob) 2998 return -ENOMEM; 2999 return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); 3000 } 3001 3002 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) 3003 { 3004 unsigned long info = get_zeroed_page(GFP_KERNEL); 3005 struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; 3006 struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; 3007 struct ccw_dev_id ccwid; 3008 int level; 3009 3010 tid->chpid = card->info.chpid; 3011 ccw_device_get_id(CARD_RDEV(card), &ccwid); 3012 tid->ssid = ccwid.ssid; 3013 tid->devno = ccwid.devno; 3014 if (!info) 3015 return; 3016 level = stsi(NULL, 0, 0, 0); 3017 if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0)) 3018 tid->lparnr = info222->lpar_number; 3019 if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) { 3020 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name)); 3021 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname)); 3022 } 3023 free_page(info); 3024 return; 3025 } 3026 3027 static int qeth_hw_trap_cb(struct qeth_card *card, 3028 struct qeth_reply *reply, unsigned long data) 3029 { 3030 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 3031 u16 rc = cmd->hdr.return_code; 3032 3033 if (rc) { 3034 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc); 3035 return -EIO; 3036 } 3037 return 0; 3038 } 3039 3040 int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) 3041 { 3042 struct qeth_cmd_buffer *iob; 3043 struct qeth_ipa_cmd *cmd; 3044 3045 QETH_CARD_TEXT(card, 2, "diagtrap"); 3046 iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64); 3047 if (!iob) 3048 return -ENOMEM; 3049 cmd = __ipa_cmd(iob); 3050 cmd->data.diagass.type = 1; 3051 cmd->data.diagass.action = action; 3052 switch (action) { 3053 case QETH_DIAGS_TRAP_ARM: 3054 cmd->data.diagass.options = 0x0003; 3055 cmd->data.diagass.ext = 0x00010000 + 3056 sizeof(struct qeth_trap_id); 3057 qeth_get_trap_id(card, 3058 (struct qeth_trap_id *)cmd->data.diagass.cdata); 3059 break; 3060 case QETH_DIAGS_TRAP_DISARM: 3061 cmd->data.diagass.options = 0x0001; 3062 break; 3063 case QETH_DIAGS_TRAP_CAPTURE: 3064 break; 3065 } 3066 return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL); 3067 } 3068 EXPORT_SYMBOL_GPL(qeth_hw_trap); 3069 3070 static int qeth_check_qdio_errors(struct qeth_card *card, 3071 struct qdio_buffer *buf, 3072 unsigned int qdio_error, 3073 const char *dbftext) 3074 { 3075 if (qdio_error) { 3076 QETH_CARD_TEXT(card, 2, dbftext); 3077 QETH_CARD_TEXT_(card, 2, " F15=%02X", 3078 buf->element[15].sflags); 3079 QETH_CARD_TEXT_(card, 2, " F14=%02X", 3080 buf->element[14].sflags); 3081 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); 
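		/*
		 * An SBALF 15 value of 0x12 reports a frame that the adapter
		 * dropped; account it as an RX drop rather than a hard error.
		 */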
		if ((buf->element[15].sflags) == 0x12) {
			QETH_CARD_STAT_INC(card, rx_dropped);
			return 0;
		} else
			return 1;
	}
	return 0;
}

static void qeth_queue_input_buffer(struct qeth_card *card, int index)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int count;
	int i;
	int rc;
	int newcount = 0;

	count = (index < queue->next_buf_to_init)?
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init - index) :
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are short on memory, so we switch back to
			 * traditional skb allocation and drop packets */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		if (!count) {
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				card->reclaim_index = index;
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return;
		}

		/*
		 * According to old code, requeueing all 128 buffers should
		 * be avoided in order to benefit from PCI avoidance.
		 * This function keeps at least one buffer (the buffer at
		 * 'index') un-requeued -> this buffer is the first buffer
		 * that will be requeued the next time.
		 */
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count);
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
					  QDIO_MAX_BUFFERS_PER_Q;
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(work, struct qeth_card,
		buffer_reclaim_work.work);

	QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
	qeth_queue_input_buffer(card, card->reclaim_index);
}

static void qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return;

	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return;

	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
			(u16)qdio_err, (u8)sbalf15);
}

/**
 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
 * @queue: queue to check for packing buffer
 *
 * Returns number of buffers that were prepared for flush.
 */
static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
	    (buffer->next_element_to_fill > 0)) {
		/* it's a packing buffer */
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
			(queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
		return 1;
	}
	return 0;
}

/*
 * Switches to packing state if the number of used buffers on a queue
 * reaches a certain limit.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (!queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    >= QETH_HIGH_WATERMARK_PACK){
			/* switch non-PACKING -> PACKING */
			QETH_CARD_TEXT(queue->card, 6, "np->pack");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 1;
		}
	}
}

/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
			queue->do_pack = 0;
			return qeth_prep_flush_pack_buffer(queue);
		}
	}
	return 0;
}

static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
{
	struct qeth_card *card = queue->card;
	struct qeth_qdio_out_buffer *buf;
	int rc;
	int i;
	unsigned int qdio_flags;

	for (i = index; i < index + count; ++i) {
		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
		buf = queue->bufs[bidx];
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
				SBAL_EFLAGS_LAST_ENTRY;

		if (queue->bufstates)
			queue->bufstates[bidx].user = buf;

		if (IS_IQD(queue->card))
			continue;

		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * There is no outstanding PCI any more, so we
				 * have to request a PCI to be sure that the
				 * PCI will wake at some time in the future.
				 * Then we can flush packed buffers that might
				 * still be hanging around, which can happen
				 * if no further send was requested by the
				 * stack.
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		}
	}

	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
	if (atomic_read(&queue->set_pci_flags_count))
		qdio_flags |= QDIO_FLAG_PCI_OUT;
	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
		     queue->queue_no, index, count);

	/* Fake the TX completion interrupt: */
	if (IS_IQD(card))
		napi_schedule(&queue->napi);

	if (rc) {
		/* ignore temporary SIGA errors without busy condition */
		if (rc == -ENOBUFS)
			return;
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);

		/* This must not happen under normal circumstances. If it
		 * happens, something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
		return;
	}
}

static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
{
	qeth_flush_buffers(queue, queue->bulk_start, 1);

	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
	queue->prev_hdr = NULL;
}

static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	int index;
	int flush_cnt = 0;
	int q_was_packing = 0;

	/*
	 * Check if we have to switch to non-packing mode or if
	 * we have to get a PCI flag out on the queue.
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
				QETH_OUT_Q_UNLOCKED) {
			/*
			 * If we get in here, there was no action in
			 * do_send_packet. So, we check if there is a
			 * packing buffer to be flushed here.
			 */
			index = queue->next_buf_to_fill;
			q_was_packing = queue->do_pack;
			/* queue->do_pack may change */
			barrier();
			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
			if (!flush_cnt &&
			    !atomic_read(&queue->set_pci_flags_count))
				flush_cnt += qeth_prep_flush_pack_buffer(queue);
			if (q_was_packing)
				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
			if (flush_cnt)
				qeth_flush_buffers(queue, index, flush_cnt);
			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		}
	}
}

static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
				 unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	if (card->dev->flags & IFF_UP)
		napi_schedule(&card->napi);
}

int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	int rc;

	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
		rc = -1;
		goto out;
	} else {
		if (card->options.cq == cq) {
			rc = 0;
			goto out;
		}

		if (card->state != CARD_STATE_DOWN) {
			rc = -1;
			goto out;
		}

		qeth_free_qdio_queues(card);
		card->options.cq = cq;
		rc = 0;
	}
out:
	return rc;

}
EXPORT_SYMBOL_GPL(qeth_configure_cq);

static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
				 unsigned int queue, int first_element,
				 int count)
{
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;

	if (!qeth_is_cq(card, queue))
		return;

	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);

	if (qdio_err) {
		netif_tx_stop_all_queues(card->dev);
		qeth_schedule_recovery(card);
		return;
	}

	for (i = first_element; i < first_element + count; ++i) {
		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
		struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
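		/*
		 * Each completion-queue element carries the address of an
		 * asynchronous operation block (AOB); walk the elements up
		 * to the first empty slot and hand each AOB over to
		 * qeth_qdio_handle_aob().
		 */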
3424 int e = 0; 3425 3426 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && 3427 buffer->element[e].addr) { 3428 unsigned long phys_aob_addr; 3429 3430 phys_aob_addr = (unsigned long) buffer->element[e].addr; 3431 qeth_qdio_handle_aob(card, phys_aob_addr); 3432 ++e; 3433 } 3434 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); 3435 } 3436 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, 3437 card->qdio.c_q->next_buf_to_init, 3438 count); 3439 if (rc) { 3440 dev_warn(&card->gdev->dev, 3441 "QDIO reported an error, rc=%i\n", rc); 3442 QETH_CARD_TEXT(card, 2, "qcqherr"); 3443 } 3444 card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init 3445 + count) % QDIO_MAX_BUFFERS_PER_Q; 3446 } 3447 3448 static void qeth_qdio_input_handler(struct ccw_device *ccwdev, 3449 unsigned int qdio_err, int queue, 3450 int first_elem, int count, 3451 unsigned long card_ptr) 3452 { 3453 struct qeth_card *card = (struct qeth_card *)card_ptr; 3454 3455 QETH_CARD_TEXT_(card, 2, "qihq%d", queue); 3456 QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err); 3457 3458 if (qeth_is_cq(card, queue)) 3459 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count); 3460 else if (qdio_err) 3461 qeth_schedule_recovery(card); 3462 } 3463 3464 static void qeth_qdio_output_handler(struct ccw_device *ccwdev, 3465 unsigned int qdio_error, int __queue, 3466 int first_element, int count, 3467 unsigned long card_ptr) 3468 { 3469 struct qeth_card *card = (struct qeth_card *) card_ptr; 3470 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; 3471 struct qeth_qdio_out_buffer *buffer; 3472 struct net_device *dev = card->dev; 3473 struct netdev_queue *txq; 3474 int i; 3475 3476 QETH_CARD_TEXT(card, 6, "qdouhdl"); 3477 if (qdio_error & QDIO_ERROR_FATAL) { 3478 QETH_CARD_TEXT(card, 2, "achkcond"); 3479 netif_tx_stop_all_queues(dev); 3480 qeth_schedule_recovery(card); 3481 return; 3482 } 3483 3484 for (i = first_element; i < (first_element + count); ++i) { 3485 int bidx = i % QDIO_MAX_BUFFERS_PER_Q; 3486 buffer = queue->bufs[bidx]; 3487 qeth_handle_send_error(card, buffer, qdio_error); 3488 qeth_clear_output_buffer(queue, buffer, qdio_error, 0); 3489 } 3490 3491 atomic_sub(count, &queue->used_buffers); 3492 qeth_check_outbound_queue(queue); 3493 3494 txq = netdev_get_tx_queue(dev, __queue); 3495 /* xmit may have observed the full-condition, but not yet stopped the 3496 * txq. In which case the code below won't trigger. So before returning, 3497 * xmit will re-check the txq's fill level and wake it up if needed. 3498 */ 3499 if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue)) 3500 netif_tx_wake_queue(txq); 3501 } 3502 3503 /** 3504 * Note: Function assumes that we have 4 outbound queues. 
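 * For QETH_PRIO_Q_ING_PREC the IP precedence bits are inverted and the top
 * two bits select the queue, so e.g. precedence 7 maps to queue 0 and
 * precedence 0 to queue 3.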
 */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
		switch (qeth_get_ip_version(skb)) {
		case 4:
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
		case 6:
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
		}
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
			return ~tos >> 6 & 3;
		if (tos & IPTOS_MINCOST)
			return 3;
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
		return ~skb->priority >> 1 & 3;
	case QETH_PRIO_Q_ING_VLAN:
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
		break;
	default:
		break;
	}
	return card->qdio.default_out_queue;
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);

/**
 * qeth_get_elements_for_frags() - find number of SBALEs for skb frags.
 * @skb: SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * fragmented part of the SKB. Returns zero for linear SKB.
 */
static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
	int cnt, elements = 0;

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		elements += qeth_get_elements_for_range(
			(addr_t)skb_frag_address(frag),
			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
	}
	return elements;
}

/**
 * qeth_count_elements() - Counts the number of QDIO buffer elements needed
 * to transmit an skb.
 * @skb: the skb to operate on.
 * @data_offset: skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map
 * the skb's data (both its linear part and paged fragments).
 */
unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}
EXPORT_SYMBOL_GPL(qeth_count_elements);

#define QETH_HDR_CACHE_OBJ_SIZE	(sizeof(struct qeth_hdr_tso) + \
					 MAX_TCP_HEADER)

/**
 * qeth_add_hw_header() - add a HW header to an skb.
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
 *
 * Returns the pushed length. If the header can't be pushed on
 * (e.g. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * An error while creating the header is indicated by a return value < 0.
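 *
 * A header that was allocated from the header cache (return value 0) is
 * marked in the buffer's is_header[] array and freed again once the buffer
 * completes.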
3610 */ 3611 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue, 3612 struct sk_buff *skb, struct qeth_hdr **hdr, 3613 unsigned int hdr_len, unsigned int proto_len, 3614 unsigned int *elements) 3615 { 3616 const unsigned int contiguous = proto_len ? proto_len : 1; 3617 const unsigned int max_elements = queue->max_elements; 3618 unsigned int __elements; 3619 addr_t start, end; 3620 bool push_ok; 3621 int rc; 3622 3623 check_layout: 3624 start = (addr_t)skb->data - hdr_len; 3625 end = (addr_t)skb->data; 3626 3627 if (qeth_get_elements_for_range(start, end + contiguous) == 1) { 3628 /* Push HW header into same page as first protocol header. */ 3629 push_ok = true; 3630 /* ... but TSO always needs a separate element for headers: */ 3631 if (skb_is_gso(skb)) 3632 __elements = 1 + qeth_count_elements(skb, proto_len); 3633 else 3634 __elements = qeth_count_elements(skb, 0); 3635 } else if (!proto_len && PAGE_ALIGNED(skb->data)) { 3636 /* Push HW header into preceding page, flush with skb->data. */ 3637 push_ok = true; 3638 __elements = 1 + qeth_count_elements(skb, 0); 3639 } else { 3640 /* Use header cache, copy protocol headers up. */ 3641 push_ok = false; 3642 __elements = 1 + qeth_count_elements(skb, proto_len); 3643 } 3644 3645 /* Compress skb to fit into one IO buffer: */ 3646 if (__elements > max_elements) { 3647 if (!skb_is_nonlinear(skb)) { 3648 /* Drop it, no easy way of shrinking it further. */ 3649 QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n", 3650 max_elements, __elements, skb->len); 3651 return -E2BIG; 3652 } 3653 3654 rc = skb_linearize(skb); 3655 if (rc) { 3656 QETH_TXQ_STAT_INC(queue, skbs_linearized_fail); 3657 return rc; 3658 } 3659 3660 QETH_TXQ_STAT_INC(queue, skbs_linearized); 3661 /* Linearization changed the layout, re-evaluate: */ 3662 goto check_layout; 3663 } 3664 3665 *elements = __elements; 3666 /* Add the header: */ 3667 if (push_ok) { 3668 *hdr = skb_push(skb, hdr_len); 3669 return hdr_len; 3670 } 3671 /* fall back */ 3672 if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE) 3673 return -E2BIG; 3674 *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); 3675 if (!*hdr) 3676 return -ENOMEM; 3677 /* Copy protocol headers behind HW header: */ 3678 skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len); 3679 return 0; 3680 } 3681 3682 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue, 3683 struct qeth_qdio_out_buffer *buffer, 3684 struct sk_buff *curr_skb, 3685 struct qeth_hdr *curr_hdr) 3686 { 3687 struct qeth_hdr *prev_hdr = queue->prev_hdr; 3688 3689 if (!prev_hdr) 3690 return true; 3691 3692 /* All packets must have the same target: */ 3693 if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { 3694 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list); 3695 3696 return ether_addr_equal(eth_hdr(prev_skb)->h_dest, 3697 eth_hdr(curr_skb)->h_dest) && 3698 qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2); 3699 } 3700 3701 return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) && 3702 qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3); 3703 } 3704 3705 static unsigned int __qeth_fill_buffer(struct sk_buff *skb, 3706 struct qeth_qdio_out_buffer *buf, 3707 bool is_first_elem, unsigned int offset) 3708 { 3709 struct qdio_buffer *buffer = buf->buffer; 3710 int element = buf->next_element_to_fill; 3711 int length = skb_headlen(skb) - offset; 3712 char *data = skb->data + offset; 3713 unsigned int elem_length, cnt; 3714 3715 /* map linear part into buffer element(s) */ 
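	/*
	 * Each buffer element may not cross a page boundary. For example,
	 * with 4 KB pages, a 5 KB linear area that starts 3 KB into a page
	 * is split into a 1 KB element and a 4 KB element.
	 */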
3716 while (length > 0) { 3717 elem_length = min_t(unsigned int, length, 3718 PAGE_SIZE - offset_in_page(data)); 3719 3720 buffer->element[element].addr = data; 3721 buffer->element[element].length = elem_length; 3722 length -= elem_length; 3723 if (is_first_elem) { 3724 is_first_elem = false; 3725 if (length || skb_is_nonlinear(skb)) 3726 /* skb needs additional elements */ 3727 buffer->element[element].eflags = 3728 SBAL_EFLAGS_FIRST_FRAG; 3729 else 3730 buffer->element[element].eflags = 0; 3731 } else { 3732 buffer->element[element].eflags = 3733 SBAL_EFLAGS_MIDDLE_FRAG; 3734 } 3735 3736 data += elem_length; 3737 element++; 3738 } 3739 3740 /* map page frags into buffer element(s) */ 3741 for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { 3742 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; 3743 3744 data = skb_frag_address(frag); 3745 length = skb_frag_size(frag); 3746 while (length > 0) { 3747 elem_length = min_t(unsigned int, length, 3748 PAGE_SIZE - offset_in_page(data)); 3749 3750 buffer->element[element].addr = data; 3751 buffer->element[element].length = elem_length; 3752 buffer->element[element].eflags = 3753 SBAL_EFLAGS_MIDDLE_FRAG; 3754 3755 length -= elem_length; 3756 data += elem_length; 3757 element++; 3758 } 3759 } 3760 3761 if (buffer->element[element - 1].eflags) 3762 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; 3763 buf->next_element_to_fill = element; 3764 return element; 3765 } 3766 3767 /** 3768 * qeth_fill_buffer() - map skb into an output buffer 3769 * @buf: buffer to transport the skb 3770 * @skb: skb to map into the buffer 3771 * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated 3772 * from qeth_core_header_cache. 3773 * @offset: when mapping the skb, start at skb->data + offset 3774 * @hd_len: if > 0, build a dedicated header element of this size 3775 */ 3776 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, 3777 struct sk_buff *skb, struct qeth_hdr *hdr, 3778 unsigned int offset, unsigned int hd_len) 3779 { 3780 struct qdio_buffer *buffer = buf->buffer; 3781 bool is_first_elem = true; 3782 3783 __skb_queue_tail(&buf->skb_list, skb); 3784 3785 /* build dedicated header element */ 3786 if (hd_len) { 3787 int element = buf->next_element_to_fill; 3788 is_first_elem = false; 3789 3790 buffer->element[element].addr = hdr; 3791 buffer->element[element].length = hd_len; 3792 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; 3793 /* remember to free cache-allocated qeth_hdr: */ 3794 buf->is_header[element] = ((void *)hdr != skb->data); 3795 buf->next_element_to_fill++; 3796 } 3797 3798 return __qeth_fill_buffer(skb, buf, is_first_elem, offset); 3799 } 3800 3801 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue, 3802 struct sk_buff *skb, unsigned int elements, 3803 struct qeth_hdr *hdr, unsigned int offset, 3804 unsigned int hd_len) 3805 { 3806 struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start]; 3807 unsigned int bytes = qdisc_pkt_len(skb); 3808 unsigned int next_element; 3809 struct netdev_queue *txq; 3810 bool stopped = false; 3811 bool flush; 3812 3813 txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); 3814 3815 /* Just a sanity check, the wake/stop logic should ensure that we always 3816 * get a free buffer. 
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
	    !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		qeth_flush_queue(queue);
		buffer = queue->bufs[queue->bulk_start];

		/* Sanity-check again: */
		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
			return -EBUSY;
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and fails to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += bytes;
	queue->prev_hdr = hdr;

	flush = __netdev_tx_sent_queue(txq, bytes,
				       !stopped && netdev_xmit_more());

	if (flush || next_element >= queue->max_elements) {
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		qeth_flush_queue(queue);
	}

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return 0;
}

int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
			struct sk_buff *skb, struct qeth_hdr *hdr,
			unsigned int offset, unsigned int hd_len,
			int elements_needed)
{
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	int start_index;
	int flush_count = 0;
	int do_pack = 0;
	int tmp;
	int rc = 0;

	/* spin until we get the queue ... */
	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
	start_index = queue->next_buf_to_fill;
	buffer = queue->bufs[queue->next_buf_to_fill];

	/* Just a sanity check, the wake/stop logic should ensure that we
	 * always get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		return -EBUSY;
	}

	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
		/* does packet fit in current buffer? */
		if (buffer->next_element_to_fill + elements_needed >
		    queue->max_elements) {
			/* ... no -> set state PRIMED */
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			flush_count++;
			queue->next_buf_to_fill =
				(queue->next_buf_to_fill + 1) %
				QDIO_MAX_BUFFERS_PER_Q;
			buffer = queue->bufs[queue->next_buf_to_fill];

			/* We stepped forward, so sanity-check again: */
			if (atomic_read(&buffer->state) !=
			    QETH_QDIO_BUF_EMPTY) {
				qeth_flush_buffers(queue, start_index,
						   flush_count);
				atomic_set(&queue->state,
					   QETH_OUT_Q_UNLOCKED);
				rc = -EBUSY;
				goto out;
			}
		}
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and fails to wake
		 * the txq, then our re-check below will catch the race.
3922 */ 3923 QETH_TXQ_STAT_INC(queue, stopped); 3924 netif_tx_stop_queue(txq); 3925 stopped = true; 3926 } 3927 3928 next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); 3929 3930 if (queue->do_pack) 3931 QETH_TXQ_STAT_INC(queue, skbs_pack); 3932 if (!queue->do_pack || stopped || next_element >= queue->max_elements) { 3933 flush_count++; 3934 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); 3935 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % 3936 QDIO_MAX_BUFFERS_PER_Q; 3937 } 3938 3939 if (flush_count) 3940 qeth_flush_buffers(queue, start_index, flush_count); 3941 else if (!atomic_read(&queue->set_pci_flags_count)) 3942 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); 3943 /* 3944 * queue->state will go from LOCKED -> UNLOCKED or from 3945 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us 3946 * (switch packing state or flush buffer to get another pci flag out). 3947 * In that case we will enter this loop 3948 */ 3949 while (atomic_dec_return(&queue->state)) { 3950 start_index = queue->next_buf_to_fill; 3951 /* check if we can go back to non-packing state */ 3952 tmp = qeth_switch_to_nonpacking_if_needed(queue); 3953 /* 3954 * check if we need to flush a packing buffer to get a pci 3955 * flag out on the queue 3956 */ 3957 if (!tmp && !atomic_read(&queue->set_pci_flags_count)) 3958 tmp = qeth_prep_flush_pack_buffer(queue); 3959 if (tmp) { 3960 qeth_flush_buffers(queue, start_index, tmp); 3961 flush_count += tmp; 3962 } 3963 } 3964 out: 3965 /* at this point the queue is UNLOCKED again */ 3966 if (do_pack) 3967 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); 3968 3969 if (stopped && !qeth_out_queue_is_full(queue)) 3970 netif_tx_start_queue(txq); 3971 return rc; 3972 } 3973 EXPORT_SYMBOL_GPL(qeth_do_send_packet); 3974 3975 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, 3976 unsigned int payload_len, struct sk_buff *skb, 3977 unsigned int proto_len) 3978 { 3979 struct qeth_hdr_ext_tso *ext = &hdr->ext; 3980 3981 ext->hdr_tot_len = sizeof(*ext); 3982 ext->imb_hdr_no = 1; 3983 ext->hdr_type = 1; 3984 ext->hdr_version = 1; 3985 ext->hdr_len = 28; 3986 ext->payload_len = payload_len; 3987 ext->mss = skb_shinfo(skb)->gso_size; 3988 ext->dg_hdr_len = proto_len; 3989 } 3990 3991 int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, 3992 struct qeth_qdio_out_q *queue, int ipv, 3993 void (*fill_header)(struct qeth_qdio_out_q *queue, 3994 struct qeth_hdr *hdr, struct sk_buff *skb, 3995 int ipv, unsigned int data_len)) 3996 { 3997 unsigned int proto_len, hw_hdr_len; 3998 unsigned int frame_len = skb->len; 3999 bool is_tso = skb_is_gso(skb); 4000 unsigned int data_offset = 0; 4001 struct qeth_hdr *hdr = NULL; 4002 unsigned int hd_len = 0; 4003 unsigned int elements; 4004 int push_len, rc; 4005 4006 if (is_tso) { 4007 hw_hdr_len = sizeof(struct qeth_hdr_tso); 4008 proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 4009 } else { 4010 hw_hdr_len = sizeof(struct qeth_hdr); 4011 proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0; 4012 } 4013 4014 rc = skb_cow_head(skb, hw_hdr_len); 4015 if (rc) 4016 return rc; 4017 4018 push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len, 4019 &elements); 4020 if (push_len < 0) 4021 return push_len; 4022 if (is_tso || !push_len) { 4023 /* HW header needs its own buffer element. 
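 * (hd_len then covers the HW header plus the proto_len bytes of protocol headers, and data_offset shifts the skb mapping to start right behind them.)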
*/ 4024 hd_len = hw_hdr_len + proto_len; 4025 data_offset = push_len + proto_len; 4026 } 4027 memset(hdr, 0, hw_hdr_len); 4028 fill_header(queue, hdr, skb, ipv, frame_len); 4029 if (is_tso) 4030 qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, 4031 frame_len - proto_len, skb, proto_len); 4032 4033 if (IS_IQD(card)) { 4034 rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset, 4035 hd_len); 4036 } else { 4037 /* TODO: drop skb_orphan() once TX completion is fast enough */ 4038 skb_orphan(skb); 4039 rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, 4040 hd_len, elements); 4041 } 4042 4043 if (rc && !push_len) 4044 kmem_cache_free(qeth_core_header_cache, hdr); 4045 4046 return rc; 4047 } 4048 EXPORT_SYMBOL_GPL(qeth_xmit); 4049 4050 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, 4051 struct qeth_reply *reply, unsigned long data) 4052 { 4053 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4054 struct qeth_ipacmd_setadpparms *setparms; 4055 4056 QETH_CARD_TEXT(card, 4, "prmadpcb"); 4057 4058 setparms = &(cmd->data.setadapterparms); 4059 if (qeth_setadpparms_inspect_rc(cmd)) { 4060 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); 4061 setparms->data.mode = SET_PROMISC_MODE_OFF; 4062 } 4063 card->info.promisc_mode = setparms->data.mode; 4064 return (cmd->hdr.return_code) ? -EIO : 0; 4065 } 4066 4067 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable) 4068 { 4069 enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON : 4070 SET_PROMISC_MODE_OFF; 4071 struct qeth_cmd_buffer *iob; 4072 struct qeth_ipa_cmd *cmd; 4073 4074 QETH_CARD_TEXT(card, 4, "setprom"); 4075 QETH_CARD_TEXT_(card, 4, "mode:%x", mode); 4076 4077 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, 4078 SETADP_DATA_SIZEOF(mode)); 4079 if (!iob) 4080 return; 4081 cmd = __ipa_cmd(iob); 4082 cmd->data.setadapterparms.data.mode = mode; 4083 qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); 4084 } 4085 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); 4086 4087 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, 4088 struct qeth_reply *reply, unsigned long data) 4089 { 4090 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4091 struct qeth_ipacmd_setadpparms *adp_cmd; 4092 4093 QETH_CARD_TEXT(card, 4, "chgmaccb"); 4094 if (qeth_setadpparms_inspect_rc(cmd)) 4095 return -EIO; 4096 4097 adp_cmd = &cmd->data.setadapterparms; 4098 if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr)) 4099 return -EADDRNOTAVAIL; 4100 4101 if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) && 4102 !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) 4103 return -EADDRNOTAVAIL; 4104 4105 ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr); 4106 return 0; 4107 } 4108 4109 int qeth_setadpparms_change_macaddr(struct qeth_card *card) 4110 { 4111 int rc; 4112 struct qeth_cmd_buffer *iob; 4113 struct qeth_ipa_cmd *cmd; 4114 4115 QETH_CARD_TEXT(card, 4, "chgmac"); 4116 4117 iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, 4118 SETADP_DATA_SIZEOF(change_addr)); 4119 if (!iob) 4120 return -ENOMEM; 4121 cmd = __ipa_cmd(iob); 4122 cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; 4123 cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN; 4124 ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr, 4125 card->dev->dev_addr); 4126 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, 4127 NULL); 4128 return rc; 4129 } 4130 
EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 4131 4132 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, 4133 struct qeth_reply *reply, unsigned long data) 4134 { 4135 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4136 struct qeth_set_access_ctrl *access_ctrl_req; 4137 int fallback = *(int *)reply->param; 4138 4139 QETH_CARD_TEXT(card, 4, "setaccb"); 4140 if (cmd->hdr.return_code) 4141 return -EIO; 4142 qeth_setadpparms_inspect_rc(cmd); 4143 4144 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4145 QETH_CARD_TEXT_(card, 2, "rc=%d", 4146 cmd->data.setadapterparms.hdr.return_code); 4147 if (cmd->data.setadapterparms.hdr.return_code != 4148 SET_ACCESS_CTRL_RC_SUCCESS) 4149 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", 4150 access_ctrl_req->subcmd_code, CARD_DEVID(card), 4151 cmd->data.setadapterparms.hdr.return_code); 4152 switch (cmd->data.setadapterparms.hdr.return_code) { 4153 case SET_ACCESS_CTRL_RC_SUCCESS: 4154 if (card->options.isolation == ISOLATION_MODE_NONE) { 4155 dev_info(&card->gdev->dev, 4156 "QDIO data connection isolation is deactivated\n"); 4157 } else { 4158 dev_info(&card->gdev->dev, 4159 "QDIO data connection isolation is activated\n"); 4160 } 4161 break; 4162 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: 4163 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n", 4164 CARD_DEVID(card)); 4165 if (fallback) 4166 card->options.isolation = card->options.prev_isolation; 4167 break; 4168 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: 4169 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n", 4170 CARD_DEVID(card)); 4171 if (fallback) 4172 card->options.isolation = card->options.prev_isolation; 4173 break; 4174 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: 4175 dev_err(&card->gdev->dev, "Adapter does not " 4176 "support QDIO data connection isolation\n"); 4177 break; 4178 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: 4179 dev_err(&card->gdev->dev, 4180 "Adapter is dedicated. " 4181 "QDIO data connection isolation not supported\n"); 4182 if (fallback) 4183 card->options.isolation = card->options.prev_isolation; 4184 break; 4185 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: 4186 dev_err(&card->gdev->dev, 4187 "TSO does not permit QDIO data connection isolation\n"); 4188 if (fallback) 4189 card->options.isolation = card->options.prev_isolation; 4190 break; 4191 case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: 4192 dev_err(&card->gdev->dev, "The adjacent switch port does not " 4193 "support reflective relay mode\n"); 4194 if (fallback) 4195 card->options.isolation = card->options.prev_isolation; 4196 break; 4197 case SET_ACCESS_CTRL_RC_REFLREL_FAILED: 4198 dev_err(&card->gdev->dev, "The reflective relay mode cannot be " 4199 "enabled at the adjacent switch port\n"); 4200 if (fallback) 4201 card->options.isolation = card->options.prev_isolation; 4202 break; 4203 case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: 4204 dev_warn(&card->gdev->dev, "Turning off reflective relay mode " 4205 "at the adjacent switch failed\n"); 4206 break; 4207 default: 4208 /* this should never happen */ 4209 if (fallback) 4210 card->options.isolation = card->options.prev_isolation; 4211 break; 4212 } 4213 return (cmd->hdr.return_code) ?
-EIO : 0; 4214 } 4215 4216 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, 4217 enum qeth_ipa_isolation_modes isolation, int fallback) 4218 { 4219 int rc; 4220 struct qeth_cmd_buffer *iob; 4221 struct qeth_ipa_cmd *cmd; 4222 struct qeth_set_access_ctrl *access_ctrl_req; 4223 4224 QETH_CARD_TEXT(card, 4, "setacctl"); 4225 4226 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, 4227 SETADP_DATA_SIZEOF(set_access_ctrl)); 4228 if (!iob) 4229 return -ENOMEM; 4230 cmd = __ipa_cmd(iob); 4231 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; 4232 access_ctrl_req->subcmd_code = isolation; 4233 4234 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, 4235 &fallback); 4236 QETH_CARD_TEXT_(card, 2, "rc=%d", rc); 4237 return rc; 4238 } 4239 4240 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) 4241 { 4242 int rc = 0; 4243 4244 QETH_CARD_TEXT(card, 4, "setactlo"); 4245 4246 if ((IS_OSD(card) || IS_OSX(card)) && 4247 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { 4248 rc = qeth_setadpparms_set_access_ctrl(card, 4249 card->options.isolation, fallback); 4250 if (rc) { 4251 QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d)) on device %x: sending failed\n", 4252 rc, CARD_DEVID(card)); 4253 rc = -EOPNOTSUPP; 4254 } 4255 } else if (card->options.isolation != ISOLATION_MODE_NONE) { 4256 card->options.isolation = ISOLATION_MODE_NONE; 4257 4258 dev_err(&card->gdev->dev, "Adapter does not " 4259 "support QDIO data connection isolation\n"); 4260 rc = -EOPNOTSUPP; 4261 } 4262 return rc; 4263 } 4264 EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online); 4265 4266 void qeth_tx_timeout(struct net_device *dev) 4267 { 4268 struct qeth_card *card; 4269 4270 card = dev->ml_priv; 4271 QETH_CARD_TEXT(card, 4, "txtimeo"); 4272 qeth_schedule_recovery(card); 4273 } 4274 EXPORT_SYMBOL_GPL(qeth_tx_timeout); 4275 4276 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) 4277 { 4278 struct qeth_card *card = dev->ml_priv; 4279 int rc = 0; 4280 4281 switch (regnum) { 4282 case MII_BMCR: /* Basic mode control register */ 4283 rc = BMCR_FULLDPLX; 4284 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && 4285 (card->info.link_type != QETH_LINK_TYPE_OSN) && 4286 (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) && 4287 (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH)) 4288 rc |= BMCR_SPEED100; 4289 break; 4290 case MII_BMSR: /* Basic mode status register */ 4291 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS | 4292 BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | 4293 BMSR_100BASE4; 4294 break; 4295 case MII_PHYSID1: /* PHYS ID 1 */ 4296 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | 4297 dev->dev_addr[2]; 4298 rc = (rc >> 5) & 0xFFFF; 4299 break; 4300 case MII_PHYSID2: /* PHYS ID 2 */ 4301 rc = (dev->dev_addr[2] << 10) & 0xFFFF; 4302 break; 4303 case MII_ADVERTISE: /* Advertisement control reg */ 4304 rc = ADVERTISE_ALL; 4305 break; 4306 case MII_LPA: /* Link partner ability reg */ 4307 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | 4308 LPA_100BASE4 | LPA_LPACK; 4309 break; 4310 case MII_EXPANSION: /* Expansion register */ 4311 break; 4312 case MII_DCOUNTER: /* disconnect counter */ 4313 break; 4314 case MII_FCSCOUNTER: /* false carrier counter */ 4315 break; 4316 case MII_NWAYTEST: /* N-way auto-neg test register */ 4317 break; 4318 case MII_RERRCOUNTER: /* rx error counter */ 4319 rc = card->stats.rx_errors; 4320 break; 4321 case MII_SREVISION: /* silicon revision */ 4322 break; 4323
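/* The remaining MII registers are not emulated; reading them returns 0. */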
case MII_RESV1: /* reserved 1 */ 4324 break; 4325 case MII_LBRERROR: /* loopback, rx, bypass error */ 4326 break; 4327 case MII_PHYADDR: /* physical address */ 4328 break; 4329 case MII_RESV2: /* reserved 2 */ 4330 break; 4331 case MII_TPISTATUS: /* TPI status for 10mbps */ 4332 break; 4333 case MII_NCONFIG: /* network interface config */ 4334 break; 4335 default: 4336 break; 4337 } 4338 return rc; 4339 } 4340 4341 static int qeth_snmp_command_cb(struct qeth_card *card, 4342 struct qeth_reply *reply, unsigned long data) 4343 { 4344 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 4345 struct qeth_arp_query_info *qinfo = reply->param; 4346 struct qeth_ipacmd_setadpparms *adp_cmd; 4347 unsigned int data_len; 4348 void *snmp_data; 4349 4350 QETH_CARD_TEXT(card, 3, "snpcmdcb"); 4351 4352 if (cmd->hdr.return_code) { 4353 QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); 4354 return -EIO; 4355 } 4356 if (cmd->data.setadapterparms.hdr.return_code) { 4357 cmd->hdr.return_code = 4358 cmd->data.setadapterparms.hdr.return_code; 4359 QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); 4360 return -EIO; 4361 } 4362 4363 adp_cmd = &cmd->data.setadapterparms; 4364 data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr); 4365 if (adp_cmd->hdr.seq_no == 1) { 4366 snmp_data = &adp_cmd->data.snmp; 4367 } else { 4368 snmp_data = &adp_cmd->data.snmp.request; 4369 data_len -= offsetof(struct qeth_snmp_cmd, request); 4370 } 4371 4372 /* check if there is enough room in userspace */ 4373 if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { 4374 QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC); 4375 return -ENOSPC; 4376 } 4377 QETH_CARD_TEXT_(card, 4, "snore%i", 4378 cmd->data.setadapterparms.hdr.used_total); 4379 QETH_CARD_TEXT_(card, 4, "sseqn%i", 4380 cmd->data.setadapterparms.hdr.seq_no); 4381 /* copy entries to user buffer */ 4382 memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); 4383 qinfo->udata_offset += data_len; 4384 4385 if (cmd->data.setadapterparms.hdr.seq_no < 4386 cmd->data.setadapterparms.hdr.used_total) 4387 return 1; 4388 return 0; 4389 } 4390 4391 static int qeth_snmp_command(struct qeth_card *card, char __user *udata) 4392 { 4393 struct qeth_snmp_ureq __user *ureq; 4394 struct qeth_cmd_buffer *iob; 4395 unsigned int req_len; 4396 struct qeth_arp_query_info qinfo = {0, }; 4397 int rc = 0; 4398 4399 QETH_CARD_TEXT(card, 3, "snmpcmd"); 4400 4401 if (IS_VM_NIC(card)) 4402 return -EOPNOTSUPP; 4403 4404 if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && 4405 IS_LAYER3(card)) 4406 return -EOPNOTSUPP; 4407 4408 ureq = (struct qeth_snmp_ureq __user *) udata; 4409 if (get_user(qinfo.udata_len, &ureq->hdr.data_len) || 4410 get_user(req_len, &ureq->hdr.req_len)) 4411 return -EFAULT; 4412 4413 /* Sanitize user input, to avoid overflows in iob size calculation: */ 4414 if (req_len > QETH_BUFSIZE) 4415 return -EINVAL; 4416 4417 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); 4418 if (!iob) 4419 return -ENOMEM; 4420 4421 if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp, 4422 &ureq->cmd, req_len)) { 4423 qeth_put_cmd(iob); 4424 return -EFAULT; 4425 } 4426 4427 qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); 4428 if (!qinfo.udata) { 4429 qeth_put_cmd(iob); 4430 return -ENOMEM; 4431 } 4432 qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); 4433 4434 rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo); 4435 if (rc) 4436 QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", 4437
CARD_DEVID(card), rc); 4438 else { 4439 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) 4440 rc = -EFAULT; 4441 } 4442 4443 kfree(qinfo.udata); 4444 return rc; 4445 } 4446 4447 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, 4448 struct qeth_reply *reply, unsigned long data) 4449 { 4450 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4451 struct qeth_qoat_priv *priv; 4452 char *resdata; 4453 int resdatalen; 4454 4455 QETH_CARD_TEXT(card, 3, "qoatcb"); 4456 if (qeth_setadpparms_inspect_rc(cmd)) 4457 return -EIO; 4458 4459 priv = (struct qeth_qoat_priv *)reply->param; 4460 resdatalen = cmd->data.setadapterparms.hdr.cmdlength; 4461 resdata = (char *)data + 28; 4462 4463 if (resdatalen > (priv->buffer_len - priv->response_len)) 4464 return -ENOSPC; 4465 4466 memcpy((priv->buffer + priv->response_len), resdata, 4467 resdatalen); 4468 priv->response_len += resdatalen; 4469 4470 if (cmd->data.setadapterparms.hdr.seq_no < 4471 cmd->data.setadapterparms.hdr.used_total) 4472 return 1; 4473 return 0; 4474 } 4475 4476 static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) 4477 { 4478 int rc = 0; 4479 struct qeth_cmd_buffer *iob; 4480 struct qeth_ipa_cmd *cmd; 4481 struct qeth_query_oat *oat_req; 4482 struct qeth_query_oat_data oat_data; 4483 struct qeth_qoat_priv priv; 4484 void __user *tmp; 4485 4486 QETH_CARD_TEXT(card, 3, "qoatcmd"); 4487 4488 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { 4489 rc = -EOPNOTSUPP; 4490 goto out; 4491 } 4492 4493 if (copy_from_user(&oat_data, udata, 4494 sizeof(struct qeth_query_oat_data))) { 4495 rc = -EFAULT; 4496 goto out; 4497 } 4498 4499 priv.buffer_len = oat_data.buffer_len; 4500 priv.response_len = 0; 4501 priv.buffer = vzalloc(oat_data.buffer_len); 4502 if (!priv.buffer) { 4503 rc = -ENOMEM; 4504 goto out; 4505 } 4506 4507 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, 4508 SETADP_DATA_SIZEOF(query_oat)); 4509 if (!iob) { 4510 rc = -ENOMEM; 4511 goto out_free; 4512 } 4513 cmd = __ipa_cmd(iob); 4514 oat_req = &cmd->data.setadapterparms.data.query_oat; 4515 oat_req->subcmd_code = oat_data.command; 4516 4517 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, 4518 &priv); 4519 if (!rc) { 4520 if (is_compat_task()) 4521 tmp = compat_ptr(oat_data.ptr); 4522 else 4523 tmp = (void __user *)(unsigned long)oat_data.ptr; 4524 4525 if (copy_to_user(tmp, priv.buffer, 4526 priv.response_len)) { 4527 rc = -EFAULT; 4528 goto out_free; 4529 } 4530 4531 oat_data.response_len = priv.response_len; 4532 4533 if (copy_to_user(udata, &oat_data, 4534 sizeof(struct qeth_query_oat_data))) 4535 rc = -EFAULT; 4536 } 4537 4538 out_free: 4539 vfree(priv.buffer); 4540 out: 4541 return rc; 4542 } 4543 4544 static int qeth_query_card_info_cb(struct qeth_card *card, 4545 struct qeth_reply *reply, unsigned long data) 4546 { 4547 struct carrier_info *carrier_info = (struct carrier_info *)reply->param; 4548 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; 4549 struct qeth_query_card_info *card_info; 4550 4551 QETH_CARD_TEXT(card, 2, "qcrdincb"); 4552 if (qeth_setadpparms_inspect_rc(cmd)) 4553 return -EIO; 4554 4555 card_info = &cmd->data.setadapterparms.data.card_info; 4556 carrier_info->card_type = card_info->card_type; 4557 carrier_info->port_mode = card_info->port_mode; 4558 carrier_info->port_speed = card_info->port_speed; 4559 return 0; 4560 } 4561 4562 int qeth_query_card_info(struct qeth_card *card, 4563 struct carrier_info *carrier_info) 4564 { 4565 struct qeth_cmd_buffer *iob; 4566 4567 
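/* Reports card type, port mode and port speed as seen by the adapter; used e.g. by the qeth ethtool code to fill in link settings. The carrier_info fields are set by qeth_query_card_info_cb(). */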
QETH_CARD_TEXT(card, 2, "qcrdinfo"); 4568 if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO)) 4569 return -EOPNOTSUPP; 4570 iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0); 4571 if (!iob) 4572 return -ENOMEM; 4573 return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, 4574 (void *)carrier_info); 4575 } 4576 4577 /** 4578 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address 4579 * @card: pointer to a qeth_card 4580 * 4581 * Returns 4582 * 0, if a MAC address has been set for the card's netdevice 4583 * a return code, for various error conditions 4584 */ 4585 int qeth_vm_request_mac(struct qeth_card *card) 4586 { 4587 struct diag26c_mac_resp *response; 4588 struct diag26c_mac_req *request; 4589 struct ccw_dev_id id; 4590 int rc; 4591 4592 QETH_CARD_TEXT(card, 2, "vmreqmac"); 4593 4594 request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); 4595 response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); 4596 if (!request || !response) { 4597 rc = -ENOMEM; 4598 goto out; 4599 } 4600 4601 ccw_device_get_id(CARD_DDEV(card), &id); 4602 request->resp_buf_len = sizeof(*response); 4603 request->resp_version = DIAG26C_VERSION2; 4604 request->op_code = DIAG26C_GET_MAC; 4605 request->devno = id.devno; 4606 4607 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 4608 rc = diag26c(request, response, DIAG26C_MAC_SERVICES); 4609 QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); 4610 if (rc) 4611 goto out; 4612 QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); 4613 4614 if (request->resp_buf_len < sizeof(*response) || 4615 response->version != request->resp_version) { 4616 rc = -EIO; 4617 QETH_CARD_TEXT(card, 2, "badresp"); 4618 QETH_CARD_HEX(card, 2, &request->resp_buf_len, 4619 sizeof(request->resp_buf_len)); 4620 } else if (!is_valid_ether_addr(response->mac)) { 4621 rc = -EINVAL; 4622 QETH_CARD_TEXT(card, 2, "badmac"); 4623 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN); 4624 } else { 4625 ether_addr_copy(card->dev->dev_addr, response->mac); 4626 } 4627 4628 out: 4629 kfree(response); 4630 kfree(request); 4631 return rc; 4632 } 4633 EXPORT_SYMBOL_GPL(qeth_vm_request_mac); 4634 4635 static void qeth_determine_capabilities(struct qeth_card *card) 4636 { 4637 int rc; 4638 struct ccw_device *ddev; 4639 int ddev_offline = 0; 4640 4641 QETH_CARD_TEXT(card, 2, "detcapab"); 4642 ddev = CARD_DDEV(card); 4643 if (!ddev->online) { 4644 ddev_offline = 1; 4645 rc = ccw_device_set_online(ddev); 4646 if (rc) { 4647 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 4648 goto out; 4649 } 4650 } 4651 4652 rc = qeth_read_conf_data(card); 4653 if (rc) { 4654 QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n", 4655 CARD_DEVID(card), rc); 4656 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 4657 goto out_offline; 4658 } 4659 4660 rc = qdio_get_ssqd_desc(ddev, &card->ssqd); 4661 if (rc) 4662 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 4663 4664 QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt); 4665 QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1); 4666 QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2); 4667 QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3); 4668 QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt); 4669 if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) || 4670 ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || 4671 ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { 4672 dev_info(&card->gdev->dev, 4673 "Completion Queueing supported\n"); 4674 } else { 4675 card->options.cq = QETH_CQ_NOTAVAILABLE; 4676 } 4677 4678 4679 
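/* If the data device was offline on entry, it was only set online for probing the capabilities - take it offline again: */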
out_offline: 4680 if (ddev_offline == 1) 4681 ccw_device_set_offline(ddev); 4682 out: 4683 return; 4684 } 4685 4686 static void qeth_qdio_establish_cq(struct qeth_card *card, 4687 struct qdio_buffer **in_sbal_ptrs, 4688 void (**queue_start_poll) 4689 (struct ccw_device *, int, 4690 unsigned long)) 4691 { 4692 int i; 4693 4694 if (card->options.cq == QETH_CQ_ENABLED) { 4695 int offset = QDIO_MAX_BUFFERS_PER_Q * 4696 (card->qdio.no_in_queues - 1); 4697 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { 4698 in_sbal_ptrs[offset + i] = (struct qdio_buffer *) 4699 virt_to_phys(card->qdio.c_q->bufs[i].buffer); 4700 } 4701 4702 queue_start_poll[card->qdio.no_in_queues - 1] = NULL; 4703 } 4704 } 4705 4706 static int qeth_qdio_establish(struct qeth_card *card) 4707 { 4708 struct qdio_initialize init_data; 4709 char *qib_param_field; 4710 struct qdio_buffer **in_sbal_ptrs; 4711 void (**queue_start_poll) (struct ccw_device *, int, unsigned long); 4712 struct qdio_buffer **out_sbal_ptrs; 4713 int i, j, k; 4714 int rc = 0; 4715 4716 QETH_CARD_TEXT(card, 2, "qdioest"); 4717 4718 qib_param_field = kzalloc(FIELD_SIZEOF(struct qib, parm), GFP_KERNEL); 4719 if (!qib_param_field) { 4720 rc = -ENOMEM; 4721 goto out_free_nothing; 4722 } 4723 4724 qeth_create_qib_param_field(card, qib_param_field); 4725 qeth_create_qib_param_field_blkt(card, qib_param_field); 4726 4727 in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q, 4728 sizeof(void *), 4729 GFP_KERNEL); 4730 if (!in_sbal_ptrs) { 4731 rc = -ENOMEM; 4732 goto out_free_qib_param; 4733 } 4734 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { 4735 in_sbal_ptrs[i] = (struct qdio_buffer *) 4736 virt_to_phys(card->qdio.in_q->bufs[i].buffer); 4737 } 4738 4739 queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *), 4740 GFP_KERNEL); 4741 if (!queue_start_poll) { 4742 rc = -ENOMEM; 4743 goto out_free_in_sbals; 4744 } 4745 for (i = 0; i < card->qdio.no_in_queues; ++i) 4746 queue_start_poll[i] = qeth_qdio_start_poll; 4747 4748 qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll); 4749 4750 out_sbal_ptrs = 4751 kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q, 4752 sizeof(void *), 4753 GFP_KERNEL); 4754 if (!out_sbal_ptrs) { 4755 rc = -ENOMEM; 4756 goto out_free_queue_start_poll; 4757 } 4758 for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i) 4759 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) { 4760 out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys( 4761 card->qdio.out_qs[i]->bufs[j]->buffer); 4762 } 4763 4764 memset(&init_data, 0, sizeof(struct qdio_initialize)); 4765 init_data.cdev = CARD_DDEV(card); 4766 init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT : 4767 QDIO_QETH_QFMT; 4768 init_data.qib_param_field_format = 0; 4769 init_data.qib_param_field = qib_param_field; 4770 init_data.no_input_qs = card->qdio.no_in_queues; 4771 init_data.no_output_qs = card->qdio.no_out_queues; 4772 init_data.input_handler = qeth_qdio_input_handler; 4773 init_data.output_handler = qeth_qdio_output_handler; 4774 init_data.queue_start_poll_array = queue_start_poll; 4775 init_data.int_parm = (unsigned long) card; 4776 init_data.input_sbal_addr_array = in_sbal_ptrs; 4777 init_data.output_sbal_addr_array = out_sbal_ptrs; 4778 init_data.output_sbal_state_array = card->qdio.out_bufstates; 4779 init_data.scan_threshold = IS_IQD(card) ? 
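/* Presumably: IQD TX completion is driven by the dedicated TX polling path, so qdio need not scan the output queues (threshold 0); OSA devices rely on qdio scanning after 32 buffers. */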
0 : 32; 4780 4781 if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, 4782 QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { 4783 rc = qdio_allocate(&init_data); 4784 if (rc) { 4785 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 4786 goto out; 4787 } 4788 rc = qdio_establish(&init_data); 4789 if (rc) { 4790 atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); 4791 qdio_free(CARD_DDEV(card)); 4792 } 4793 } 4794 4795 switch (card->options.cq) { 4796 case QETH_CQ_ENABLED: 4797 dev_info(&card->gdev->dev, "Completion Queue support enabled\n"); 4798 break; 4799 case QETH_CQ_DISABLED: 4800 dev_info(&card->gdev->dev, "Completion Queue support disabled\n"); 4801 break; 4802 default: 4803 break; 4804 } 4805 out: 4806 kfree(out_sbal_ptrs); 4807 out_free_queue_start_poll: 4808 kfree(queue_start_poll); 4809 out_free_in_sbals: 4810 kfree(in_sbal_ptrs); 4811 out_free_qib_param: 4812 kfree(qib_param_field); 4813 out_free_nothing: 4814 return rc; 4815 } 4816 4817 static void qeth_core_free_card(struct qeth_card *card) 4818 { 4819 QETH_CARD_TEXT(card, 2, "freecrd"); 4820 qeth_clean_channel(&card->read); 4821 qeth_clean_channel(&card->write); 4822 qeth_clean_channel(&card->data); 4823 qeth_put_cmd(card->read_cmd); 4824 destroy_workqueue(card->event_wq); 4825 qeth_free_qdio_queues(card); 4826 unregister_service_level(&card->qeth_service_level); 4827 dev_set_drvdata(&card->gdev->dev, NULL); 4828 kfree(card); 4829 } 4830 4831 void qeth_trace_features(struct qeth_card *card) 4832 { 4833 QETH_CARD_TEXT(card, 2, "features"); 4834 QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4)); 4835 QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6)); 4836 QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp)); 4837 QETH_CARD_HEX(card, 2, &card->info.diagass_support, 4838 sizeof(card->info.diagass_support)); 4839 } 4840 EXPORT_SYMBOL_GPL(qeth_trace_features); 4841 4842 static struct ccw_device_id qeth_ids[] = { 4843 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), 4844 .driver_info = QETH_CARD_TYPE_OSD}, 4845 {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), 4846 .driver_info = QETH_CARD_TYPE_IQD}, 4847 {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06), 4848 .driver_info = QETH_CARD_TYPE_OSN}, 4849 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), 4850 .driver_info = QETH_CARD_TYPE_OSM}, 4851 {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), 4852 .driver_info = QETH_CARD_TYPE_OSX}, 4853 {}, 4854 }; 4855 MODULE_DEVICE_TABLE(ccw, qeth_ids); 4856 4857 static struct ccw_driver qeth_ccw_driver = { 4858 .driver = { 4859 .owner = THIS_MODULE, 4860 .name = "qeth", 4861 }, 4862 .ids = qeth_ids, 4863 .probe = ccwgroup_probe_ccwdev, 4864 .remove = ccwgroup_remove_ccwdev, 4865 }; 4866 4867 int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok) 4868 { 4869 int retries = 3; 4870 int rc; 4871 4872 QETH_CARD_TEXT(card, 2, "hrdsetup"); 4873 atomic_set(&card->force_alloc_skb, 0); 4874 rc = qeth_update_from_chp_desc(card); 4875 if (rc) 4876 return rc; 4877 retry: 4878 if (retries < 3) 4879 QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", 4880 CARD_DEVID(card)); 4881 rc = qeth_qdio_clear_card(card, !IS_IQD(card)); 4882 ccw_device_set_offline(CARD_DDEV(card)); 4883 ccw_device_set_offline(CARD_WDEV(card)); 4884 ccw_device_set_offline(CARD_RDEV(card)); 4885 qdio_free(CARD_DDEV(card)); 4886 rc = ccw_device_set_online(CARD_RDEV(card)); 4887 if (rc) 4888 goto retriable; 4889 rc = ccw_device_set_online(CARD_WDEV(card)); 4890 if (rc) 4891 goto retriable; 4892 rc =
ccw_device_set_online(CARD_DDEV(card)); 4893 if (rc) 4894 goto retriable; 4895 retriable: 4896 if (rc == -ERESTARTSYS) { 4897 QETH_CARD_TEXT(card, 2, "break1"); 4898 return rc; 4899 } else if (rc) { 4900 QETH_CARD_TEXT_(card, 2, "1err%d", rc); 4901 if (--retries < 0) 4902 goto out; 4903 else 4904 goto retry; 4905 } 4906 qeth_determine_capabilities(card); 4907 qeth_init_tokens(card); 4908 qeth_init_func_level(card); 4909 4910 rc = qeth_idx_activate_read_channel(card); 4911 if (rc == -EINTR) { 4912 QETH_CARD_TEXT(card, 2, "break2"); 4913 return rc; 4914 } else if (rc) { 4915 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 4916 if (--retries < 0) 4917 goto out; 4918 else 4919 goto retry; 4920 } 4921 4922 rc = qeth_idx_activate_write_channel(card); 4923 if (rc == -EINTR) { 4924 QETH_CARD_TEXT(card, 2, "break3"); 4925 return rc; 4926 } else if (rc) { 4927 QETH_CARD_TEXT_(card, 2, "4err%d", rc); 4928 if (--retries < 0) 4929 goto out; 4930 else 4931 goto retry; 4932 } 4933 card->read_or_write_problem = 0; 4934 rc = qeth_mpc_initialize(card); 4935 if (rc) { 4936 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 4937 goto out; 4938 } 4939 4940 rc = qeth_send_startlan(card); 4941 if (rc) { 4942 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 4943 if (rc == -ENETDOWN) { 4944 dev_warn(&card->gdev->dev, "The LAN is offline\n"); 4945 *carrier_ok = false; 4946 } else { 4947 goto out; 4948 } 4949 } else { 4950 *carrier_ok = true; 4951 } 4952 4953 card->options.ipa4.supported_funcs = 0; 4954 card->options.ipa6.supported_funcs = 0; 4955 card->options.adp.supported_funcs = 0; 4956 card->options.sbp.supported_funcs = 0; 4957 card->info.diagass_support = 0; 4958 rc = qeth_query_ipassists(card, QETH_PROT_IPV4); 4959 if (rc == -ENOMEM) 4960 goto out; 4961 if (qeth_is_supported(card, IPA_IPV6)) { 4962 rc = qeth_query_ipassists(card, QETH_PROT_IPV6); 4963 if (rc == -ENOMEM) 4964 goto out; 4965 } 4966 if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { 4967 rc = qeth_query_setadapterparms(card); 4968 if (rc < 0) { 4969 QETH_CARD_TEXT_(card, 2, "7err%d", rc); 4970 goto out; 4971 } 4972 } 4973 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { 4974 rc = qeth_query_setdiagass(card); 4975 if (rc < 0) { 4976 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 4977 goto out; 4978 } 4979 } 4980 return 0; 4981 out: 4982 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover from " 4983 "an error on the device\n"); 4984 QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup!
rc=%d\n", 4985 CARD_DEVID(card), rc); 4986 return rc; 4987 } 4988 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); 4989 4990 static void qeth_create_skb_frag(struct qdio_buffer_element *element, 4991 struct sk_buff *skb, int offset, int data_len) 4992 { 4993 struct page *page = virt_to_page(element->addr); 4994 unsigned int next_frag; 4995 4996 /* first fill the linear space */ 4997 if (!skb->len) { 4998 unsigned int linear = min(data_len, skb_tailroom(skb)); 4999 5000 skb_put_data(skb, element->addr + offset, linear); 5001 data_len -= linear; 5002 if (!data_len) 5003 return; 5004 offset += linear; 5005 /* fall through to add page frag for remaining data */ 5006 } 5007 5008 next_frag = skb_shinfo(skb)->nr_frags; 5009 get_page(page); 5010 skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len); 5011 } 5012 5013 static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 5014 { 5015 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); 5016 } 5017 5018 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, 5019 struct qeth_qdio_buffer *qethbuffer, 5020 struct qdio_buffer_element **__element, int *__offset, 5021 struct qeth_hdr **hdr) 5022 { 5023 struct qdio_buffer_element *element = *__element; 5024 struct qdio_buffer *buffer = qethbuffer->buffer; 5025 int offset = *__offset; 5026 struct sk_buff *skb; 5027 int skb_len = 0; 5028 void *data_ptr; 5029 int data_len; 5030 int headroom = 0; 5031 int use_rx_sg = 0; 5032 5033 /* qeth_hdr must not cross element boundaries */ 5034 while (element->length < offset + sizeof(struct qeth_hdr)) { 5035 if (qeth_is_last_sbale(element)) 5036 return NULL; 5037 element++; 5038 offset = 0; 5039 } 5040 *hdr = element->addr + offset; 5041 5042 offset += sizeof(struct qeth_hdr); 5043 switch ((*hdr)->hdr.l2.id) { 5044 case QETH_HEADER_TYPE_LAYER2: 5045 skb_len = (*hdr)->hdr.l2.pkt_length; 5046 break; 5047 case QETH_HEADER_TYPE_LAYER3: 5048 skb_len = (*hdr)->hdr.l3.length; 5049 headroom = ETH_HLEN; 5050 break; 5051 case QETH_HEADER_TYPE_OSN: 5052 skb_len = (*hdr)->hdr.osn.pdu_length; 5053 headroom = sizeof(struct qeth_hdr); 5054 break; 5055 default: 5056 break; 5057 } 5058 5059 if (!skb_len) 5060 return NULL; 5061 5062 if (((skb_len >= card->options.rx_sg_cb) && 5063 !IS_OSN(card) && 5064 (!atomic_read(&card->force_alloc_skb))) || 5065 (card->options.cq == QETH_CQ_ENABLED)) 5066 use_rx_sg = 1; 5067 5068 if (use_rx_sg && qethbuffer->rx_skb) { 5069 /* QETH_CQ_ENABLED only: */ 5070 skb = qethbuffer->rx_skb; 5071 qethbuffer->rx_skb = NULL; 5072 } else { 5073 unsigned int linear = (use_rx_sg) ? 
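/* With RX SG, only QETH_RX_PULL_LEN bytes go into the linear part; the remainder is attached as page fragments below. */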
QETH_RX_PULL_LEN : skb_len; 5074 5075 skb = napi_alloc_skb(&card->napi, linear + headroom); 5076 } 5077 if (!skb) 5078 goto no_mem; 5079 if (headroom) 5080 skb_reserve(skb, headroom); 5081 5082 data_ptr = element->addr + offset; 5083 while (skb_len) { 5084 data_len = min(skb_len, (int)(element->length - offset)); 5085 if (data_len) { 5086 if (use_rx_sg) 5087 qeth_create_skb_frag(element, skb, offset, 5088 data_len); 5089 else 5090 skb_put_data(skb, data_ptr, data_len); 5091 } 5092 skb_len -= data_len; 5093 if (skb_len) { 5094 if (qeth_is_last_sbale(element)) { 5095 QETH_CARD_TEXT(card, 4, "unexeob"); 5096 QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); 5097 dev_kfree_skb_any(skb); 5098 QETH_CARD_STAT_INC(card, rx_errors); 5099 return NULL; 5100 } 5101 element++; 5102 offset = 0; 5103 data_ptr = element->addr; 5104 } else { 5105 offset += data_len; 5106 } 5107 } 5108 *__element = element; 5109 *__offset = offset; 5110 if (use_rx_sg) { 5111 QETH_CARD_STAT_INC(card, rx_sg_skbs); 5112 QETH_CARD_STAT_ADD(card, rx_sg_frags, 5113 skb_shinfo(skb)->nr_frags); 5114 } 5115 return skb; 5116 no_mem: 5117 if (net_ratelimit()) { 5118 QETH_CARD_TEXT(card, 2, "noskbmem"); 5119 } 5120 QETH_CARD_STAT_INC(card, rx_dropped); 5121 return NULL; 5122 } 5123 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb); 5124 5125 int qeth_poll(struct napi_struct *napi, int budget) 5126 { 5127 struct qeth_card *card = container_of(napi, struct qeth_card, napi); 5128 int work_done = 0; 5129 struct qeth_qdio_buffer *buffer; 5130 int done; 5131 int new_budget = budget; 5132 5133 while (1) { 5134 if (!card->rx.b_count) { 5135 card->rx.qdio_err = 0; 5136 card->rx.b_count = qdio_get_next_buffers( 5137 card->data.ccwdev, 0, &card->rx.b_index, 5138 &card->rx.qdio_err); 5139 if (card->rx.b_count <= 0) { 5140 card->rx.b_count = 0; 5141 break; 5142 } 5143 card->rx.b_element = 5144 &card->qdio.in_q->bufs[card->rx.b_index] 5145 .buffer->element[0]; 5146 card->rx.e_offset = 0; 5147 } 5148 5149 while (card->rx.b_count) { 5150 buffer = &card->qdio.in_q->bufs[card->rx.b_index]; 5151 if (!(card->rx.qdio_err && 5152 qeth_check_qdio_errors(card, buffer->buffer, 5153 card->rx.qdio_err, "qinerr"))) 5154 work_done += 5155 card->discipline->process_rx_buffer( 5156 card, new_budget, &done); 5157 else 5158 done = 1; 5159 5160 if (done) { 5161 QETH_CARD_STAT_INC(card, rx_bufs); 5162 qeth_put_buffer_pool_entry(card, 5163 buffer->pool_entry); 5164 qeth_queue_input_buffer(card, card->rx.b_index); 5165 card->rx.b_count--; 5166 if (card->rx.b_count) { 5167 card->rx.b_index = 5168 (card->rx.b_index + 1) % 5169 QDIO_MAX_BUFFERS_PER_Q; 5170 card->rx.b_element = 5171 &card->qdio.in_q 5172 ->bufs[card->rx.b_index] 5173 .buffer->element[0]; 5174 card->rx.e_offset = 0; 5175 } 5176 } 5177 5178 if (work_done >= budget) 5179 goto out; 5180 else 5181 new_budget = budget - work_done; 5182 } 5183 } 5184 5185 napi_complete_done(napi, work_done); 5186 if (qdio_start_irq(card->data.ccwdev, 0)) 5187 napi_schedule(&card->napi); 5188 out: 5189 return work_done; 5190 } 5191 EXPORT_SYMBOL_GPL(qeth_poll); 5192 5193 static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, 5194 unsigned int bidx, bool error, int budget) 5195 { 5196 struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx]; 5197 u8 sflags = buffer->buffer->element[15].sflags; 5198 struct qeth_card *card = queue->card; 5199 5200 if (queue->bufstates && (queue->bufstates[bidx].flags & 5201 QDIO_OUTBUF_STATE_FLAG_PENDING)) { 5202 WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); 5203 5204 if 
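/* The buffer completed asynchronously: notify af_iucv only if we win the PRIMED -> PENDING transition; the final notification follows once the async completion arrives. */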
(atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, 5205 QETH_QDIO_BUF_PENDING) == 5206 QETH_QDIO_BUF_PRIMED) 5207 qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); 5208 5209 QETH_CARD_TEXT_(card, 5, "pel%u", bidx); 5210 5211 /* prepare the queue slot for re-use: */ 5212 qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); 5213 if (qeth_init_qdio_out_buf(queue, bidx)) { 5214 QETH_CARD_TEXT(card, 2, "outofbuf"); 5215 qeth_schedule_recovery(card); 5216 } 5217 5218 return; 5219 } 5220 5221 if (card->options.cq == QETH_CQ_ENABLED) 5222 qeth_notify_skbs(queue, buffer, 5223 qeth_compute_cq_notification(sflags, 0)); 5224 qeth_clear_output_buffer(queue, buffer, error, budget); 5225 } 5226 5227 static int qeth_tx_poll(struct napi_struct *napi, int budget) 5228 { 5229 struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi); 5230 unsigned int queue_no = queue->queue_no; 5231 struct qeth_card *card = queue->card; 5232 struct net_device *dev = card->dev; 5233 unsigned int work_done = 0; 5234 struct netdev_queue *txq; 5235 5236 txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no)); 5237 5238 while (1) { 5239 unsigned int start, error, i; 5240 unsigned int packets = 0; 5241 unsigned int bytes = 0; 5242 int completed; 5243 5244 if (qeth_out_queue_is_empty(queue)) { 5245 napi_complete(napi); 5246 return 0; 5247 } 5248 5249 /* Give the CPU a breather: */ 5250 if (work_done >= QDIO_MAX_BUFFERS_PER_Q) { 5251 QETH_TXQ_STAT_INC(queue, completion_yield); 5252 if (napi_complete_done(napi, 0)) 5253 napi_schedule(napi); 5254 return 0; 5255 } 5256 5257 completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false, 5258 &start, &error); 5259 if (completed <= 0) { 5260 /* Ensure we see TX completion for pending work: */ 5261 if (napi_complete_done(napi, 0)) 5262 qeth_tx_arm_timer(queue); 5263 return 0; 5264 } 5265 5266 for (i = start; i < start + completed; i++) { 5267 struct qeth_qdio_out_buffer *buffer; 5268 unsigned int bidx = QDIO_BUFNR(i); 5269 5270 buffer = queue->bufs[bidx]; 5271 packets += skb_queue_len(&buffer->skb_list); 5272 bytes += buffer->bytes; 5273 5274 qeth_handle_send_error(card, buffer, error); 5275 qeth_iqd_tx_complete(queue, bidx, error, budget); 5276 qeth_cleanup_handled_pending(queue, bidx, false); 5277 } 5278 5279 netdev_tx_completed_queue(txq, packets, bytes); 5280 atomic_sub(completed, &queue->used_buffers); 5281 work_done += completed; 5282 5283 /* xmit may have observed the full condition, but not yet 5284 * stopped the txq, in which case the code below won't trigger. 5285 * So before returning, xmit will re-check the txq's fill level 5286 * and wake it up if needed.
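 * (That re-check is the "stopped && !qeth_out_queue_is_full()" test at the tail of __qeth_xmit().)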
5287 */ 5288 if (netif_tx_queue_stopped(txq) && 5289 !qeth_out_queue_is_full(queue)) 5290 netif_tx_wake_queue(txq); 5291 } 5292 } 5293 5294 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) 5295 { 5296 if (!cmd->hdr.return_code) 5297 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 5298 return cmd->hdr.return_code; 5299 } 5300 5301 static int qeth_setassparms_get_caps_cb(struct qeth_card *card, 5302 struct qeth_reply *reply, 5303 unsigned long data) 5304 { 5305 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 5306 struct qeth_ipa_caps *caps = reply->param; 5307 5308 if (qeth_setassparms_inspect_rc(cmd)) 5309 return -EIO; 5310 5311 caps->supported = cmd->data.setassparms.data.caps.supported; 5312 caps->enabled = cmd->data.setassparms.data.caps.enabled; 5313 return 0; 5314 } 5315 5316 int qeth_setassparms_cb(struct qeth_card *card, 5317 struct qeth_reply *reply, unsigned long data) 5318 { 5319 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 5320 5321 QETH_CARD_TEXT(card, 4, "defadpcb"); 5322 5323 if (cmd->hdr.return_code) 5324 return -EIO; 5325 5326 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; 5327 if (cmd->hdr.prot_version == QETH_PROT_IPV4) 5328 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; 5329 if (cmd->hdr.prot_version == QETH_PROT_IPV6) 5330 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; 5331 return 0; 5332 } 5333 EXPORT_SYMBOL_GPL(qeth_setassparms_cb); 5334 5335 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, 5336 enum qeth_ipa_funcs ipa_func, 5337 u16 cmd_code, 5338 unsigned int data_length, 5339 enum qeth_prot_versions prot) 5340 { 5341 struct qeth_ipacmd_setassparms *setassparms; 5342 struct qeth_ipacmd_setassparms_hdr *hdr; 5343 struct qeth_cmd_buffer *iob; 5344 5345 QETH_CARD_TEXT(card, 4, "getasscm"); 5346 iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot, 5347 data_length + 5348 offsetof(struct qeth_ipacmd_setassparms, 5349 data)); 5350 if (!iob) 5351 return NULL; 5352 5353 setassparms = &__ipa_cmd(iob)->data.setassparms; 5354 setassparms->assist_no = ipa_func; 5355 5356 hdr = &setassparms->hdr; 5357 hdr->length = sizeof(*hdr) + data_length; 5358 hdr->command_code = cmd_code; 5359 return iob; 5360 } 5361 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd); 5362 5363 int qeth_send_simple_setassparms_prot(struct qeth_card *card, 5364 enum qeth_ipa_funcs ipa_func, 5365 u16 cmd_code, u32 *data, 5366 enum qeth_prot_versions prot) 5367 { 5368 unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0; 5369 struct qeth_cmd_buffer *iob; 5370 5371 QETH_CARD_TEXT_(card, 4, "simassp%i", prot); 5372 iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot); 5373 if (!iob) 5374 return -ENOMEM; 5375 5376 if (data) 5377 __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data; 5378 return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL); 5379 } 5380 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot); 5381 5382 static void qeth_unregister_dbf_views(void) 5383 { 5384 int x; 5385 for (x = 0; x < QETH_DBF_INFOS; x++) { 5386 debug_unregister(qeth_dbf[x].id); 5387 qeth_dbf[x].id = NULL; 5388 } 5389 } 5390 5391 void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) 
5392 { 5393 char dbf_txt_buf[32]; 5394 va_list args; 5395 5396 if (!debug_level_enabled(id, level)) 5397 return; 5398 va_start(args, fmt); 5399 vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); 5400 va_end(args); 5401 debug_text_event(id, level, dbf_txt_buf); 5402 } 5403 EXPORT_SYMBOL_GPL(qeth_dbf_longtext); 5404 5405 static int qeth_register_dbf_views(void) 5406 { 5407 int ret; 5408 int x; 5409 5410 for (x = 0; x < QETH_DBF_INFOS; x++) { 5411 /* register the areas */ 5412 qeth_dbf[x].id = debug_register(qeth_dbf[x].name, 5413 qeth_dbf[x].pages, 5414 qeth_dbf[x].areas, 5415 qeth_dbf[x].len); 5416 if (qeth_dbf[x].id == NULL) { 5417 qeth_unregister_dbf_views(); 5418 return -ENOMEM; 5419 } 5420 5421 /* register a view */ 5422 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view); 5423 if (ret) { 5424 qeth_unregister_dbf_views(); 5425 return ret; 5426 } 5427 5428 /* set a passing level */ 5429 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level); 5430 } 5431 5432 return 0; 5433 } 5434 5435 static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */ 5436 5437 int qeth_core_load_discipline(struct qeth_card *card, 5438 enum qeth_discipline_id discipline) 5439 { 5440 mutex_lock(&qeth_mod_mutex); 5441 switch (discipline) { 5442 case QETH_DISCIPLINE_LAYER3: 5443 card->discipline = try_then_request_module( 5444 symbol_get(qeth_l3_discipline), "qeth_l3"); 5445 break; 5446 case QETH_DISCIPLINE_LAYER2: 5447 card->discipline = try_then_request_module( 5448 symbol_get(qeth_l2_discipline), "qeth_l2"); 5449 break; 5450 default: 5451 break; 5452 } 5453 mutex_unlock(&qeth_mod_mutex); 5454 5455 if (!card->discipline) { 5456 dev_err(&card->gdev->dev, "There is no kernel module to " 5457 "support discipline %d\n", discipline); 5458 return -EINVAL; 5459 } 5460 5461 card->options.layer = discipline; 5462 return 0; 5463 } 5464 5465 void qeth_core_free_discipline(struct qeth_card *card) 5466 { 5467 if (IS_LAYER2(card)) 5468 symbol_put(qeth_l2_discipline); 5469 else 5470 symbol_put(qeth_l3_discipline); 5471 card->options.layer = QETH_DISCIPLINE_UNDETERMINED; 5472 card->discipline = NULL; 5473 } 5474 5475 const struct device_type qeth_generic_devtype = { 5476 .name = "qeth_generic", 5477 .groups = qeth_generic_attr_groups, 5478 }; 5479 EXPORT_SYMBOL_GPL(qeth_generic_devtype); 5480 5481 static const struct device_type qeth_osn_devtype = { 5482 .name = "qeth_osn", 5483 .groups = qeth_osn_attr_groups, 5484 }; 5485 5486 #define DBF_NAME_LEN 20 5487 5488 struct qeth_dbf_entry { 5489 char dbf_name[DBF_NAME_LEN]; 5490 debug_info_t *dbf_info; 5491 struct list_head dbf_list; 5492 }; 5493 5494 static LIST_HEAD(qeth_dbf_list); 5495 static DEFINE_MUTEX(qeth_dbf_list_mutex); 5496 5497 static debug_info_t *qeth_get_dbf_entry(char *name) 5498 { 5499 struct qeth_dbf_entry *entry; 5500 debug_info_t *rc = NULL; 5501 5502 mutex_lock(&qeth_dbf_list_mutex); 5503 list_for_each_entry(entry, &qeth_dbf_list, dbf_list) { 5504 if (strcmp(entry->dbf_name, name) == 0) { 5505 rc = entry->dbf_info; 5506 break; 5507 } 5508 } 5509 mutex_unlock(&qeth_dbf_list_mutex); 5510 return rc; 5511 } 5512 5513 static int qeth_add_dbf_entry(struct qeth_card *card, char *name) 5514 { 5515 struct qeth_dbf_entry *new_entry; 5516 5517 card->debug = debug_register(name, 2, 1, 8); 5518 if (!card->debug) { 5519 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); 5520 goto err; 5521 } 5522 if (debug_register_view(card->debug, &debug_hex_ascii_view)) 5523 goto err_dbg; 5524 new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL); 5525 if (!new_entry) 
5526 goto err_dbg; 5527 strncpy(new_entry->dbf_name, name, DBF_NAME_LEN); 5528 new_entry->dbf_info = card->debug; 5529 mutex_lock(&qeth_dbf_list_mutex); 5530 list_add(&new_entry->dbf_list, &qeth_dbf_list); 5531 mutex_unlock(&qeth_dbf_list_mutex); 5532 5533 return 0; 5534 5535 err_dbg: 5536 debug_unregister(card->debug); 5537 err: 5538 return -ENOMEM; 5539 } 5540 5541 static void qeth_clear_dbf_list(void) 5542 { 5543 struct qeth_dbf_entry *entry, *tmp; 5544 5545 mutex_lock(&qeth_dbf_list_mutex); 5546 list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) { 5547 list_del(&entry->dbf_list); 5548 debug_unregister(entry->dbf_info); 5549 kfree(entry); 5550 } 5551 mutex_unlock(&qeth_dbf_list_mutex); 5552 } 5553 5554 static struct net_device *qeth_alloc_netdev(struct qeth_card *card) 5555 { 5556 struct net_device *dev; 5557 5558 switch (card->info.type) { 5559 case QETH_CARD_TYPE_IQD: 5560 dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN, 5561 ether_setup, QETH_MAX_QUEUES, 1); 5562 break; 5563 case QETH_CARD_TYPE_OSM: 5564 dev = alloc_etherdev(0); 5565 break; 5566 case QETH_CARD_TYPE_OSN: 5567 dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup); 5568 break; 5569 default: 5570 dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1); 5571 } 5572 5573 if (!dev) 5574 return NULL; 5575 5576 dev->ml_priv = card; 5577 dev->watchdog_timeo = QETH_TX_TIMEOUT; 5578 dev->min_mtu = IS_OSN(card) ? 64 : 576; 5579 /* initialized when device first goes online: */ 5580 dev->max_mtu = 0; 5581 dev->mtu = 0; 5582 SET_NETDEV_DEV(dev, &card->gdev->dev); 5583 netif_carrier_off(dev); 5584 5585 if (IS_OSN(card)) { 5586 dev->ethtool_ops = &qeth_osn_ethtool_ops; 5587 } else { 5588 dev->ethtool_ops = &qeth_ethtool_ops; 5589 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 5590 dev->hw_features |= NETIF_F_SG; 5591 dev->vlan_features |= NETIF_F_SG; 5592 if (IS_IQD(card)) { 5593 dev->features |= NETIF_F_SG; 5594 if (netif_set_real_num_tx_queues(dev, 5595 QETH_IQD_MIN_TXQ)) { 5596 free_netdev(dev); 5597 return NULL; 5598 } 5599 } 5600 } 5601 5602 return dev; 5603 } 5604 5605 struct net_device *qeth_clone_netdev(struct net_device *orig) 5606 { 5607 struct net_device *clone = qeth_alloc_netdev(orig->ml_priv); 5608 5609 if (!clone) 5610 return NULL; 5611 5612 clone->dev_port = orig->dev_port; 5613 return clone; 5614 } 5615 5616 static int qeth_core_probe_device(struct ccwgroup_device *gdev) 5617 { 5618 struct qeth_card *card; 5619 struct device *dev; 5620 int rc; 5621 enum qeth_discipline_id enforced_disc; 5622 char dbf_name[DBF_NAME_LEN]; 5623 5624 QETH_DBF_TEXT(SETUP, 2, "probedev"); 5625 5626 dev = &gdev->dev; 5627 if (!get_device(dev)) 5628 return -ENODEV; 5629 5630 QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); 5631 5632 card = qeth_alloc_card(gdev); 5633 if (!card) { 5634 QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); 5635 rc = -ENOMEM; 5636 goto err_dev; 5637 } 5638 5639 snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", 5640 dev_name(&gdev->dev)); 5641 card->debug = qeth_get_dbf_entry(dbf_name); 5642 if (!card->debug) { 5643 rc = qeth_add_dbf_entry(card, dbf_name); 5644 if (rc) 5645 goto err_card; 5646 } 5647 5648 qeth_setup_card(card); 5649 card->dev = qeth_alloc_netdev(card); 5650 if (!card->dev) { 5651 rc = -ENOMEM; 5652 goto err_card; 5653 } 5654 5655 card->qdio.no_out_queues = card->dev->num_tx_queues; 5656 rc = qeth_update_from_chp_desc(card); 5657 if (rc) 5658 goto err_chp_desc; 5659 qeth_determine_capabilities(card); 5660 qeth_set_blkt_defaults(card); 5661 5662 enforced_disc = 
qeth_enforce_discipline(card); 5663 switch (enforced_disc) { 5664 case QETH_DISCIPLINE_UNDETERMINED: 5665 gdev->dev.type = &qeth_generic_devtype; 5666 break; 5667 default: 5668 card->info.layer_enforced = true; 5669 rc = qeth_core_load_discipline(card, enforced_disc); 5670 if (rc) 5671 goto err_load; 5672 5673 gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype : 5674 card->discipline->devtype; 5675 rc = card->discipline->setup(card->gdev); 5676 if (rc) 5677 goto err_disc; 5678 break; 5679 } 5680 5681 return 0; 5682 5683 err_disc: 5684 qeth_core_free_discipline(card); 5685 err_load: 5686 err_chp_desc: 5687 free_netdev(card->dev); 5688 err_card: 5689 qeth_core_free_card(card); 5690 err_dev: 5691 put_device(dev); 5692 return rc; 5693 } 5694 5695 static void qeth_core_remove_device(struct ccwgroup_device *gdev) 5696 { 5697 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5698 5699 QETH_CARD_TEXT(card, 2, "removedv"); 5700 5701 if (card->discipline) { 5702 card->discipline->remove(gdev); 5703 qeth_core_free_discipline(card); 5704 } 5705 5706 free_netdev(card->dev); 5707 qeth_core_free_card(card); 5708 put_device(&gdev->dev); 5709 } 5710 5711 static int qeth_core_set_online(struct ccwgroup_device *gdev) 5712 { 5713 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5714 int rc = 0; 5715 enum qeth_discipline_id def_discipline; 5716 5717 if (!card->discipline) { 5718 def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : 5719 QETH_DISCIPLINE_LAYER2; 5720 rc = qeth_core_load_discipline(card, def_discipline); 5721 if (rc) 5722 goto err; 5723 rc = card->discipline->setup(card->gdev); 5724 if (rc) { 5725 qeth_core_free_discipline(card); 5726 goto err; 5727 } 5728 } 5729 rc = card->discipline->set_online(gdev); 5730 err: 5731 return rc; 5732 } 5733 5734 static int qeth_core_set_offline(struct ccwgroup_device *gdev) 5735 { 5736 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5737 return card->discipline->set_offline(gdev); 5738 } 5739 5740 static void qeth_core_shutdown(struct ccwgroup_device *gdev) 5741 { 5742 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5743 qeth_set_allowed_threads(card, 0, 1); 5744 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) 5745 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 5746 qeth_qdio_clear_card(card, 0); 5747 qeth_drain_output_queues(card); 5748 qdio_free(CARD_DDEV(card)); 5749 } 5750 5751 static int qeth_suspend(struct ccwgroup_device *gdev) 5752 { 5753 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5754 5755 qeth_set_allowed_threads(card, 0, 1); 5756 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 5757 if (gdev->state == CCWGROUP_OFFLINE) 5758 return 0; 5759 5760 card->discipline->set_offline(gdev); 5761 return 0; 5762 } 5763 5764 static int qeth_resume(struct ccwgroup_device *gdev) 5765 { 5766 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 5767 int rc; 5768 5769 rc = card->discipline->set_online(gdev); 5770 5771 qeth_set_allowed_threads(card, 0xffffffff, 0); 5772 if (rc) 5773 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover from an error on the device\n"); 5774 return rc; 5775 } 5776 5777 static ssize_t group_store(struct device_driver *ddrv, const char *buf, 5778 size_t count) 5779 { 5780 int err; 5781 5782 err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3, 5783 buf); 5784 5785 return err ?
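/* per sysfs convention, a successful store consumes and returns the full count */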
err : count; 5786 } 5787 static DRIVER_ATTR_WO(group); 5788 5789 static struct attribute *qeth_drv_attrs[] = { 5790 &driver_attr_group.attr, 5791 NULL, 5792 }; 5793 static struct attribute_group qeth_drv_attr_group = { 5794 .attrs = qeth_drv_attrs, 5795 }; 5796 static const struct attribute_group *qeth_drv_attr_groups[] = { 5797 &qeth_drv_attr_group, 5798 NULL, 5799 }; 5800 5801 static struct ccwgroup_driver qeth_core_ccwgroup_driver = { 5802 .driver = { 5803 .groups = qeth_drv_attr_groups, 5804 .owner = THIS_MODULE, 5805 .name = "qeth", 5806 }, 5807 .ccw_driver = &qeth_ccw_driver, 5808 .setup = qeth_core_probe_device, 5809 .remove = qeth_core_remove_device, 5810 .set_online = qeth_core_set_online, 5811 .set_offline = qeth_core_set_offline, 5812 .shutdown = qeth_core_shutdown, 5813 .prepare = NULL, 5814 .complete = NULL, 5815 .freeze = qeth_suspend, 5816 .thaw = qeth_resume, 5817 .restore = qeth_resume, 5818 }; 5819 5820 struct qeth_card *qeth_get_card_by_busid(char *bus_id) 5821 { 5822 struct ccwgroup_device *gdev; 5823 struct qeth_card *card; 5824 5825 gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id); 5826 if (!gdev) 5827 return NULL; 5828 5829 card = dev_get_drvdata(&gdev->dev); 5830 put_device(&gdev->dev); 5831 return card; 5832 } 5833 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid); 5834 5835 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 5836 { 5837 struct qeth_card *card = dev->ml_priv; 5838 struct mii_ioctl_data *mii_data; 5839 int rc = 0; 5840 5841 if (!card) 5842 return -ENODEV; 5843 5844 switch (cmd) { 5845 case SIOC_QETH_ADP_SET_SNMP_CONTROL: 5846 rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); 5847 break; 5848 case SIOC_QETH_GET_CARD_TYPE: 5849 if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) && 5850 !IS_VM_NIC(card)) 5851 return 1; 5852 return 0; 5853 case SIOCGMIIPHY: 5854 mii_data = if_mii(rq); 5855 mii_data->phy_id = 0; 5856 break; 5857 case SIOCGMIIREG: 5858 mii_data = if_mii(rq); 5859 if (mii_data->phy_id != 0) 5860 rc = -EINVAL; 5861 else 5862 mii_data->val_out = qeth_mdio_read(dev, 5863 mii_data->phy_id, mii_data->reg_num); 5864 break; 5865 case SIOC_QETH_QUERY_OAT: 5866 rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); 5867 break; 5868 default: 5869 if (card->discipline->do_ioctl) 5870 rc = card->discipline->do_ioctl(dev, rq, cmd); 5871 else 5872 rc = -EOPNOTSUPP; 5873 } 5874 if (rc) 5875 QETH_CARD_TEXT_(card, 2, "ioce%x", rc); 5876 return rc; 5877 } 5878 EXPORT_SYMBOL_GPL(qeth_do_ioctl); 5879 5880 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply, 5881 unsigned long data) 5882 { 5883 struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; 5884 u32 *features = reply->param; 5885 5886 if (qeth_setassparms_inspect_rc(cmd)) 5887 return -EIO; 5888 5889 *features = cmd->data.setassparms.data.flags_32bit; 5890 return 0; 5891 } 5892 5893 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype, 5894 enum qeth_prot_versions prot) 5895 { 5896 return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP, 5897 NULL, prot); 5898 } 5899 5900 static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype, 5901 enum qeth_prot_versions prot) 5902 { 5903 u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP; 5904 struct qeth_cmd_buffer *iob; 5905 struct qeth_ipa_caps caps; 5906 u32 features; 5907 int rc; 5908 5909 /* some L3 HW requires combined L3+L4 csum offload: */ 5910 if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 && 5911 cstype == 
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
	if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		dev_warn(&card->gdev->dev,
			 "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
			 QETH_CARD_IFNAME(card));
	return 0;
}

static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot)
{
	return on ? qeth_set_csum_on(card, cstype, prot) :
		    qeth_set_csum_off(card, cstype, prot);
}

static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}
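/* TSO is enabled with the same START -> ENABLE -> verify sequence as
 * checksum offload: START reports the supported send modes and the MSS,
 * ENABLE then activates QETH_IPA_LARGE_SEND_TCP, and the reported
 * capabilities are checked before the assist is considered usable.
 */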
static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}

static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}

static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* none or only one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
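/* .ndo_set_features callback: toggle the matching IPA assist for each
 * changed offload bit. Bits whose assist could not be (de)activated are
 * flipped back in 'changed', so that dev->features ends up reflecting
 * the actual hardware state; -EIO is returned if any toggle failed.
 */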
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if (changed & NETIF_F_IP_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);

netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_errors;
	stats->rx_dropped = card->stats.rx_dropped;
	stats->multicast = card->stats.rx_multicast;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	return QETH_IQD_MIN_UCAST_TXQ;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
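/* .ndo_open callback: stop the QDIO adapter interrupt so that the data
 * channel is serviced by NAPI polling, mark the channel up and start the
 * TX queues. For IQD (HiperSockets) devices a separate TX NAPI instance
 * is registered and kicked for each output queue.
 */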
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethopen");

	if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
		return -EIO;

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	napi_enable(&card->napi);
	local_bh_disable();
	napi_schedule(&card->napi);
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
					  QETH_NAPI_WEIGHT);
			napi_enable(&queue->napi);
			napi_schedule(&queue->napi);
		}
	}
	/* kick-start the NAPI softirq: */
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethstop");
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		/* Quiesce the NAPI instances: */
		qeth_for_each_output_queue(card, queue, i) {
			napi_disable(&queue->napi);
			del_timer_sync(&queue->timer);
		}

		/* Stop .ndo_start_xmit, might still access queue->napi. */
		netif_tx_disable(dev);

		/* Queues may get re-allocated, so remove the NAPIs here. */
		qeth_for_each_output_queue(card, queue, i)
			netif_napi_del(&queue->napi);
	} else {
		netif_tx_disable(dev);
	}

	napi_disable(&card->napi);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");