/* viohs.c: LDOM Virtual I/O handshake helper layer.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/ldc.h>
#include <asm/vio.h>

int vio_ldc_send(struct vio_driver_state *vio, void *data, int len)
{
	int err, limit = 1000;

	err = -EINVAL;
	while (limit-- > 0) {
		err = ldc_write(vio->lp, data, len);
		if (!err || (err != -EAGAIN))
			break;
		udelay(1);
	}

	return err;
}
EXPORT_SYMBOL(vio_ldc_send);

static int send_ctrl(struct vio_driver_state *vio,
		     struct vio_msg_tag *tag, int len)
{
	tag->sid = vio_send_sid(vio);
	return vio_ldc_send(vio, tag, len);
}

static void init_tag(struct vio_msg_tag *tag, u8 type, u8 stype, u16 stype_env)
{
	tag->type = type;
	tag->stype = stype;
	tag->stype_env = stype_env;
}

static int send_version(struct vio_driver_state *vio, u16 major, u16 minor)
{
	struct vio_ver_info pkt;

	vio->_local_sid = (u32) sched_clock();

	memset(&pkt, 0, sizeof(pkt));
	init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_VER_INFO);
	pkt.major = major;
	pkt.minor = minor;
	pkt.dev_class = vio->dev_class;

	viodbg(HS, "SEND VERSION INFO maj[%u] min[%u] devclass[%u]\n",
	       major, minor, vio->dev_class);

	return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

static int start_handshake(struct vio_driver_state *vio)
{
	int err;

	viodbg(HS, "START HANDSHAKE\n");

	vio->hs_state = VIO_HS_INVALID;

	err = send_version(vio,
			   vio->ver_table[0].major,
			   vio->ver_table[0].minor);
	if (err < 0)
		return err;

	return 0;
}

static void flush_rx_dring(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;
	u64 ident;

	BUG_ON(!(vio->dr_state & VIO_DR_STATE_RXREG));

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	ident = dr->ident;

	BUG_ON(!vio->desc_buf);
	kfree(vio->desc_buf);
	vio->desc_buf = NULL;

	memset(dr, 0, sizeof(*dr));
	dr->ident = ident;
}

void vio_link_state_change(struct vio_driver_state *vio, int event)
{
	if (event == LDC_EVENT_UP) {
		vio->hs_state = VIO_HS_INVALID;

		switch (vio->dev_class) {
		case VDEV_NETWORK:
		case VDEV_NETWORK_SWITCH:
			vio->dr_state = (VIO_DR_STATE_TXREQ |
					 VIO_DR_STATE_RXREQ);
			break;

		case VDEV_DISK:
			vio->dr_state = VIO_DR_STATE_TXREQ;
			break;
		case VDEV_DISK_SERVER:
			vio->dr_state = VIO_DR_STATE_RXREQ;
			break;
		}
		start_handshake(vio);
	} else if (event == LDC_EVENT_RESET) {
		vio->hs_state = VIO_HS_INVALID;

		if (vio->dr_state & VIO_DR_STATE_RXREG)
			flush_rx_dring(vio);

		vio->dr_state = 0x00;
		memset(&vio->ver, 0, sizeof(vio->ver));

		ldc_disconnect(vio->lp);
	}
}
EXPORT_SYMBOL(vio_link_state_change);

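/* Handshake overview: once the LDC link comes up, the two endpoints walk
 * through a fixed sequence of VIO control messages:
 *
 *	VER_INFO   version negotiation   (process_ver_*)
 *	ATTR_INFO  attribute exchange    (driver ->send_attr / ->handle_attr)
 *	DRING_REG  dring registration    (send_dreg / process_dreg_*)
 *	RDX        ready-to-exchange     (send_rdx / process_rdx_*)
 *
 * Any error along the way funnels into handshake_failure() below, which
 * drops ring state and returns the channel to VIO_HS_INVALID.  Client
 * drivers (e.g. virtual network or disk front-ends) are expected to call
 * vio_link_state_change() for LDC_EVENT_UP/RESET and to feed received
 * control packets to vio_control_pkt_engine().
 */
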
static int handshake_failure(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	/* XXX Put policy here... Perhaps start a timer to fire
	 * XXX in 100 ms, which will bring the link up and retry
	 * XXX the handshake.
	 */

	viodbg(HS, "HANDSHAKE FAILURE\n");

	vio->dr_state &= ~(VIO_DR_STATE_TXREG |
			   VIO_DR_STATE_RXREG);

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	memset(dr, 0, sizeof(*dr));

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;

	vio->hs_state = VIO_HS_INVALID;

	return -ECONNRESET;
}

static int process_unknown(struct vio_driver_state *vio, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	viodbg(HS, "UNKNOWN CONTROL [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);

	printk(KERN_ERR "vio: ID[%lu] Resetting connection.\n",
	       vio->vdev->channel_id);

	ldc_disconnect(vio->lp);

	return -ECONNRESET;
}

static int send_dreg(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING];
	union {
		struct vio_dring_register pkt;
		char all[sizeof(struct vio_dring_register) +
			 (sizeof(struct ldc_trans_cookie) *
			  dr->ncookies)];
	} u;
	int i;

	memset(&u, 0, sizeof(u));
	init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
	u.pkt.dring_ident = 0;
	u.pkt.num_descr = dr->num_entries;
	u.pkt.descr_size = dr->entry_size;
	u.pkt.options = VIO_TX_DRING;
	u.pkt.num_cookies = dr->ncookies;

	viodbg(HS, "SEND DRING_REG INFO ndesc[%u] dsz[%u] opt[0x%x] "
	       "ncookies[%u]\n",
	       u.pkt.num_descr, u.pkt.descr_size, u.pkt.options,
	       u.pkt.num_cookies);

	for (i = 0; i < dr->ncookies; i++) {
		u.pkt.cookies[i] = dr->cookies[i];

		viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
		       i,
		       (unsigned long long) u.pkt.cookies[i].cookie_addr,
		       (unsigned long long) u.pkt.cookies[i].cookie_size);
	}

	return send_ctrl(vio, &u.pkt.tag, sizeof(u));
}

static int send_rdx(struct vio_driver_state *vio)
{
	struct vio_rdx pkt;

	memset(&pkt, 0, sizeof(pkt));

	init_tag(&pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_RDX);

	viodbg(HS, "SEND RDX INFO\n");

	return send_ctrl(vio, &pkt.tag, sizeof(pkt));
}

static int send_attr(struct vio_driver_state *vio)
{
	return vio->ops->send_attr(vio);
}

static struct vio_version *find_by_major(struct vio_driver_state *vio,
					 u16 major)
{
	struct vio_version *ret = NULL;
	int i;

	for (i = 0; i < vio->ver_table_entries; i++) {
		struct vio_version *v = &vio->ver_table[i];
		if (v->major <= major) {
			ret = v;
			break;
		}
	}
	return ret;
}

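/* Version negotiation: the peer's VER_INFO is matched against our
 * ver_table, which is assumed to be ordered with the preferred (highest)
 * version first.  An exact major match is ACKed with the lower of the two
 * minors; a lower supported major is NACKed back as a counter-proposal;
 * no usable entry at all is NACKed with major 0, minor 0.
 */
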
static int process_ver_info(struct vio_driver_state *vio,
			    struct vio_ver_info *pkt)
{
	struct vio_version *vap;
	int err;

	viodbg(HS, "GOT VERSION INFO maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (vio->hs_state != VIO_HS_INVALID) {
		/* XXX Perhaps invoke start_handshake? XXX */
		memset(&vio->ver, 0, sizeof(vio->ver));
		vio->hs_state = VIO_HS_INVALID;
	}

	vap = find_by_major(vio, pkt->major);

	vio->_peer_sid = pkt->tag.sid;

	if (!vap) {
		pkt->tag.stype = VIO_SUBTYPE_NACK;
		pkt->major = 0;
		pkt->minor = 0;
		viodbg(HS, "SEND VERSION NACK maj[0] min[0]\n");
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
	} else if (vap->major != pkt->major) {
		pkt->tag.stype = VIO_SUBTYPE_NACK;
		pkt->major = vap->major;
		pkt->minor = vap->minor;
		viodbg(HS, "SEND VERSION NACK maj[%u] min[%u]\n",
		       pkt->major, pkt->minor);
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
	} else {
		struct vio_version ver = {
			.major = pkt->major,
			.minor = pkt->minor,
		};
		if (ver.minor > vap->minor)
			ver.minor = vap->minor;
		pkt->minor = ver.minor;
		pkt->tag.stype = VIO_SUBTYPE_ACK;
		viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
		       pkt->major, pkt->minor);
		err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
		if (err > 0) {
			vio->ver = ver;
			vio->hs_state = VIO_HS_GOTVERS;
		}
	}
	if (err < 0)
		return handshake_failure(vio);

	return 0;
}

static int process_ver_ack(struct vio_driver_state *vio,
			   struct vio_ver_info *pkt)
{
	viodbg(HS, "GOT VERSION ACK maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (vio->hs_state & VIO_HS_GOTVERS) {
		if (vio->ver.major != pkt->major ||
		    vio->ver.minor != pkt->minor) {
			pkt->tag.stype = VIO_SUBTYPE_NACK;
			(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));
			return handshake_failure(vio);
		}
	} else {
		vio->ver.major = pkt->major;
		vio->ver.minor = pkt->minor;
		vio->hs_state = VIO_HS_GOTVERS;
	}

	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_DISK:
		if (send_attr(vio) < 0)
			return handshake_failure(vio);
		break;

	default:
		break;
	}

	return 0;
}

static int process_ver_nack(struct vio_driver_state *vio,
			    struct vio_ver_info *pkt)
{
	struct vio_version *nver;

	viodbg(HS, "GOT VERSION NACK maj[%u] min[%u] devclass[%u]\n",
	       pkt->major, pkt->minor, pkt->dev_class);

	if (pkt->major == 0 && pkt->minor == 0)
		return handshake_failure(vio);
	nver = find_by_major(vio, pkt->major);
	if (!nver)
		return handshake_failure(vio);

	if (send_version(vio, nver->major, nver->minor) < 0)
		return handshake_failure(vio);

	return 0;
}

static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
{
	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_ver_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_ver_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_ver_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

static int process_attr(struct vio_driver_state *vio, void *pkt)
{
	int err;

	if (!(vio->hs_state & VIO_HS_GOTVERS))
		return handshake_failure(vio);

	err = vio->ops->handle_attr(vio, pkt);
	if (err < 0) {
		return handshake_failure(vio);
	} else {
		vio->hs_state |= VIO_HS_GOT_ATTR;

		if ((vio->dr_state & VIO_DR_STATE_TXREQ) &&
		    !(vio->hs_state & VIO_HS_SENT_DREG)) {
			if (send_dreg(vio) < 0)
				return handshake_failure(vio);

			vio->hs_state |= VIO_HS_SENT_DREG;
		}
	}
	return 0;
}

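/* Descriptor ring state tracking: VIO_DR_STATE_TXREQ/RXREQ are set at
 * link-up time according to the device class (see vio_link_state_change)
 * and record which rings must be registered on this channel, while
 * TXREG/RXREG are set as each registration actually completes.  RDX is
 * only sent once every required ring is registered.
 */
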
static int all_drings_registered(struct vio_driver_state *vio)
{
	int need_rx, need_tx;

	need_rx = (vio->dr_state & VIO_DR_STATE_RXREQ);
	need_tx = (vio->dr_state & VIO_DR_STATE_TXREQ);

	if (need_rx &&
	    !(vio->dr_state & VIO_DR_STATE_RXREG))
		return 0;

	if (need_tx &&
	    !(vio->dr_state & VIO_DR_STATE_TXREG))
		return 0;

	return 1;
}

static int process_dreg_info(struct vio_driver_state *vio,
			     struct vio_dring_register *pkt)
{
	struct vio_dring_state *dr;
	int i, len;

	viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	if (!(vio->dr_state & VIO_DR_STATE_RXREQ))
		goto send_nack;

	if (vio->dr_state & VIO_DR_STATE_RXREG)
		goto send_nack;

	/* v1.6 and higher, ACK with desired, supported mode, or NACK */
	if (vio_version_after_eq(vio, 1, 6)) {
		if (!(pkt->options & VIO_TX_DRING))
			goto send_nack;
		pkt->options = VIO_TX_DRING;
	}

	BUG_ON(vio->desc_buf);

	vio->desc_buf = kzalloc(pkt->descr_size, GFP_ATOMIC);
	if (!vio->desc_buf)
		goto send_nack;

	vio->desc_buf_len = pkt->descr_size;

	dr = &vio->drings[VIO_DRIVER_RX_RING];

	dr->num_entries = pkt->num_descr;
	dr->entry_size = pkt->descr_size;
	dr->ncookies = pkt->num_cookies;
	for (i = 0; i < dr->ncookies; i++) {
		dr->cookies[i] = pkt->cookies[i];

		viodbg(HS, "DRING COOKIE(%d) [%016llx:%016llx]\n",
		       i,
		       (unsigned long long)
		       pkt->cookies[i].cookie_addr,
		       (unsigned long long)
		       pkt->cookies[i].cookie_size);
	}

	pkt->tag.stype = VIO_SUBTYPE_ACK;
	pkt->dring_ident = ++dr->ident;

	viodbg(HS, "SEND DRING_REG ACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	len = (sizeof(*pkt) +
	       (dr->ncookies * sizeof(struct ldc_trans_cookie)));
	if (send_ctrl(vio, &pkt->tag, len) < 0)
		goto send_nack;

	vio->dr_state |= VIO_DR_STATE_RXREG;

	return 0;

send_nack:
	pkt->tag.stype = VIO_SUBTYPE_NACK;
	viodbg(HS, "SEND DRING_REG NACK\n");
	(void) send_ctrl(vio, &pkt->tag, sizeof(*pkt));

	return handshake_failure(vio);
}

static int process_dreg_ack(struct vio_driver_state *vio,
			    struct vio_dring_register *pkt)
{
	struct vio_dring_state *dr;

	viodbg(HS, "GOT DRING_REG ACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	dr = &vio->drings[VIO_DRIVER_TX_RING];

	if (!(vio->dr_state & VIO_DR_STATE_TXREQ))
		return handshake_failure(vio);

	dr->ident = pkt->dring_ident;
	vio->dr_state |= VIO_DR_STATE_TXREG;

	if (all_drings_registered(vio)) {
		if (send_rdx(vio) < 0)
			return handshake_failure(vio);
		vio->hs_state = VIO_HS_SENT_RDX;
	}
	return 0;
}

static int process_dreg_nack(struct vio_driver_state *vio,
			     struct vio_dring_register *pkt)
{
	viodbg(HS, "GOT DRING_REG NACK ident[%llx] "
	       "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
	       (unsigned long long) pkt->dring_ident,
	       pkt->num_descr, pkt->descr_size, pkt->options,
	       pkt->num_cookies);

	return handshake_failure(vio);
}

static int process_dreg(struct vio_driver_state *vio,
			struct vio_dring_register *pkt)
{
	if (!(vio->hs_state & VIO_HS_GOTVERS))
		return handshake_failure(vio);

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_dreg_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_dreg_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_dreg_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

static int process_dunreg(struct vio_driver_state *vio,
			  struct vio_dring_unregister *pkt)
{
	struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_RX_RING];

	viodbg(HS, "GOT DRING_UNREG\n");

	if (pkt->dring_ident != dr->ident)
		return 0;

	vio->dr_state &= ~VIO_DR_STATE_RXREG;

	memset(dr, 0, sizeof(*dr));

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;

	return 0;
}

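/* RDX ("ready to exchange data") closes the handshake: each side sends an
 * RDX INFO once its rings are registered and ACKs the RDX it receives.
 * When the resulting state reaches VIO_HS_COMPLETE, vio_control_pkt_engine()
 * invokes the driver's ->handshake_complete() callback.
 */
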
static int process_rdx_info(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX INFO\n");

	pkt->tag.stype = VIO_SUBTYPE_ACK;
	viodbg(HS, "SEND RDX ACK\n");
	if (send_ctrl(vio, &pkt->tag, sizeof(*pkt)) < 0)
		return handshake_failure(vio);

	vio->hs_state |= VIO_HS_SENT_RDX_ACK;
	return 0;
}

static int process_rdx_ack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX ACK\n");

	if (!(vio->hs_state & VIO_HS_SENT_RDX))
		return handshake_failure(vio);

	vio->hs_state |= VIO_HS_GOT_RDX_ACK;
	return 0;
}

static int process_rdx_nack(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	viodbg(HS, "GOT RDX NACK\n");

	return handshake_failure(vio);
}

static int process_rdx(struct vio_driver_state *vio, struct vio_rdx *pkt)
{
	if (!all_drings_registered(vio))
		handshake_failure(vio);

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return process_rdx_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return process_rdx_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return process_rdx_nack(vio, pkt);

	default:
		return handshake_failure(vio);
	}
}

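/* Main dispatcher for incoming VIO control packets.  Drivers hand every
 * VIO_TYPE_CTRL message read from the LDC channel to this routine; it
 * routes the packet by stype_env and notifies the driver once the
 * handshake state reaches VIO_HS_COMPLETE.
 */
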
int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
{
	struct vio_msg_tag *tag = pkt;
	u8 prev_state = vio->hs_state;
	int err;

	switch (tag->stype_env) {
	case VIO_VER_INFO:
		err = process_ver(vio, pkt);
		break;

	case VIO_ATTR_INFO:
		err = process_attr(vio, pkt);
		break;

	case VIO_DRING_REG:
		err = process_dreg(vio, pkt);
		break;

	case VIO_DRING_UNREG:
		err = process_dunreg(vio, pkt);
		break;

	case VIO_RDX:
		err = process_rdx(vio, pkt);
		break;

	default:
		err = process_unknown(vio, pkt);
		break;
	}
	if (!err &&
	    vio->hs_state != prev_state &&
	    (vio->hs_state & VIO_HS_COMPLETE))
		vio->ops->handshake_complete(vio);

	return err;
}
EXPORT_SYMBOL(vio_control_pkt_engine);

void vio_conn_reset(struct vio_driver_state *vio)
{
}
EXPORT_SYMBOL(vio_conn_reset);

/* The issue is that the Solaris virtual disk server just mirrors the
 * SID values it gets from the client peer. So we work around that
 * here in vio_{validate,send}_sid() so that the drivers don't need
 * to be aware of this crap.
 */
int vio_validate_sid(struct vio_driver_state *vio, struct vio_msg_tag *tp)
{
	u32 sid;

	/* Always let VERSION+INFO packets through unchecked, they
	 * define the new SID.
	 */
	if (tp->type == VIO_TYPE_CTRL &&
	    tp->stype == VIO_SUBTYPE_INFO &&
	    tp->stype_env == VIO_VER_INFO)
		return 0;

	/* Ok, now figure out which SID to use. */
	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK_SERVER:
	default:
		sid = vio->_peer_sid;
		break;

	case VDEV_DISK:
		sid = vio->_local_sid;
		break;
	}

	if (sid == tp->sid)
		return 0;
	viodbg(DATA, "BAD SID tag->sid[%08x] peer_sid[%08x] local_sid[%08x]\n",
	       tp->sid, vio->_peer_sid, vio->_local_sid);
	return -EINVAL;
}
EXPORT_SYMBOL(vio_validate_sid);

u32 vio_send_sid(struct vio_driver_state *vio)
{
	switch (vio->dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK:
	default:
		return vio->_local_sid;

	case VDEV_DISK_SERVER:
		return vio->_peer_sid;
	}
}
EXPORT_SYMBOL(vio_send_sid);

int vio_ldc_alloc(struct vio_driver_state *vio,
		  struct ldc_channel_config *base_cfg,
		  void *event_arg)
{
	struct ldc_channel_config cfg = *base_cfg;
	struct ldc_channel *lp;

	cfg.tx_irq = vio->vdev->tx_irq;
	cfg.rx_irq = vio->vdev->rx_irq;

	lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name);
	if (IS_ERR(lp))
		return PTR_ERR(lp);

	vio->lp = lp;

	return 0;
}
EXPORT_SYMBOL(vio_ldc_alloc);

void vio_ldc_free(struct vio_driver_state *vio)
{
	ldc_free(vio->lp);
	vio->lp = NULL;

	kfree(vio->desc_buf);
	vio->desc_buf = NULL;
	vio->desc_buf_len = 0;
}
EXPORT_SYMBOL(vio_ldc_free);

void vio_port_up(struct vio_driver_state *vio)
{
	unsigned long flags;
	int err, state;

	spin_lock_irqsave(&vio->lock, flags);

	state = ldc_state(vio->lp);

	err = 0;
	if (state == LDC_STATE_INIT) {
		err = ldc_bind(vio->lp);
		if (err)
			printk(KERN_WARNING "%s: Port %lu bind failed, "
			       "err=%d\n",
			       vio->name, vio->vdev->channel_id, err);
	}

	if (!err) {
		err = ldc_connect(vio->lp);
		if (err)
			printk(KERN_WARNING "%s: Port %lu connect failed, "
			       "err=%d\n",
			       vio->name, vio->vdev->channel_id, err);
	}
	if (err) {
		unsigned long expires = jiffies + HZ;

		expires = round_jiffies(expires);
		mod_timer(&vio->timer, expires);
	}

	spin_unlock_irqrestore(&vio->lock, flags);
}
EXPORT_SYMBOL(vio_port_up);

static void vio_port_timer(unsigned long _arg)
{
	struct vio_driver_state *vio = (struct vio_driver_state *) _arg;

	vio_port_up(vio);
}

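/* One-time setup of the common VIO driver state.  All three ops
 * (send_attr, handle_attr, handshake_complete) are required, and the
 * version table should list the preferred (highest) version first since
 * start_handshake() proposes entry 0 and find_by_major() scans the table
 * in order.
 */
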
int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
		    u8 dev_class, struct vio_version *ver_table,
		    int ver_table_size, struct vio_driver_ops *ops,
		    char *name)
{
	switch (dev_class) {
	case VDEV_NETWORK:
	case VDEV_NETWORK_SWITCH:
	case VDEV_DISK:
	case VDEV_DISK_SERVER:
		break;

	default:
		return -EINVAL;
	}

	if (!ops->send_attr ||
	    !ops->handle_attr ||
	    !ops->handshake_complete)
		return -EINVAL;

	if (!ver_table || ver_table_size < 0)
		return -EINVAL;

	if (!name)
		return -EINVAL;

	spin_lock_init(&vio->lock);

	vio->name = name;

	vio->dev_class = dev_class;
	vio->vdev = vdev;

	vio->ver_table = ver_table;
	vio->ver_table_entries = ver_table_size;

	vio->ops = ops;

	setup_timer(&vio->timer, vio_port_timer, (unsigned long) vio);

	return 0;
}
EXPORT_SYMBOL(vio_driver_init);