/* ldc.c: Logical Domain Channel link-layer protocol driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bitmap.h>

#include <asm/hypervisor.h>
#include <asm/iommu.h>
#include <asm/page.h>
#include <asm/ldc.h>
#include <asm/mdesc.h>

#define DRV_MODULE_NAME		"ldc"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"July 22, 2008"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
#define LDC_PACKET_SIZE		64

/* Packet header layout for unreliable and reliable mode frames.
 * When in RAW mode, packets are simply straight 64-byte payloads
 * with no headers.
 */
struct ldc_packet {
	u8			type;
#define LDC_CTRL		0x01
#define LDC_DATA		0x02
#define LDC_ERR			0x10

	u8			stype;
#define LDC_INFO		0x01
#define LDC_ACK			0x02
#define LDC_NACK		0x04

	u8			ctrl;
#define LDC_VERS		0x01 /* Link Version		*/
#define LDC_RTS			0x02 /* Request To Send		*/
#define LDC_RTR			0x03 /* Ready To Receive	*/
#define LDC_RDX			0x04 /* Ready for Data eXchange	*/
#define LDC_CTRL_MSK		0x0f

	u8			env;
#define LDC_LEN			0x3f
#define LDC_FRAG_MASK		0xc0
#define LDC_START		0x40
#define LDC_STOP		0x80

	u32			seqid;

	union {
		u8		u_data[LDC_PACKET_SIZE - 8];
		struct {
			u32	pad;
			u32	ackid;
			u8	r_data[LDC_PACKET_SIZE - 8 - 8];
		} r;
	} u;
};

struct ldc_version {
	u16 major;
	u16 minor;
};

/* Ordered from largest major to lowest.  */
static struct ldc_version ver_arr[] = {
	{ .major = 1, .minor = 0 },
};

#define LDC_DEFAULT_MTU			(4 * LDC_PACKET_SIZE)
#define LDC_DEFAULT_NUM_ENTRIES		(PAGE_SIZE / LDC_PACKET_SIZE)

struct ldc_channel;

struct ldc_mode_ops {
	int (*write)(struct ldc_channel *, const void *, unsigned int);
	int (*read)(struct ldc_channel *, void *, unsigned int);
};

static const struct ldc_mode_ops raw_ops;
static const struct ldc_mode_ops nonraw_ops;
static const struct ldc_mode_ops stream_ops;

int ldom_domaining_enabled;

struct ldc_iommu {
	/* Protects arena alloc/free.  */
	spinlock_t			lock;
	struct iommu_arena		arena;
	struct ldc_mtable_entry		*page_table;
};

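/* Illustrative sketch (not part of the driver): the env byte of
 * struct ldc_packet above encodes a 6-bit payload length plus the
 * fragmentation flags, so a message larger than the per-packet MSS
 * is carried as a START frame, zero or more middle frames, and a
 * STOP frame.  Building env for one fragment looks like:
 *
 *	u8 env = (len & LDC_LEN);
 *	if (first_fragment)
 *		env |= LDC_START;
 *	if (last_fragment)
 *		env |= LDC_STOP;
 *
 * (first_fragment/last_fragment are hypothetical names.)  A message
 * that fits in a single packet carries both LDC_START and LDC_STOP;
 * write_nonraw() below performs exactly this kind of fragmentation.
 */
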
struct ldc_channel {
	/* Protects all operations that depend upon channel state.  */
	spinlock_t			lock;

	unsigned long			id;

	u8				*mssbuf;
	u32				mssbuf_len;
	u32				mssbuf_off;

	struct ldc_packet		*tx_base;
	unsigned long			tx_head;
	unsigned long			tx_tail;
	unsigned long			tx_num_entries;
	unsigned long			tx_ra;

	unsigned long			tx_acked;

	struct ldc_packet		*rx_base;
	unsigned long			rx_head;
	unsigned long			rx_tail;
	unsigned long			rx_num_entries;
	unsigned long			rx_ra;

	u32				rcv_nxt;
	u32				snd_nxt;

	unsigned long			chan_state;

	struct ldc_channel_config	cfg;
	void				*event_arg;

	const struct ldc_mode_ops	*mops;

	struct ldc_iommu		iommu;

	struct ldc_version		ver;

	u8				hs_state;
#define LDC_HS_CLOSED			0x00
#define LDC_HS_OPEN			0x01
#define LDC_HS_GOTVERS			0x02
#define LDC_HS_SENTRTR			0x03
#define LDC_HS_GOTRTR			0x04
#define LDC_HS_COMPLETE			0x10

	u8				flags;
#define LDC_FLAG_ALLOCED_QUEUES		0x01
#define LDC_FLAG_REGISTERED_QUEUES	0x02
#define LDC_FLAG_REGISTERED_IRQS	0x04
#define LDC_FLAG_RESET			0x10

	u8				mss;
	u8				state;

#define LDC_IRQ_NAME_MAX		32
	char				rx_irq_name[LDC_IRQ_NAME_MAX];
	char				tx_irq_name[LDC_IRQ_NAME_MAX];

	struct hlist_head		mh_list;

	struct hlist_node		list;
};

#define ldcdbg(TYPE, f, a...) \
do {	if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
		printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
} while (0)

static const char *state_to_str(u8 state)
{
	switch (state) {
	case LDC_STATE_INVALID:
		return "INVALID";
	case LDC_STATE_INIT:
		return "INIT";
	case LDC_STATE_BOUND:
		return "BOUND";
	case LDC_STATE_READY:
		return "READY";
	case LDC_STATE_CONNECTED:
		return "CONNECTED";
	default:
		return "<UNKNOWN>";
	}
}

static void ldc_set_state(struct ldc_channel *lp, u8 state)
{
	ldcdbg(STATE, "STATE (%s) --> (%s)\n",
	       state_to_str(lp->state),
	       state_to_str(state));

	lp->state = state;
}

static unsigned long __advance(unsigned long off, unsigned long num_entries)
{
	off += LDC_PACKET_SIZE;
	if (off == (num_entries * LDC_PACKET_SIZE))
		off = 0;

	return off;
}

static unsigned long rx_advance(struct ldc_channel *lp, unsigned long off)
{
	return __advance(off, lp->rx_num_entries);
}

static unsigned long tx_advance(struct ldc_channel *lp, unsigned long off)
{
	return __advance(off, lp->tx_num_entries);
}

static struct ldc_packet *handshake_get_tx_packet(struct ldc_channel *lp,
						  unsigned long *new_tail)
{
	struct ldc_packet *p;
	unsigned long t;

	t = tx_advance(lp, lp->tx_tail);
	if (t == lp->tx_head)
		return NULL;

	*new_tail = t;

	p = lp->tx_base;
	return p + (lp->tx_tail / LDC_PACKET_SIZE);
}

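/* Illustrative sketch (not part of the driver): the queues are rings
 * of 64-byte slots indexed by byte offset, and one slot is always
 * left unused so that head == tail unambiguously means "empty".
 * A queue of N entries therefore holds at most N - 1 packets:
 *
 *	unsigned long tail = lp->tx_tail;
 *	unsigned long next = tx_advance(lp, tail);
 *
 *	if (next == lp->tx_head)
 *		ring_is_full();		// hypothetical handler
 *	else
 *		slot = lp->tx_base + (tail / LDC_PACKET_SIZE);
 *
 * handshake_get_tx_packet() above is exactly this test.
 */
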
/* When we are in reliable or stream mode, we have to track the next
 * packet we haven't gotten an ACK for in the TX queue using tx_acked.
 * We have to be careful not to stomp over the queue past that point.
 * During the handshake, we don't have TX data packets pending in the
 * queue and that's why handshake_get_tx_packet() need not be mindful
 * of lp->tx_acked.
 */
static unsigned long head_for_data(struct ldc_channel *lp)
{
	if (lp->cfg.mode == LDC_MODE_STREAM)
		return lp->tx_acked;
	return lp->tx_head;
}

static int tx_has_space_for(struct ldc_channel *lp, unsigned int size)
{
	unsigned long limit, tail, new_tail, diff;
	unsigned int mss;

	limit = head_for_data(lp);
	tail = lp->tx_tail;
	new_tail = tx_advance(lp, tail);
	if (new_tail == limit)
		return 0;

	if (limit > new_tail)
		diff = limit - new_tail;
	else
		diff = (limit +
			((lp->tx_num_entries * LDC_PACKET_SIZE) - new_tail));
	diff /= LDC_PACKET_SIZE;
	mss = lp->mss;

	if (diff * mss < size)
		return 0;

	return 1;
}

static struct ldc_packet *data_get_tx_packet(struct ldc_channel *lp,
					     unsigned long *new_tail)
{
	struct ldc_packet *p;
	unsigned long h, t;

	h = head_for_data(lp);
	t = tx_advance(lp, lp->tx_tail);
	if (t == h)
		return NULL;

	*new_tail = t;

	p = lp->tx_base;
	return p + (lp->tx_tail / LDC_PACKET_SIZE);
}

static int set_tx_tail(struct ldc_channel *lp, unsigned long tail)
{
	unsigned long orig_tail = lp->tx_tail;
	int limit = 1000;

	lp->tx_tail = tail;
	while (limit-- > 0) {
		unsigned long err;

		err = sun4v_ldc_tx_set_qtail(lp->id, tail);
		if (!err)
			return 0;

		if (err != HV_EWOULDBLOCK) {
			lp->tx_tail = orig_tail;
			return -EINVAL;
		}
		udelay(1);
	}

	lp->tx_tail = orig_tail;
	return -EBUSY;
}

/* This just updates the head value in the hypervisor using
 * a polling loop with a timeout.  The caller takes care of
 * updating software state representing the head change, if any.
 */
static int __set_rx_head(struct ldc_channel *lp, unsigned long head)
{
	int limit = 1000;

	while (limit-- > 0) {
		unsigned long err;

		err = sun4v_ldc_rx_set_qhead(lp->id, head);
		if (!err)
			return 0;

		if (err != HV_EWOULDBLOCK)
			return -EINVAL;

		udelay(1);
	}

	return -EBUSY;
}

static int send_tx_packet(struct ldc_channel *lp,
			  struct ldc_packet *p,
			  unsigned long new_tail)
{
	BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE)));

	return set_tx_tail(lp, new_tail);
}

static struct ldc_packet *handshake_compose_ctrl(struct ldc_channel *lp,
						 u8 stype, u8 ctrl,
						 void *data, int dlen,
						 unsigned long *new_tail)
{
	struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail);

	if (p) {
		memset(p, 0, sizeof(*p));
		p->type = LDC_CTRL;
		p->stype = stype;
		p->ctrl = ctrl;
		if (data)
			memcpy(p->u.u_data, data, dlen);
	}
	return p;
}

static int start_handshake(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	struct ldc_version *ver;
	unsigned long new_tail;

	ver = &ver_arr[0];

	ldcdbg(HS, "SEND VER INFO maj[%u] min[%u]\n",
	       ver->major, ver->minor);

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
				   ver, sizeof(*ver), &new_tail);
	if (p) {
		int err = send_tx_packet(lp, p, new_tail);
		if (!err)
			lp->flags &= ~LDC_FLAG_RESET;
		return err;
	}
	return -EBUSY;
}

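/* Illustrative sketch (not part of the driver): the free-space test
 * in tx_has_space_for() above is plain circular-buffer arithmetic.
 * With hypothetical values it works out like this:
 *
 *	num_entries = 64, so the ring spans 64 * 64 = 4096 bytes
 *	limit (head) = 0x0080, new_tail = 0x0F80
 *
 *	since limit <= new_tail, the free bytes wrap around:
 *	diff = 0x0080 + (4096 - 0x0F80) = 0x0100  ->  4 packets
 *
 * Each packet carries at most lp->mss payload bytes, so a write of
 * size bytes fits only if (diff / LDC_PACKET_SIZE) * mss >= size.
 */
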
static int send_version_nack(struct ldc_channel *lp,
			     u16 major, u16 minor)
{
	struct ldc_packet *p;
	struct ldc_version ver;
	unsigned long new_tail;

	ver.major = major;
	ver.minor = minor;

	p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS,
				   &ver, sizeof(ver), &new_tail);
	if (p) {
		ldcdbg(HS, "SEND VER NACK maj[%u] min[%u]\n",
		       ver.major, ver.minor);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}

static int send_version_ack(struct ldc_channel *lp,
			    struct ldc_version *vp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS,
				   vp, sizeof(*vp), &new_tail);
	if (p) {
		ldcdbg(HS, "SEND VER ACK maj[%u] min[%u]\n",
		       vp->major, vp->minor);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}

static int send_rts(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = lp->cfg.mode;
		p->seqid = 0;
		lp->rcv_nxt = 0;

		ldcdbg(HS, "SEND RTS env[0x%x] seqid[0x%x]\n",
		       p->env, p->seqid);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}

static int send_rtr(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = lp->cfg.mode;
		p->seqid = 0;

		ldcdbg(HS, "SEND RTR env[0x%x] seqid[0x%x]\n",
		       p->env, p->seqid);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}

static int send_rdx(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = 0;
		p->seqid = ++lp->snd_nxt;
		p->u.r.ackid = lp->rcv_nxt;

		ldcdbg(HS, "SEND RDX env[0x%x] seqid[0x%x] ackid[0x%x]\n",
		       p->env, p->seqid, p->u.r.ackid);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}

static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
{
	struct ldc_packet *p;
	unsigned long new_tail;
	int err;

	p = data_get_tx_packet(lp, &new_tail);
	if (!p)
		return -EBUSY;
	memset(p, 0, sizeof(*p));
	p->type = data_pkt->type;
	p->stype = LDC_NACK;
	p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK;
	p->seqid = lp->snd_nxt + 1;
	p->u.r.ackid = lp->rcv_nxt;

	ldcdbg(HS, "SEND DATA NACK type[0x%x] ctl[0x%x] seq[0x%x] ack[0x%x]\n",
	       p->type, p->ctrl, p->seqid, p->u.r.ackid);

	err = send_tx_packet(lp, p, new_tail);
	if (!err)
		lp->snd_nxt++;

	return err;
}

static int ldc_abort(struct ldc_channel *lp)
{
	unsigned long hv_err;

	ldcdbg(STATE, "ABORT\n");

	/* We report but do not act upon the hypervisor errors because
	 * there really isn't much we can do if they fail at this point.
	 */
	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_tx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
		       lp->id, lp->tx_ra, lp->tx_num_entries, hv_err);

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_tx_get_state(%lx,...) failed, err=%lu\n",
		       lp->id, hv_err);

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_rx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
		       lp->id, lp->rx_ra, lp->rx_num_entries, hv_err);

	/* Refetch the RX queue state as well, because we could be invoked
	 * here in the queue processing context.
	 */
	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_rx_get_state(%lx,...) failed, err=%lu\n",
		       lp->id, hv_err);

	return -ECONNRESET;
}

static struct ldc_version *find_by_major(u16 major)
{
	struct ldc_version *ret = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(ver_arr); i++) {
		struct ldc_version *v = &ver_arr[i];
		if (v->major <= major) {
			ret = v;
			break;
		}
	}
	return ret;
}

static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
{
	struct ldc_version *vap;
	int err;

	ldcdbg(HS, "GOT VERSION INFO major[%x] minor[%x]\n",
	       vp->major, vp->minor);

	if (lp->hs_state == LDC_HS_GOTVERS) {
		lp->hs_state = LDC_HS_OPEN;
		memset(&lp->ver, 0, sizeof(lp->ver));
	}

	vap = find_by_major(vp->major);
	if (!vap) {
		err = send_version_nack(lp, 0, 0);
	} else if (vap->major != vp->major) {
		err = send_version_nack(lp, vap->major, vap->minor);
	} else {
		struct ldc_version ver = *vp;
		if (ver.minor > vap->minor)
			ver.minor = vap->minor;
		err = send_version_ack(lp, &ver);
		if (!err) {
			lp->ver = ver;
			lp->hs_state = LDC_HS_GOTVERS;
		}
	}
	if (err)
		return ldc_abort(lp);

	return 0;
}

static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
{
	ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n",
	       vp->major, vp->minor);

	if (lp->hs_state == LDC_HS_GOTVERS) {
		if (lp->ver.major != vp->major ||
		    lp->ver.minor != vp->minor)
			return ldc_abort(lp);
	} else {
		lp->ver = *vp;
		lp->hs_state = LDC_HS_GOTVERS;
	}
	if (send_rts(lp))
		return ldc_abort(lp);
	return 0;
}

static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
{
	struct ldc_version *vap;
	struct ldc_packet *p;
	unsigned long new_tail;

	if (vp->major == 0 && vp->minor == 0)
		return ldc_abort(lp);

	vap = find_by_major(vp->major);
	if (!vap)
		return ldc_abort(lp);

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
				   vap, sizeof(*vap),
				   &new_tail);
	if (!p)
		return ldc_abort(lp);

	return send_tx_packet(lp, p, new_tail);
}

static int process_version(struct ldc_channel *lp,
			   struct ldc_packet *p)
{
	struct ldc_version *vp;

	vp = (struct ldc_version *) p->u.u_data;

	switch (p->stype) {
	case LDC_INFO:
		return process_ver_info(lp, vp);

	case LDC_ACK:
		return process_ver_ack(lp, vp);

	case LDC_NACK:
		return process_ver_nack(lp, vp);

	default:
		return ldc_abort(lp);
	}
}

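/* Illustrative sketch (not part of the driver): a successful non-RAW
 * handshake between an initiator and its peer proceeds through the
 * control frames handled above and below, roughly:
 *
 *	initiator			peer
 *	---------			----
 *	CTRL/INFO/VERS (maj,min) -->
 *				 <--	CTRL/ACK/VERS  (agreed version)
 *	CTRL/INFO/RTS  (mode)	 -->
 *				 <--	CTRL/INFO/RTR  (mode)
 *	CTRL/INFO/RDX		 -->
 *
 * After RDX both sides set hs_state to LDC_HS_COMPLETE and move the
 * channel to LDC_STATE_CONNECTED; a version mismatch is renegotiated
 * via NACKs carrying the highest version the sender can support.
 */
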
static int process_rts(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n",
	       p->stype, p->seqid, p->env);

	if (p->stype != LDC_INFO ||
	    lp->hs_state != LDC_HS_GOTVERS ||
	    p->env != lp->cfg.mode)
		return ldc_abort(lp);

	lp->snd_nxt = p->seqid;
	lp->rcv_nxt = p->seqid;
	lp->hs_state = LDC_HS_SENTRTR;
	if (send_rtr(lp))
		return ldc_abort(lp);

	return 0;
}

static int process_rtr(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n",
	       p->stype, p->seqid, p->env);

	if (p->stype != LDC_INFO ||
	    p->env != lp->cfg.mode)
		return ldc_abort(lp);

	lp->snd_nxt = p->seqid;
	lp->hs_state = LDC_HS_COMPLETE;
	ldc_set_state(lp, LDC_STATE_CONNECTED);
	send_rdx(lp);

	return LDC_EVENT_UP;
}

static int rx_seq_ok(struct ldc_channel *lp, u32 seqid)
{
	return lp->rcv_nxt + 1 == seqid;
}

static int process_rdx(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n",
	       p->stype, p->seqid, p->env, p->u.r.ackid);

	if (p->stype != LDC_INFO ||
	    !(rx_seq_ok(lp, p->seqid)))
		return ldc_abort(lp);

	lp->rcv_nxt = p->seqid;

	lp->hs_state = LDC_HS_COMPLETE;
	ldc_set_state(lp, LDC_STATE_CONNECTED);

	return LDC_EVENT_UP;
}

static int process_control_frame(struct ldc_channel *lp,
				 struct ldc_packet *p)
{
	switch (p->ctrl) {
	case LDC_VERS:
		return process_version(lp, p);

	case LDC_RTS:
		return process_rts(lp, p);

	case LDC_RTR:
		return process_rtr(lp, p);

	case LDC_RDX:
		return process_rdx(lp, p);

	default:
		return ldc_abort(lp);
	}
}

static int process_error_frame(struct ldc_channel *lp,
			       struct ldc_packet *p)
{
	return ldc_abort(lp);
}

static int process_data_ack(struct ldc_channel *lp,
			    struct ldc_packet *ack)
{
	unsigned long head = lp->tx_acked;
	u32 ackid = ack->u.r.ackid;

	while (1) {
		struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);

		head = tx_advance(lp, head);

		if (p->seqid == ackid) {
			lp->tx_acked = head;
			return 0;
		}
		if (head == lp->tx_tail)
			return ldc_abort(lp);
	}

	return 0;
}

static void send_events(struct ldc_channel *lp, unsigned int event_mask)
{
	if (event_mask & LDC_EVENT_RESET)
		lp->cfg.event(lp->event_arg, LDC_EVENT_RESET);
	if (event_mask & LDC_EVENT_UP)
		lp->cfg.event(lp->event_arg, LDC_EVENT_UP);
	if (event_mask & LDC_EVENT_DATA_READY)
		lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY);
}

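/* Illustrative sketch (not part of the driver): send_events() invokes
 * the client's cfg.event callback once per pending event bit, outside
 * of lp->lock.  A typical client handler (hypothetical names) would
 * look like:
 *
 *	static void my_event(void *arg, int event)
 *	{
 *		struct my_driver_state *dp = arg;
 *
 *		if (event == LDC_EVENT_RESET)
 *			my_schedule_reconnect(dp);
 *		else if (event == LDC_EVENT_UP)
 *			my_start_io(dp);
 *		else if (event == LDC_EVENT_DATA_READY)
 *			my_drain_with_ldc_read(dp);
 *	}
 */
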
static irqreturn_t ldc_rx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long orig_state, flags;
	unsigned int event_mask;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;

	/* We should probably check for hypervisor errors here and
	 * reset the LDC channel if we get one.
	 */
	sun4v_ldc_rx_get_state(lp->id,
			       &lp->rx_head,
			       &lp->rx_tail,
			       &lp->chan_state);

	ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);

	event_mask = 0;

	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		event_mask |= LDC_EVENT_UP;

		orig_state = lp->chan_state;
	}

	/* If we are in reset state, flush the RX queue and ignore
	 * everything.
	 */
	if (lp->flags & LDC_FLAG_RESET) {
		(void) __set_rx_head(lp, lp->rx_tail);
		goto out;
	}

	/* Once we finish the handshake, we let the ldc_read()
	 * paths do all of the control frame and state management.
	 * Just trigger the callback.
	 */
	if (lp->hs_state == LDC_HS_COMPLETE) {
handshake_complete:
		if (lp->chan_state != orig_state) {
			unsigned int event = LDC_EVENT_RESET;

			if (lp->chan_state == LDC_CHANNEL_UP)
				event = LDC_EVENT_UP;

			event_mask |= event;
		}
		if (lp->rx_head != lp->rx_tail)
			event_mask |= LDC_EVENT_DATA_READY;

		goto out;
	}

	if (lp->chan_state != orig_state)
		goto out;

	while (lp->rx_head != lp->rx_tail) {
		struct ldc_packet *p;
		unsigned long new;
		int err;

		p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);

		switch (p->type) {
		case LDC_CTRL:
			err = process_control_frame(lp, p);
			if (err > 0)
				event_mask |= err;
			break;

		case LDC_DATA:
			event_mask |= LDC_EVENT_DATA_READY;
			err = 0;
			break;

		case LDC_ERR:
			err = process_error_frame(lp, p);
			break;

		default:
			err = ldc_abort(lp);
			break;
		}

		if (err < 0)
			break;

		new = lp->rx_head;
		new += LDC_PACKET_SIZE;
		if (new == (lp->rx_num_entries * LDC_PACKET_SIZE))
			new = 0;
		lp->rx_head = new;

		err = __set_rx_head(lp, new);
		if (err < 0) {
			(void) ldc_abort(lp);
			break;
		}
		if (lp->hs_state == LDC_HS_COMPLETE)
			goto handshake_complete;
	}

out:
	spin_unlock_irqrestore(&lp->lock, flags);

	send_events(lp, event_mask);

	return IRQ_HANDLED;
}

static irqreturn_t ldc_tx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long flags, orig_state;
	unsigned int event_mask = 0;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;

	/* We should probably check for hypervisor errors here and
	 * reset the LDC channel if we get one.
	 */
	sun4v_ldc_tx_get_state(lp->id,
			       &lp->tx_head,
			       &lp->tx_tail,
			       &lp->chan_state);

	ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);

	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		event_mask |= LDC_EVENT_UP;
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	send_events(lp, event_mask);

	return IRQ_HANDLED;
}

/* XXX ldc_alloc() and ldc_free() need to run under a mutex so
 * XXX that addition and removal from the ldc_channel_list has
 * XXX atomicity, otherwise the __ldc_channel_exists() check is
 * XXX totally pointless as another thread can slip into ldc_alloc()
 * XXX and add a channel with the same ID.  There also needs to be
 * XXX a spinlock for ldc_channel_list.
 */
static HLIST_HEAD(ldc_channel_list);

static int __ldc_channel_exists(unsigned long id)
{
	struct ldc_channel *lp;
	struct hlist_node *n;

	hlist_for_each_entry(lp, n, &ldc_channel_list, list) {
		if (lp->id == id)
			return 1;
	}
	return 0;
}

static int alloc_queue(const char *name, unsigned long num_entries,
		       struct ldc_packet **base, unsigned long *ra)
{
	unsigned long size, order;
	void *q;

	size = num_entries * LDC_PACKET_SIZE;
	order = get_order(size);

	q = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!q) {
		printk(KERN_ERR PFX "Alloc of %s queue failed with "
		       "size=%lu order=%lu\n", name, size, order);
		return -ENOMEM;
	}

	memset(q, 0, PAGE_SIZE << order);

	*base = q;
	*ra = __pa(q);

	return 0;
}

static void free_queue(unsigned long num_entries, struct ldc_packet *q)
{
	unsigned long size, order;

	if (!q)
		return;

	size = num_entries * LDC_PACKET_SIZE;
	order = get_order(size);

	free_pages((unsigned long)q, order);
}

/* XXX Make this configurable... XXX */
#define LDC_IOTABLE_SIZE	(8 * 1024)

static int ldc_iommu_init(struct ldc_channel *lp)
{
	unsigned long sz, num_tsb_entries, tsbsize, order;
	struct ldc_iommu *iommu = &lp->iommu;
	struct ldc_mtable_entry *table;
	unsigned long hv_err;
	int err;

	num_tsb_entries = LDC_IOTABLE_SIZE;
	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);

	spin_lock_init(&iommu->lock);

	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
		return -ENOMEM;
	}

	iommu->arena.limit = num_tsb_entries;

	order = get_order(tsbsize);

	table = (struct ldc_mtable_entry *)
		__get_free_pages(GFP_KERNEL, order);
	err = -ENOMEM;
	if (!table) {
		printk(KERN_ERR PFX "Alloc of MTE table failed, "
		       "size=%lu order=%lu\n", tsbsize, order);
		goto out_free_map;
	}

	memset(table, 0, PAGE_SIZE << order);

	iommu->page_table = table;

	hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
					 num_tsb_entries);
	err = -EINVAL;
	if (hv_err)
		goto out_free_table;

	return 0;

out_free_table:
	free_pages((unsigned long) table, order);
	iommu->page_table = NULL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return err;
}

static void ldc_iommu_release(struct ldc_channel *lp)
{
	struct ldc_iommu *iommu = &lp->iommu;
	unsigned long num_tsb_entries, tsbsize, order;

	(void) sun4v_ldc_set_map_table(lp->id, 0, 0);

	num_tsb_entries = iommu->arena.limit;
	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
	order = get_order(tsbsize);

	free_pages((unsigned long) iommu->page_table, order);
	iommu->page_table = NULL;

	kfree(iommu->arena.map);
	iommu->arena.map = NULL;
}

struct ldc_channel *ldc_alloc(unsigned long id,
			      const struct ldc_channel_config *cfgp,
			      void *event_arg)
{
	struct ldc_channel *lp;
	const struct ldc_mode_ops *mops;
	unsigned long dummy1, dummy2, hv_err;
	u8 mss, *mssbuf;
	int err;

	err = -ENODEV;
	if (!ldom_domaining_enabled)
		goto out_err;

	err = -EINVAL;
	if (!cfgp)
		goto out_err;

	switch (cfgp->mode) {
	case LDC_MODE_RAW:
		mops = &raw_ops;
		mss = LDC_PACKET_SIZE;
		break;

	case LDC_MODE_UNRELIABLE:
		mops = &nonraw_ops;
		mss = LDC_PACKET_SIZE - 8;
		break;

	case LDC_MODE_STREAM:
		mops = &stream_ops;
		mss = LDC_PACKET_SIZE - 8 - 8;
		break;

	default:
		goto out_err;
	}

	if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq)
		goto out_err;

	hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2);
	err = -ENODEV;
	if (hv_err == HV_ECHANNEL)
		goto out_err;

	err = -EEXIST;
	if (__ldc_channel_exists(id))
		goto out_err;

	mssbuf = NULL;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	err = -ENOMEM;
	if (!lp)
		goto out_err;

	spin_lock_init(&lp->lock);

	lp->id = id;

	err = ldc_iommu_init(lp);
	if (err)
		goto out_free_ldc;

	lp->mops = mops;
	lp->mss = mss;

	lp->cfg = *cfgp;
	if (!lp->cfg.mtu)
		lp->cfg.mtu = LDC_DEFAULT_MTU;

	if (lp->cfg.mode == LDC_MODE_STREAM) {
		mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL);
		if (!mssbuf) {
			err = -ENOMEM;
			goto out_free_iommu;
		}
		lp->mssbuf = mssbuf;
	}

	lp->event_arg = event_arg;

	/* XXX allow setting via ldc_channel_config to override defaults
	 * XXX or use some formula based upon mtu
	 */
	lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
	lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES;

	err = alloc_queue("TX", lp->tx_num_entries,
			  &lp->tx_base, &lp->tx_ra);
	if (err)
		goto out_free_mssbuf;

	err = alloc_queue("RX", lp->rx_num_entries,
			  &lp->rx_base, &lp->rx_ra);
	if (err)
		goto out_free_txq;

	lp->flags |= LDC_FLAG_ALLOCED_QUEUES;

	lp->hs_state = LDC_HS_CLOSED;
	ldc_set_state(lp, LDC_STATE_INIT);

	INIT_HLIST_NODE(&lp->list);
	hlist_add_head(&lp->list, &ldc_channel_list);

	INIT_HLIST_HEAD(&lp->mh_list);

	return lp;

out_free_txq:
	free_queue(lp->tx_num_entries, lp->tx_base);

out_free_mssbuf:
	kfree(mssbuf);

out_free_iommu:
	ldc_iommu_release(lp);

out_free_ldc:
	kfree(lp);

out_err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ldc_alloc);

void ldc_free(struct ldc_channel *lp)
{
	if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
		free_irq(lp->cfg.rx_irq, lp);
		free_irq(lp->cfg.tx_irq, lp);
	}

	if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
		sun4v_ldc_tx_qconf(lp->id, 0, 0);
		sun4v_ldc_rx_qconf(lp->id, 0, 0);
		lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
	}
	if (lp->flags & LDC_FLAG_ALLOCED_QUEUES) {
		free_queue(lp->tx_num_entries, lp->tx_base);
		free_queue(lp->rx_num_entries, lp->rx_base);
		lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
	}

	hlist_del(&lp->list);

	kfree(lp->mssbuf);

	ldc_iommu_release(lp);

	kfree(lp);
}
EXPORT_SYMBOL(ldc_free);

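/* Illustrative sketch (not part of the driver): a typical client,
 * such as a virtual device backend, brings a channel up roughly
 * like this (names and values are hypothetical):
 *
 *	struct ldc_channel_config cfg = {
 *		.event	= my_event,		// callback shown earlier
 *		.mtu	= 4096,
 *		.mode	= LDC_MODE_STREAM,
 *		.rx_irq	= my_rx_irq,
 *		.tx_irq	= my_tx_irq,
 *	};
 *	struct ldc_channel *lp;
 *
 *	lp = ldc_alloc(channel_id, &cfg, my_state);
 *	if (IS_ERR(lp))
 *		return PTR_ERR(lp);
 *	err = ldc_bind(lp, "my-channel");
 *	if (!err)
 *		err = ldc_connect(lp);	// starts the handshake
 *
 * Teardown is ldc_disconnect() followed by ldc_free().
 */
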
/* Bind the channel.  This registers the LDC queues with
 * the hypervisor and puts the channel into a pseudo-listening
 * state.  This does not initiate a handshake, ldc_connect() does
 * that.
 */
int ldc_bind(struct ldc_channel *lp, const char *name)
{
	unsigned long hv_err, flags;
	int err = -EINVAL;

	if (!name ||
	    (lp->state != LDC_STATE_INIT))
		return -EINVAL;

	snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
	snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);

	err = request_irq(lp->cfg.rx_irq, ldc_rx,
			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
			  lp->rx_irq_name, lp);
	if (err)
		return err;

	err = request_irq(lp->cfg.tx_irq, ldc_tx,
			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
			  lp->tx_irq_name, lp);
	if (err) {
		free_irq(lp->cfg.rx_irq, lp);
		return err;
	}

	spin_lock_irqsave(&lp->lock, flags);

	enable_irq(lp->cfg.rx_irq);
	enable_irq(lp->cfg.tx_irq);

	lp->flags |= LDC_FLAG_REGISTERED_IRQS;

	err = -ENODEV;
	hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_free_irqs;

	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		goto out_free_irqs;

	hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_unmap_tx;

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		goto out_unmap_tx;

	lp->flags |= LDC_FLAG_REGISTERED_QUEUES;

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);
	err = -EBUSY;
	if (hv_err)
		goto out_unmap_rx;

	lp->tx_acked = lp->tx_head;

	lp->hs_state = LDC_HS_OPEN;
	ldc_set_state(lp, LDC_STATE_BOUND);

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;

out_unmap_rx:
	lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
	sun4v_ldc_rx_qconf(lp->id, 0, 0);

out_unmap_tx:
	sun4v_ldc_tx_qconf(lp->id, 0, 0);

out_free_irqs:
	lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
	free_irq(lp->cfg.tx_irq, lp);
	free_irq(lp->cfg.rx_irq, lp);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_bind);

int ldc_connect(struct ldc_channel *lp)
{
	unsigned long flags;
	int err;

	if (lp->cfg.mode == LDC_MODE_RAW)
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
	    !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
	    lp->hs_state != LDC_HS_OPEN)
		err = -EINVAL;
	else
		err = start_handshake(lp);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_connect);

int ldc_disconnect(struct ldc_channel *lp)
{
	unsigned long hv_err, flags;
	int err;

	if (lp->cfg.mode == LDC_MODE_RAW)
		return -EINVAL;

	if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
	    !(lp->flags & LDC_FLAG_REGISTERED_QUEUES))
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	err = -ENODEV;
	hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		goto out_err;

	ldc_set_state(lp, LDC_STATE_BOUND);
	lp->hs_state = LDC_HS_OPEN;
	lp->flags |= LDC_FLAG_RESET;

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;

out_err:
	sun4v_ldc_tx_qconf(lp->id, 0, 0);
	sun4v_ldc_rx_qconf(lp->id, 0, 0);
	free_irq(lp->cfg.tx_irq, lp);
	free_irq(lp->cfg.rx_irq, lp);
	lp->flags &= ~(LDC_FLAG_REGISTERED_IRQS |
		       LDC_FLAG_REGISTERED_QUEUES);
	ldc_set_state(lp, LDC_STATE_INIT);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_disconnect);

int ldc_state(struct ldc_channel *lp)
{
	return lp->state;
}
EXPORT_SYMBOL(ldc_state);

static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
{
	struct ldc_packet *p;
	unsigned long new_tail;
	int err;

	if (size > LDC_PACKET_SIZE)
		return -EMSGSIZE;

	p = data_get_tx_packet(lp, &new_tail);
	if (!p)
		return -EAGAIN;

	memcpy(p, buf, size);

	err = send_tx_packet(lp, p, new_tail);
	if (!err)
		err = size;

	return err;
}

static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
{
	struct ldc_packet *p;
	unsigned long hv_err, new;
	int err;

	if (size < LDC_PACKET_SIZE)
		return -EINVAL;

	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		return ldc_abort(lp);

	if (lp->chan_state == LDC_CHANNEL_DOWN ||
	    lp->chan_state == LDC_CHANNEL_RESETTING)
		return -ECONNRESET;

	if (lp->rx_head == lp->rx_tail)
		return 0;

	p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
	memcpy(buf, p, LDC_PACKET_SIZE);

	new = rx_advance(lp, lp->rx_head);
	lp->rx_head = new;

	err = __set_rx_head(lp, new);
	if (err < 0)
		err = -ECONNRESET;
	else
		err = LDC_PACKET_SIZE;

	return err;
}

static const struct ldc_mode_ops raw_ops = {
	.write		=	write_raw,
	.read		=	read_raw,
};

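/* Illustrative sketch (not part of the driver): in RAW mode every
 * transfer is exactly one 64-byte packet with no header, sequencing,
 * or acknowledgement, so callers move whole packets at a time:
 *
 *	u8 pkt[LDC_PACKET_SIZE];
 *
 *	memset(pkt, 0, sizeof(pkt));
 *	// fill in an application-defined payload ...
 *	err = ldc_write(lp, pkt, sizeof(pkt));	// at most 64 bytes
 *	...
 *	err = ldc_read(lp, pkt, sizeof(pkt));	// buffer must be >= 64
 *
 * A full TX queue simply yields -EAGAIN and the caller retries.
 */
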
static int write_nonraw(struct ldc_channel *lp, const void *buf,
			unsigned int size)
{
	unsigned long hv_err, tail;
	unsigned int copied;
	u32 seq;
	int err;

	hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
					&lp->chan_state);
	if (unlikely(hv_err))
		return -EBUSY;

	if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
		return ldc_abort(lp);

	if (!tx_has_space_for(lp, size))
		return -EAGAIN;

	seq = lp->snd_nxt;
	copied = 0;
	tail = lp->tx_tail;
	while (copied < size) {
		struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE);
		u8 *data = ((lp->cfg.mode == LDC_MODE_UNRELIABLE) ?
			    p->u.u_data :
			    p->u.r.r_data);
		int data_len;

		p->type = LDC_DATA;
		p->stype = LDC_INFO;
		p->ctrl = 0;

		data_len = size - copied;
		if (data_len > lp->mss)
			data_len = lp->mss;

		BUG_ON(data_len > LDC_LEN);

		p->env = (data_len |
			  (copied == 0 ? LDC_START : 0) |
			  (data_len == size - copied ? LDC_STOP : 0));

		p->seqid = ++seq;

		ldcdbg(DATA, "SENT DATA [%02x:%02x:%02x:%02x:%08x]\n",
		       p->type,
		       p->stype,
		       p->ctrl,
		       p->env,
		       p->seqid);

		memcpy(data, buf, data_len);
		buf += data_len;
		copied += data_len;

		tail = tx_advance(lp, tail);
	}

	err = set_tx_tail(lp, tail);
	if (!err) {
		lp->snd_nxt = seq;
		err = size;
	}

	return err;
}

static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
		      struct ldc_packet *first_frag)
{
	int err;

	if (first_frag)
		lp->rcv_nxt = first_frag->seqid - 1;

	err = send_data_nack(lp, p);
	if (err)
		return err;

	err = __set_rx_head(lp, lp->rx_tail);
	if (err < 0)
		return ldc_abort(lp);

	return 0;
}

static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
{
	if (p->stype & LDC_ACK) {
		int err = process_data_ack(lp, p);
		if (err)
			return err;
	}
	if (p->stype & LDC_NACK)
		return ldc_abort(lp);

	return 0;
}

static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
{
	unsigned long dummy;
	int limit = 1000;

	ldcdbg(DATA, "DATA WAIT cur_head[%lx] rx_head[%lx] rx_tail[%lx]\n",
	       cur_head, lp->rx_head, lp->rx_tail);
	while (limit-- > 0) {
		unsigned long hv_err;

		hv_err = sun4v_ldc_rx_get_state(lp->id,
						&dummy,
						&lp->rx_tail,
						&lp->chan_state);
		if (hv_err)
			return ldc_abort(lp);

		if (lp->chan_state == LDC_CHANNEL_DOWN ||
		    lp->chan_state == LDC_CHANNEL_RESETTING)
			return -ECONNRESET;

		if (cur_head != lp->rx_tail) {
			ldcdbg(DATA, "DATA WAIT DONE "
			       "head[%lx] tail[%lx] chan_state[%lx]\n",
			       dummy, lp->rx_tail, lp->chan_state);
			return 0;
		}

		udelay(1);
	}
	return -EAGAIN;
}

static int rx_set_head(struct ldc_channel *lp, unsigned long head)
{
	int err = __set_rx_head(lp, head);

	if (err < 0)
		return ldc_abort(lp);

	lp->rx_head = head;
	return 0;
}

static void send_data_ack(struct ldc_channel *lp)
{
	unsigned long new_tail;
	struct ldc_packet *p;

	p = data_get_tx_packet(lp, &new_tail);
	if (likely(p)) {
		int err;

		memset(p, 0, sizeof(*p));
		p->type = LDC_DATA;
		p->stype = LDC_ACK;
		p->ctrl = 0;
		p->seqid = lp->snd_nxt + 1;
		p->u.r.ackid = lp->rcv_nxt;

		err = send_tx_packet(lp, p, new_tail);
		if (!err)
			lp->snd_nxt++;
	}
}

static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
{
	struct ldc_packet *first_frag;
	unsigned long hv_err, new;
	int err, copied;

	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		return ldc_abort(lp);

	if (lp->chan_state == LDC_CHANNEL_DOWN ||
	    lp->chan_state == LDC_CHANNEL_RESETTING)
		return -ECONNRESET;

	if (lp->rx_head == lp->rx_tail)
		return 0;

	first_frag = NULL;
	copied = err = 0;
	new = lp->rx_head;
	while (1) {
		struct ldc_packet *p;
		int pkt_len;

		BUG_ON(new == lp->rx_tail);
		p = lp->rx_base + (new / LDC_PACKET_SIZE);

		ldcdbg(RX, "RX read pkt[%02x:%02x:%02x:%02x:%08x:%08x] "
		       "rcv_nxt[%08x]\n",
		       p->type,
		       p->stype,
		       p->ctrl,
		       p->env,
		       p->seqid,
		       p->u.r.ackid,
		       lp->rcv_nxt);

		if (unlikely(!rx_seq_ok(lp, p->seqid))) {
			err = rx_bad_seq(lp, p, first_frag);
			copied = 0;
			break;
		}

		if (p->type & LDC_CTRL) {
			err = process_control_frame(lp, p);
			if (err < 0)
				break;
			err = 0;
		}

		lp->rcv_nxt = p->seqid;

		if (!(p->type & LDC_DATA)) {
			new = rx_advance(lp, new);
			goto no_data;
		}
		if (p->stype & (LDC_ACK | LDC_NACK)) {
			err = data_ack_nack(lp, p);
			if (err)
				break;
		}
		if (!(p->stype & LDC_INFO)) {
			new = rx_advance(lp, new);
			err = rx_set_head(lp, new);
			if (err)
				break;
			goto no_data;
		}

		pkt_len = p->env & LDC_LEN;

		/* Every initial packet starts with the START bit set.
		 *
		 * Singleton packets will have both START+STOP set.
		 *
		 * Fragments will have START set in the first frame, STOP
		 * set in the last frame, and neither bit set in middle
		 * frames of the packet.
		 *
		 * Therefore if we are at the beginning of a packet and
		 * we don't see START, or we are in the middle of a fragmented
		 * packet and do see START, we are unsynchronized and should
		 * flush the RX queue.
		 */
		if ((first_frag == NULL && !(p->env & LDC_START)) ||
		    (first_frag != NULL &&  (p->env & LDC_START))) {
			if (!first_frag)
				new = rx_advance(lp, new);

			err = rx_set_head(lp, new);
			if (err)
				break;

			if (!first_frag)
				goto no_data;
		}
		if (!first_frag)
			first_frag = p;

		if (pkt_len > size - copied) {
			/* User didn't give us a big enough buffer,
			 * what to do?  This is a pretty serious error.
			 *
			 * Since we haven't updated the RX ring head to
			 * consume any of the packets, signal the error
			 * to the user and just leave the RX ring alone.
			 *
			 * This seems the best behavior because this allows
			 * a user of the LDC layer to start with a small
			 * RX buffer for ldc_read() calls and use -EMSGSIZE
			 * as a cue to enlarge its read buffer.
			 */
			err = -EMSGSIZE;
			break;
		}

		/* Ok, we are gonna eat this one.  */
		new = rx_advance(lp, new);

		memcpy(buf,
		       (lp->cfg.mode == LDC_MODE_UNRELIABLE ?
			p->u.u_data : p->u.r.r_data), pkt_len);
		buf += pkt_len;
		copied += pkt_len;

		if (p->env & LDC_STOP)
			break;

no_data:
		if (new == lp->rx_tail) {
			err = rx_data_wait(lp, new);
			if (err)
				break;
		}
	}

	if (!err)
		err = rx_set_head(lp, new);

	if (err && first_frag)
		lp->rcv_nxt = first_frag->seqid - 1;

	if (!err) {
		err = copied;
		if (err > 0 && lp->cfg.mode != LDC_MODE_UNRELIABLE)
			send_data_ack(lp);
	}

	return err;
}

static const struct ldc_mode_ops nonraw_ops = {
	.write		=	write_nonraw,
	.read		=	read_nonraw,
};

static int write_stream(struct ldc_channel *lp, const void *buf,
			unsigned int size)
{
	if (size > lp->cfg.mtu)
		size = lp->cfg.mtu;
	return write_nonraw(lp, buf, size);
}

static int read_stream(struct ldc_channel *lp, void *buf, unsigned int size)
{
	if (!lp->mssbuf_len) {
		int err = read_nonraw(lp, lp->mssbuf, lp->cfg.mtu);
		if (err < 0)
			return err;

		lp->mssbuf_len = err;
		lp->mssbuf_off = 0;
	}

	if (size > lp->mssbuf_len)
		size = lp->mssbuf_len;
	memcpy(buf, lp->mssbuf + lp->mssbuf_off, size);

	lp->mssbuf_off += size;
	lp->mssbuf_len -= size;

	return size;
}

static const struct ldc_mode_ops stream_ops = {
	.write		=	write_stream,
	.read		=	read_stream,
};

int ldc_write(struct ldc_channel *lp, const void *buf, unsigned int size)
{
	unsigned long flags;
	int err;

	if (!buf)
		return -EINVAL;

	if (!size)
		return 0;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->hs_state != LDC_HS_COMPLETE)
		err = -ENOTCONN;
	else
		err = lp->mops->write(lp, buf, size);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_write);

int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
{
	unsigned long flags;
	int err;

	if (!buf)
		return -EINVAL;

	if (!size)
		return 0;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->hs_state != LDC_HS_COMPLETE)
		err = -ENOTCONN;
	else
		err = lp->mops->read(lp, buf, size);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_read);

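/* Illustrative sketch (not part of the driver): once the handshake
 * has completed (LDC_EVENT_UP), a stream-mode client just calls
 * ldc_write()/ldc_read() with arbitrary byte counts; read_stream()
 * above reassembles fragments into mssbuf and doles them out.
 * A typical DATA_READY handler drains the channel like this:
 *
 *	char buf[512];			// hypothetical scratch buffer
 *	int n;
 *
 *	while ((n = ldc_read(lp, buf, sizeof(buf))) > 0)
 *		my_consume(buf, n);	// hypothetical consumer
 *	if (n == -ECONNRESET)
 *		my_handle_reset(lp);	// channel needs a new handshake
 *
 * -EAGAIN from ldc_write() means the TX ring is full and the caller
 * should back off and retry.
 */
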
static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long n, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up.  */
			return -1;
		}
	}
	bitmap_set(arena->map, n, npages);

	arena->hint = end;

	return n;
}

#define COOKIE_PGSZ_CODE	0xf000000000000000ULL
#define COOKIE_PGSZ_CODE_SHIFT	60ULL

static u64 pagesize_code(void)
{
	switch (PAGE_SIZE) {
	default:
	case (8ULL * 1024ULL):
		return 0;
	case (64ULL * 1024ULL):
		return 1;
	case (512ULL * 1024ULL):
		return 2;
	case (4ULL * 1024ULL * 1024ULL):
		return 3;
	case (32ULL * 1024ULL * 1024ULL):
		return 4;
	case (256ULL * 1024ULL * 1024ULL):
		return 5;
	}
}

static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
{
	return ((pgsz_code << COOKIE_PGSZ_CODE_SHIFT) |
		(index << PAGE_SHIFT) |
		page_offset);
}

static u64 cookie_to_index(u64 cookie, unsigned long *shift)
{
	u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;

	cookie &= ~COOKIE_PGSZ_CODE;

	*shift = szcode * 3;

	return (cookie >> (13ULL + (szcode * 3ULL)));
}

static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
					     unsigned long npages)
{
	long entry;

	entry = arena_alloc(iommu, npages);
	if (unlikely(entry < 0))
		return NULL;

	return iommu->page_table + entry;
}

static u64 perm_to_mte(unsigned int map_perm)
{
	u64 mte_base;

	mte_base = pagesize_code();

	if (map_perm & LDC_MAP_SHADOW) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_COPY_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_COPY_W;
	}
	if (map_perm & LDC_MAP_DIRECT) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_READ;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_WRITE;
		if (map_perm & LDC_MAP_X)
			mte_base |= LDC_MTE_EXEC;
	}
	if (map_perm & LDC_MAP_IO) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_IOMMU_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_IOMMU_W;
	}

	return mte_base;
}

static int pages_in_region(unsigned long base, long len)
{
	int count = 0;

	do {
		unsigned long new = (base + PAGE_SIZE) & PAGE_MASK;

		len -= (new - base);
		base = new;
		count++;
	} while (len > 0);

	return count;
}

struct cookie_state {
	struct ldc_mtable_entry		*page_table;
	struct ldc_trans_cookie		*cookies;
	u64				mte_base;
	u64				prev_cookie;
	u32				pte_idx;
	u32				nc;
};

static void fill_cookies(struct cookie_state *sp, unsigned long pa,
			 unsigned long off, unsigned long len)
{
	do {
		unsigned long tlen, new = pa + PAGE_SIZE;
		u64 this_cookie;

		sp->page_table[sp->pte_idx].mte = sp->mte_base | pa;

		tlen = PAGE_SIZE;
		if (off)
			tlen = PAGE_SIZE - off;
		if (tlen > len)
			tlen = len;

		this_cookie = make_cookie(sp->pte_idx,
					  pagesize_code(), off);

		off = 0;

		if (this_cookie == sp->prev_cookie) {
			sp->cookies[sp->nc - 1].cookie_size += tlen;
		} else {
			sp->cookies[sp->nc].cookie_addr = this_cookie;
			sp->cookies[sp->nc].cookie_size = tlen;
			sp->nc++;
		}
		sp->prev_cookie = this_cookie + tlen;

		sp->pte_idx++;

		len -= tlen;
		pa = new;
	} while (len > 0);
}

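/* Illustrative sketch (not part of the driver): a transfer cookie
 * built by make_cookie() above packs the page-size code into the top
 * four bits, the map-table index shifted by PAGE_SHIFT below that,
 * and the in-page offset in the low bits.  With 8K pages (szcode 0,
 * PAGE_SHIFT 13), hypothetical index 5 and offset 0x40:
 *
 *	cookie = (0ULL << 60) | (5ULL << 13) | 0x40 = 0xa040
 *
 * cookie_to_index() inverts this: it strips COOKIE_PGSZ_CODE and
 * shifts right by 13 + 3 * szcode, recovering index 5, since each
 * step in szcode multiplies the page size by 8 (three more bits).
 */
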
static int sg_count_one(struct scatterlist *sg)
{
	unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
	long len = sg->length;

	if ((sg->offset | len) & (8UL - 1))
		return -EFAULT;

	return pages_in_region(base + sg->offset, len);
}

static int sg_count_pages(struct scatterlist *sg, int num_sg)
{
	int count;
	int i;

	count = 0;
	for (i = 0; i < num_sg; i++) {
		int err = sg_count_one(sg + i);
		if (err < 0)
			return err;
		count += err;
	}

	return count;
}

int ldc_map_sg(struct ldc_channel *lp,
	       struct scatterlist *sg, int num_sg,
	       struct ldc_trans_cookie *cookies, int ncookies,
	       unsigned int map_perm)
{
	unsigned long i, npages, flags;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;
	int err;

	if (map_perm & ~LDC_MAP_ALL)
		return -EINVAL;

	err = sg_count_pages(sg, num_sg);
	if (err < 0)
		return err;

	npages = err;
	if (err > ncookies)
		return -EMSGSIZE;

	iommu = &lp->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;

	for (i = 0; i < num_sg; i++)
		fill_cookies(&state, page_to_pfn(sg_page(&sg[i])) << PAGE_SHIFT,
			     sg[i].offset, sg[i].length);

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_sg);

int ldc_map_single(struct ldc_channel *lp,
		   void *buf, unsigned int len,
		   struct ldc_trans_cookie *cookies, int ncookies,
		   unsigned int map_perm)
{
	unsigned long npages, pa, flags;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;

	if ((map_perm & ~LDC_MAP_ALL) || (ncookies < 1))
		return -EINVAL;

	pa = __pa(buf);
	if ((pa | len) & (8UL - 1))
		return -EFAULT;

	npages = pages_in_region(pa, len);

	iommu = &lp->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;
	fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len);
	BUG_ON(state.nc != 1);

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_single);

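/* Illustrative sketch (not part of the driver): shared-memory
 * transfers pair a mapping call with ldc_copy() and a final
 * ldc_unmap().  Exporting a buffer to the peer might look like
 * this (names hypothetical):
 *
 *	struct ldc_trans_cookie cookies[4];
 *	int ncookies;
 *
 *	ncookies = ldc_map_single(lp, buf, len, cookies, 4,
 *				  LDC_MAP_SHADOW | LDC_MAP_R | LDC_MAP_W);
 *	if (ncookies < 0)
 *		return ncookies;
 *	// hand the cookies to the peer in a protocol message; the
 *	// peer then drives ldc_copy() against them ...
 *	ldc_unmap(lp, cookies, ncookies);
 *
 * Buffers and lengths must be 8-byte aligned or the map calls
 * fail with -EFAULT.
 */
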
static void free_npages(unsigned long id, struct ldc_iommu *iommu,
			u64 cookie, u64 size)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, shift, index, npages;
	struct ldc_mtable_entry *base;

	npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
	index = cookie_to_index(cookie, &shift);
	base = iommu->page_table + index;

	BUG_ON(index > arena->limit ||
	       (index + npages) > arena->limit);

	for (i = 0; i < npages; i++) {
		if (base->cookie)
			sun4v_ldc_revoke(id, cookie + (i << shift),
					 base->cookie);
		base->mte = 0;
		__clear_bit(index + i, arena->map);
	}
}

void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
	       int ncookies)
{
	struct ldc_iommu *iommu = &lp->iommu;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	for (i = 0; i < ncookies; i++) {
		u64 addr = cookies[i].cookie_addr;
		u64 size = cookies[i].cookie_size;

		free_npages(lp->id, iommu, addr, size);
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
EXPORT_SYMBOL(ldc_unmap);

int ldc_copy(struct ldc_channel *lp, int copy_dir,
	     void *buf, unsigned int len, unsigned long offset,
	     struct ldc_trans_cookie *cookies, int ncookies)
{
	unsigned int orig_len;
	unsigned long ra;
	int i;

	if (copy_dir != LDC_COPY_IN && copy_dir != LDC_COPY_OUT) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Bad copy_dir[%d]\n",
		       lp->id, copy_dir);
		return -EINVAL;
	}

	ra = __pa(buf);
	if ((ra | len | offset) & (8UL - 1)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Unaligned buffer "
		       "ra[%lx] len[%x] offset[%lx]\n",
		       lp->id, ra, len, offset);
		return -EFAULT;
	}

	if (lp->hs_state != LDC_HS_COMPLETE ||
	    (lp->flags & LDC_FLAG_RESET)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Link down hs_state[%x] "
		       "flags[%x]\n", lp->id, lp->hs_state, lp->flags);
		return -ECONNRESET;
	}

	orig_len = len;
	for (i = 0; i < ncookies; i++) {
		unsigned long cookie_raddr = cookies[i].cookie_addr;
		unsigned long this_len = cookies[i].cookie_size;
		unsigned long actual_len;

		if (unlikely(offset)) {
			unsigned long this_off = offset;

			if (this_off > this_len)
				this_off = this_len;

			offset -= this_off;
			this_len -= this_off;
			if (!this_len)
				continue;
			cookie_raddr += this_off;
		}

		if (this_len > len)
			this_len = len;

		while (1) {
			unsigned long hv_err;

			hv_err = sun4v_ldc_copy(lp->id, copy_dir,
						cookie_raddr, ra,
						this_len, &actual_len);
			if (unlikely(hv_err)) {
				printk(KERN_ERR PFX "ldc_copy: ID[%lu] "
				       "HV error %lu\n",
				       lp->id, hv_err);
				if (lp->hs_state != LDC_HS_COMPLETE ||
				    (lp->flags & LDC_FLAG_RESET))
					return -ECONNRESET;
				else
					return -EFAULT;
			}

			cookie_raddr += actual_len;
			ra += actual_len;
			len -= actual_len;
			if (actual_len == this_len)
				break;

			this_len -= actual_len;
		}

		if (!len)
			break;
	}

	/* It is caller policy what to do about short copies.
	 * For example, a networking driver can declare the
	 * packet a runt and drop it.
	 */

	return orig_len - len;
}
EXPORT_SYMBOL(ldc_copy);

void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
			  struct ldc_trans_cookie *cookies, int *ncookies,
			  unsigned int map_perm)
{
	void *buf;
	int err;

	if (len & (8UL - 1))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	err = ldc_map_single(lp, buf, len, cookies, *ncookies, map_perm);
	if (err < 0) {
		kfree(buf);
		return ERR_PTR(err);
	}
	*ncookies = err;

	return buf;
}
EXPORT_SYMBOL(ldc_alloc_exp_dring);

void ldc_free_exp_dring(struct ldc_channel *lp, void *buf, unsigned int len,
			struct ldc_trans_cookie *cookies, int ncookies)
{
	ldc_unmap(lp, cookies, ncookies);
	kfree(buf);
}
EXPORT_SYMBOL(ldc_free_exp_dring);

static int __init ldc_init(void)
{
	unsigned long major, minor;
	struct mdesc_handle *hp;
	const u64 *v;
	int err;
	u64 mp;

	hp = mdesc_grab();
	if (!hp)
		return -ENODEV;

	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	err = -ENODEV;
	if (mp == MDESC_NODE_NULL)
		goto out;

	v = mdesc_get_property(hp, mp, "domaining-enabled", NULL);
	if (!v)
		goto out;

	major = 1;
	minor = 0;
	if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) {
		printk(KERN_INFO PFX "Could not register LDOM hvapi.\n");
		goto out;
	}

	printk(KERN_INFO "%s", version);

	if (!*v) {
		printk(KERN_INFO PFX "Domaining disabled.\n");
		goto out;
	}
	ldom_domaining_enabled = 1;
	err = 0;

out:
	mdesc_release(hp);
	return err;
}

core_initcall(ldc_init);