/* ldc.c: Logical Domain Channel link-layer protocol driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bitmap.h>

#include <asm/hypervisor.h>
#include <asm/iommu.h>
#include <asm/page.h>
#include <asm/ldc.h>
#include <asm/mdesc.h>

#define DRV_MODULE_NAME		"ldc"
#define PFX			DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION	"1.1"
#define DRV_MODULE_RELDATE	"July 22, 2008"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
#define LDC_PACKET_SIZE		64

/* Packet header layout for unreliable and reliable mode frames.
 * When in RAW mode, packets are simply straight 64-byte payloads
 * with no headers.
 */
struct ldc_packet {
	u8			type;
#define LDC_CTRL		0x01
#define LDC_DATA		0x02
#define LDC_ERR			0x10

	u8			stype;
#define LDC_INFO		0x01
#define LDC_ACK			0x02
#define LDC_NACK		0x04

	u8			ctrl;
#define LDC_VERS		0x01 /* Link Version		*/
#define LDC_RTS			0x02 /* Request To Send		*/
#define LDC_RTR			0x03 /* Ready To Receive	*/
#define LDC_RDX			0x04 /* Ready for Data eXchange	*/
#define LDC_CTRL_MSK		0x0f

	u8			env;
#define LDC_LEN			0x3f
#define LDC_FRAG_MASK		0xc0
#define LDC_START		0x40
#define LDC_STOP		0x80

	u32			seqid;

	union {
		u8		u_data[LDC_PACKET_SIZE - 8];
		struct {
			u32	pad;
			u32	ackid;
			u8	r_data[LDC_PACKET_SIZE - 8 - 8];
		} r;
	} u;
};
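/* For example, env packs the payload length into the low six bits
 * (LDC_LEN) and the fragmentation state into the top two (LDC_FRAG_MASK).
 * A singleton frame carrying 48 bytes of payload would use:
 *
 *	env = 48 | LDC_START | LDC_STOP;
 *
 * The first fragment of a multi-packet message sets only LDC_START,
 * the last sets only LDC_STOP, and middle fragments set neither.
 */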
struct ldc_version {
	u16 major;
	u16 minor;
};

/* Ordered from largest major to lowest. */
static struct ldc_version ver_arr[] = {
	{ .major = 1, .minor = 0 },
};

#define LDC_DEFAULT_MTU			(4 * LDC_PACKET_SIZE)
#define LDC_DEFAULT_NUM_ENTRIES		(PAGE_SIZE / LDC_PACKET_SIZE)

struct ldc_channel;

struct ldc_mode_ops {
	int (*write)(struct ldc_channel *, const void *, unsigned int);
	int (*read)(struct ldc_channel *, void *, unsigned int);
};

static const struct ldc_mode_ops raw_ops;
static const struct ldc_mode_ops nonraw_ops;
static const struct ldc_mode_ops stream_ops;

int ldom_domaining_enabled;

struct ldc_iommu {
	/* Protects arena alloc/free.  */
	spinlock_t		lock;
	struct iommu_arena	arena;
	struct ldc_mtable_entry	*page_table;
};

struct ldc_channel {
	/* Protects all operations that depend upon channel state.  */
	spinlock_t		lock;

	unsigned long		id;

	u8			*mssbuf;
	u32			mssbuf_len;
	u32			mssbuf_off;

	struct ldc_packet	*tx_base;
	unsigned long		tx_head;
	unsigned long		tx_tail;
	unsigned long		tx_num_entries;
	unsigned long		tx_ra;

	unsigned long		tx_acked;

	struct ldc_packet	*rx_base;
	unsigned long		rx_head;
	unsigned long		rx_tail;
	unsigned long		rx_num_entries;
	unsigned long		rx_ra;

	u32			rcv_nxt;
	u32			snd_nxt;

	unsigned long		chan_state;

	struct ldc_channel_config cfg;
	void			*event_arg;

	const struct ldc_mode_ops *mops;

	struct ldc_iommu	iommu;

	struct ldc_version	ver;

	u8			hs_state;
#define LDC_HS_CLOSED		0x00
#define LDC_HS_OPEN		0x01
#define LDC_HS_GOTVERS		0x02
#define LDC_HS_SENTRTR		0x03
#define LDC_HS_GOTRTR		0x04
#define LDC_HS_COMPLETE		0x10

	u8			flags;
#define LDC_FLAG_ALLOCED_QUEUES	0x01
#define LDC_FLAG_REGISTERED_QUEUES 0x02
#define LDC_FLAG_REGISTERED_IRQS 0x04
#define LDC_FLAG_RESET		0x10

	u8			mss;
	u8			state;

#define LDC_IRQ_NAME_MAX	32
	char			rx_irq_name[LDC_IRQ_NAME_MAX];
	char			tx_irq_name[LDC_IRQ_NAME_MAX];

	struct hlist_head	mh_list;

	struct hlist_node	list;
};

#define ldcdbg(TYPE, f, a...) \
do {	if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
		printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
} while (0)

static const char *state_to_str(u8 state)
{
	switch (state) {
	case LDC_STATE_INVALID:
		return "INVALID";
	case LDC_STATE_INIT:
		return "INIT";
	case LDC_STATE_BOUND:
		return "BOUND";
	case LDC_STATE_READY:
		return "READY";
	case LDC_STATE_CONNECTED:
		return "CONNECTED";
	default:
		return "<UNKNOWN>";
	}
}

static void ldc_set_state(struct ldc_channel *lp, u8 state)
{
	ldcdbg(STATE, "STATE (%s) --> (%s)\n",
	       state_to_str(lp->state),
	       state_to_str(state));

	lp->state = state;
}

static unsigned long __advance(unsigned long off, unsigned long num_entries)
{
	off += LDC_PACKET_SIZE;
	if (off == (num_entries * LDC_PACKET_SIZE))
		off = 0;

	return off;
}

static unsigned long rx_advance(struct ldc_channel *lp, unsigned long off)
{
	return __advance(off, lp->rx_num_entries);
}

static unsigned long tx_advance(struct ldc_channel *lp, unsigned long off)
{
	return __advance(off, lp->tx_num_entries);
}

static struct ldc_packet *handshake_get_tx_packet(struct ldc_channel *lp,
						  unsigned long *new_tail)
{
	struct ldc_packet *p;
	unsigned long t;

	t = tx_advance(lp, lp->tx_tail);
	if (t == lp->tx_head)
		return NULL;

	*new_tail = t;

	p = lp->tx_base;
	return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
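/* Note that, as with any ring whose emptiness test is head == tail, one
 * descriptor always stays unused: the queue is treated as full when
 * advancing the tail would make it equal to the head.  With the default
 * PAGE_SIZE / 64 entries (128 on a kernel with 8K pages) that leaves
 * 127 packets that can be in flight at once.
 */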
/* When we are in reliable or stream mode, we have to track the next
 * packet we haven't gotten an ACK for in the TX queue using tx_acked.
 * We have to be careful not to stomp over the queue past that point.
 * During the handshake, we don't have TX data packets pending in the
 * queue and that's why handshake_get_tx_packet() need not be mindful
 * of lp->tx_acked.
 */
static unsigned long head_for_data(struct ldc_channel *lp)
{
	if (lp->cfg.mode == LDC_MODE_STREAM)
		return lp->tx_acked;
	return lp->tx_head;
}

static int tx_has_space_for(struct ldc_channel *lp, unsigned int size)
{
	unsigned long limit, tail, new_tail, diff;
	unsigned int mss;

	limit = head_for_data(lp);
	tail = lp->tx_tail;
	new_tail = tx_advance(lp, tail);
	if (new_tail == limit)
		return 0;

	if (limit > new_tail)
		diff = limit - new_tail;
	else
		diff = (limit +
			((lp->tx_num_entries * LDC_PACKET_SIZE) - new_tail));
	diff /= LDC_PACKET_SIZE;
	mss = lp->mss;

	if (diff * mss < size)
		return 0;

	return 1;
}

static struct ldc_packet *data_get_tx_packet(struct ldc_channel *lp,
					     unsigned long *new_tail)
{
	struct ldc_packet *p;
	unsigned long h, t;

	h = head_for_data(lp);
	t = tx_advance(lp, lp->tx_tail);
	if (t == h)
		return NULL;

	*new_tail = t;

	p = lp->tx_base;
	return p + (lp->tx_tail / LDC_PACKET_SIZE);
}
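/* For example, in stream mode lp->mss is 48 bytes (64 minus the 8-byte
 * header and the 8-byte ACK area), so tx_has_space_for() will only let
 * a 200-byte write proceed once at least DIV_ROUND_UP(200, 48) == 5
 * descriptors are free between the tail and the un-ACK'd head.
 */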
static int set_tx_tail(struct ldc_channel *lp, unsigned long tail)
{
	unsigned long orig_tail = lp->tx_tail;
	int limit = 1000;

	lp->tx_tail = tail;
	while (limit-- > 0) {
		unsigned long err;

		err = sun4v_ldc_tx_set_qtail(lp->id, tail);
		if (!err)
			return 0;

		if (err != HV_EWOULDBLOCK) {
			lp->tx_tail = orig_tail;
			return -EINVAL;
		}
		udelay(1);
	}

	lp->tx_tail = orig_tail;
	return -EBUSY;
}

/* This just updates the head value in the hypervisor using
 * a polling loop with a timeout.  The caller takes care of
 * updating software state representing the head change, if any.
 */
static int __set_rx_head(struct ldc_channel *lp, unsigned long head)
{
	int limit = 1000;

	while (limit-- > 0) {
		unsigned long err;

		err = sun4v_ldc_rx_set_qhead(lp->id, head);
		if (!err)
			return 0;

		if (err != HV_EWOULDBLOCK)
			return -EINVAL;

		udelay(1);
	}

	return -EBUSY;
}

static int send_tx_packet(struct ldc_channel *lp,
			  struct ldc_packet *p,
			  unsigned long new_tail)
{
	BUG_ON(p != (lp->tx_base + (lp->tx_tail / LDC_PACKET_SIZE)));

	return set_tx_tail(lp, new_tail);
}

static struct ldc_packet *handshake_compose_ctrl(struct ldc_channel *lp,
						 u8 stype, u8 ctrl,
						 void *data, int dlen,
						 unsigned long *new_tail)
{
	struct ldc_packet *p = handshake_get_tx_packet(lp, new_tail);

	if (p) {
		memset(p, 0, sizeof(*p));
		p->type = LDC_CTRL;
		p->stype = stype;
		p->ctrl = ctrl;
		if (data)
			memcpy(p->u.u_data, data, dlen);
	}
	return p;
}
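/* The payload of a control packet lives in u.u_data, so dlen here can
 * be at most LDC_PACKET_SIZE - 8 == 56 bytes; the struct ldc_version
 * payloads used during the handshake are only 4 bytes.
 */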
static int start_handshake(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	struct ldc_version *ver;
	unsigned long new_tail;

	ver = &ver_arr[0];

	ldcdbg(HS, "SEND VER INFO maj[%u] min[%u]\n",
	       ver->major, ver->minor);

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
				   ver, sizeof(*ver), &new_tail);
	if (p) {
		int err = send_tx_packet(lp, p, new_tail);
		if (!err)
			lp->flags &= ~LDC_FLAG_RESET;
		return err;
	}
	return -EBUSY;
}

static int send_version_nack(struct ldc_channel *lp,
			     u16 major, u16 minor)
{
	struct ldc_packet *p;
	struct ldc_version ver;
	unsigned long new_tail;

	ver.major = major;
	ver.minor = minor;

	p = handshake_compose_ctrl(lp, LDC_NACK, LDC_VERS,
				   &ver, sizeof(ver), &new_tail);
	if (p) {
		ldcdbg(HS, "SEND VER NACK maj[%u] min[%u]\n",
		       ver.major, ver.minor);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}

static int send_version_ack(struct ldc_channel *lp,
			    struct ldc_version *vp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_ACK, LDC_VERS,
				   vp, sizeof(*vp), &new_tail);
	if (p) {
		ldcdbg(HS, "SEND VER ACK maj[%u] min[%u]\n",
		       vp->major, vp->minor);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}

static int send_rts(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTS, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = lp->cfg.mode;
		p->seqid = 0;
		lp->rcv_nxt = 0;

		ldcdbg(HS, "SEND RTS env[0x%x] seqid[0x%x]\n",
		       p->env, p->seqid);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}

static int send_rtr(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RTR, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = lp->cfg.mode;
		p->seqid = 0;

		ldcdbg(HS, "SEND RTR env[0x%x] seqid[0x%x]\n",
		       p->env, p->seqid);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}

static int send_rdx(struct ldc_channel *lp)
{
	struct ldc_packet *p;
	unsigned long new_tail;

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_RDX, NULL, 0,
				   &new_tail);
	if (p) {
		p->env = 0;
		p->seqid = ++lp->snd_nxt;
		p->u.r.ackid = lp->rcv_nxt;

		ldcdbg(HS, "SEND RDX env[0x%x] seqid[0x%x] ackid[0x%x]\n",
		       p->env, p->seqid, p->u.r.ackid);

		return send_tx_packet(lp, p, new_tail);
	}
	return -EBUSY;
}

static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
{
	struct ldc_packet *p;
	unsigned long new_tail;
	int err;

	p = data_get_tx_packet(lp, &new_tail);
	if (!p)
		return -EBUSY;
	memset(p, 0, sizeof(*p));
	p->type = data_pkt->type;
	p->stype = LDC_NACK;
	p->ctrl = data_pkt->ctrl & LDC_CTRL_MSK;
	p->seqid = lp->snd_nxt + 1;
	p->u.r.ackid = lp->rcv_nxt;

	ldcdbg(HS, "SEND DATA NACK type[0x%x] ctl[0x%x] seq[0x%x] ack[0x%x]\n",
	       p->type, p->ctrl, p->seqid, p->u.r.ackid);

	err = send_tx_packet(lp, p, new_tail);
	if (!err)
		lp->snd_nxt++;

	return err;
}

static int ldc_abort(struct ldc_channel *lp)
{
	unsigned long hv_err;

	ldcdbg(STATE, "ABORT\n");

	/* We report but do not act upon the hypervisor errors because
	 * there really isn't much we can do if they fail at this point.
	 */
	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_tx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
		       lp->id, lp->tx_ra, lp->tx_num_entries, hv_err);

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_tx_get_state(%lx,...) failed, err=%lu\n",
		       lp->id, hv_err);

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_rx_qconf(%lx,%lx,%lx) failed, err=%lu\n",
		       lp->id, lp->rx_ra, lp->rx_num_entries, hv_err);

	/* Refetch the RX queue state as well, because we could be invoked
	 * here in the queue processing context.
	 */
	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		printk(KERN_ERR PFX "ldc_abort: "
		       "sun4v_ldc_rx_get_state(%lx,...) failed, err=%lu\n",
		       lp->id, hv_err);

	return -ECONNRESET;
}
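/* Reconfiguring a queue via sun4v_ldc_{tx,rx}_qconf() discards its
 * contents, which is why ldc_abort() immediately refetches the head,
 * tail and channel state from the hypervisor afterwards.  The
 * -ECONNRESET return value then propagates up through the read/write
 * paths so that callers observe the reset.
 */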
static struct ldc_version *find_by_major(u16 major)
{
	struct ldc_version *ret = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(ver_arr); i++) {
		struct ldc_version *v = &ver_arr[i];
		if (v->major <= major) {
			ret = v;
			break;
		}
	}
	return ret;
}

static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
{
	struct ldc_version *vap;
	int err;

	ldcdbg(HS, "GOT VERSION INFO major[%x] minor[%x]\n",
	       vp->major, vp->minor);

	if (lp->hs_state == LDC_HS_GOTVERS) {
		lp->hs_state = LDC_HS_OPEN;
		memset(&lp->ver, 0, sizeof(lp->ver));
	}

	vap = find_by_major(vp->major);
	if (!vap) {
		err = send_version_nack(lp, 0, 0);
	} else if (vap->major != vp->major) {
		err = send_version_nack(lp, vap->major, vap->minor);
	} else {
		struct ldc_version ver = *vp;
		if (ver.minor > vap->minor)
			ver.minor = vap->minor;
		err = send_version_ack(lp, &ver);
		if (!err) {
			lp->ver = ver;
			lp->hs_state = LDC_HS_GOTVERS;
		}
	}
	if (err)
		return ldc_abort(lp);

	return 0;
}

static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
{
	ldcdbg(HS, "GOT VERSION ACK major[%x] minor[%x]\n",
	       vp->major, vp->minor);

	if (lp->hs_state == LDC_HS_GOTVERS) {
		if (lp->ver.major != vp->major ||
		    lp->ver.minor != vp->minor)
			return ldc_abort(lp);
	} else {
		lp->ver = *vp;
		lp->hs_state = LDC_HS_GOTVERS;
	}
	if (send_rts(lp))
		return ldc_abort(lp);
	return 0;
}

static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
{
	struct ldc_version *vap;
	struct ldc_packet *p;
	unsigned long new_tail;

	if (vp->major == 0 && vp->minor == 0)
		return ldc_abort(lp);

	vap = find_by_major(vp->major);
	if (!vap)
		return ldc_abort(lp);

	p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
				   vap, sizeof(*vap),
				   &new_tail);
	if (!p)
		return ldc_abort(lp);

	return send_tx_packet(lp, p, new_tail);
}

static int process_version(struct ldc_channel *lp,
			   struct ldc_packet *p)
{
	struct ldc_version *vp;

	vp = (struct ldc_version *) p->u.u_data;

	switch (p->stype) {
	case LDC_INFO:
		return process_ver_info(lp, vp);

	case LDC_ACK:
		return process_ver_ack(lp, vp);

	case LDC_NACK:
		return process_ver_nack(lp, vp);

	default:
		return ldc_abort(lp);
	}
}

static int process_rts(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RTS stype[%x] seqid[%x] env[%x]\n",
	       p->stype, p->seqid, p->env);

	if (p->stype != LDC_INFO ||
	    lp->hs_state != LDC_HS_GOTVERS ||
	    p->env != lp->cfg.mode)
		return ldc_abort(lp);

	lp->snd_nxt = p->seqid;
	lp->rcv_nxt = p->seqid;
	lp->hs_state = LDC_HS_SENTRTR;
	if (send_rtr(lp))
		return ldc_abort(lp);

	return 0;
}

static int process_rtr(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RTR stype[%x] seqid[%x] env[%x]\n",
	       p->stype, p->seqid, p->env);

	if (p->stype != LDC_INFO ||
	    p->env != lp->cfg.mode)
		return ldc_abort(lp);

	lp->snd_nxt = p->seqid;
	lp->hs_state = LDC_HS_COMPLETE;
	ldc_set_state(lp, LDC_STATE_CONNECTED);
	send_rdx(lp);

	return LDC_EVENT_UP;
}

static int rx_seq_ok(struct ldc_channel *lp, u32 seqid)
{
	return lp->rcv_nxt + 1 == seqid;
}

static int process_rdx(struct ldc_channel *lp,
		       struct ldc_packet *p)
{
	ldcdbg(HS, "GOT RDX stype[%x] seqid[%x] env[%x] ackid[%x]\n",
	       p->stype, p->seqid, p->env, p->u.r.ackid);

	if (p->stype != LDC_INFO ||
	    !(rx_seq_ok(lp, p->seqid)))
		return ldc_abort(lp);

	lp->rcv_nxt = p->seqid;

	lp->hs_state = LDC_HS_COMPLETE;
	ldc_set_state(lp, LDC_STATE_CONNECTED);

	return LDC_EVENT_UP;
}
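/* Putting the pieces above together, a successful handshake initiated
 * by peer A looks like:
 *
 *	A -> B	CTRL/INFO/VERS	(proposed version)
 *	B -> A	CTRL/ACK/VERS	(agreed version, minor possibly clamped)
 *	A -> B	CTRL/INFO/RTS	(A's starting seqid)
 *	B -> A	CTRL/INFO/RTR
 *	A -> B	CTRL/INFO/RDX
 *
 * With ver_arr[] containing only { 1, 0 }: an incoming INFO for 1.2 is
 * ACK'd as 1.0 (minor clamped by process_ver_info()), an INFO for 2.0
 * is NACK'd with the next lower supported major (1.0), and an INFO for
 * 0.x is NACK'd with 0.0, which makes the peer abort.
 */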
static int process_control_frame(struct ldc_channel *lp,
				 struct ldc_packet *p)
{
	switch (p->ctrl) {
	case LDC_VERS:
		return process_version(lp, p);

	case LDC_RTS:
		return process_rts(lp, p);

	case LDC_RTR:
		return process_rtr(lp, p);

	case LDC_RDX:
		return process_rdx(lp, p);

	default:
		return ldc_abort(lp);
	}
}

static int process_error_frame(struct ldc_channel *lp,
			       struct ldc_packet *p)
{
	return ldc_abort(lp);
}

static int process_data_ack(struct ldc_channel *lp,
			    struct ldc_packet *ack)
{
	unsigned long head = lp->tx_acked;
	u32 ackid = ack->u.r.ackid;

	while (1) {
		struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);

		head = tx_advance(lp, head);

		if (p->seqid == ackid) {
			lp->tx_acked = head;
			return 0;
		}
		if (head == lp->tx_tail)
			return ldc_abort(lp);
	}

	return 0;
}

static void send_events(struct ldc_channel *lp, unsigned int event_mask)
{
	if (event_mask & LDC_EVENT_RESET)
		lp->cfg.event(lp->event_arg, LDC_EVENT_RESET);
	if (event_mask & LDC_EVENT_UP)
		lp->cfg.event(lp->event_arg, LDC_EVENT_UP);
	if (event_mask & LDC_EVENT_DATA_READY)
		lp->cfg.event(lp->event_arg, LDC_EVENT_DATA_READY);
}

static irqreturn_t ldc_rx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long orig_state, flags;
	unsigned int event_mask;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;

	/* We should probably check for hypervisor errors here and
	 * reset the LDC channel if we get one.
	 */
	sun4v_ldc_rx_get_state(lp->id,
			       &lp->rx_head,
			       &lp->rx_tail,
			       &lp->chan_state);

	ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);

	event_mask = 0;

	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		event_mask |= LDC_EVENT_UP;

		orig_state = lp->chan_state;
	}

	/* If we are in reset state, flush the RX queue and ignore
	 * everything.
	 */
	if (lp->flags & LDC_FLAG_RESET) {
		(void) __set_rx_head(lp, lp->rx_tail);
		goto out;
	}

	/* Once we finish the handshake, we let the ldc_read()
	 * paths do all of the control frame and state management.
	 * Just trigger the callback.
	 */
	if (lp->hs_state == LDC_HS_COMPLETE) {
handshake_complete:
		if (lp->chan_state != orig_state) {
			unsigned int event = LDC_EVENT_RESET;

			if (lp->chan_state == LDC_CHANNEL_UP)
				event = LDC_EVENT_UP;

			event_mask |= event;
		}
		if (lp->rx_head != lp->rx_tail)
			event_mask |= LDC_EVENT_DATA_READY;

		goto out;
	}

	if (lp->chan_state != orig_state)
		goto out;

	while (lp->rx_head != lp->rx_tail) {
		struct ldc_packet *p;
		unsigned long new;
		int err;

		p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);

		switch (p->type) {
		case LDC_CTRL:
			err = process_control_frame(lp, p);
			if (err > 0)
				event_mask |= err;
			break;

		case LDC_DATA:
			event_mask |= LDC_EVENT_DATA_READY;
			err = 0;
			break;

		case LDC_ERR:
			err = process_error_frame(lp, p);
			break;

		default:
			err = ldc_abort(lp);
			break;
		}

		if (err < 0)
			break;

		new = lp->rx_head;
		new += LDC_PACKET_SIZE;
		if (new == (lp->rx_num_entries * LDC_PACKET_SIZE))
			new = 0;
		lp->rx_head = new;

		err = __set_rx_head(lp, new);
		if (err < 0) {
			(void) ldc_abort(lp);
			break;
		}
		if (lp->hs_state == LDC_HS_COMPLETE)
			goto handshake_complete;
	}

out:
	spin_unlock_irqrestore(&lp->lock, flags);

	send_events(lp, event_mask);

	return IRQ_HANDLED;
}
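/* The client's event callback runs without lp->lock held (see
 * send_events() above) and typically just dispatches on the event
 * code.  A hypothetical consumer might look like this (the names
 * my_event and my_driver are illustrative only):
 *
 *	static void my_event(void *arg, int event)
 *	{
 *		struct my_driver *dr = arg;	// event_arg from ldc_alloc()
 *
 *		if (event == LDC_EVENT_RESET)
 *			// schedule a reconnect, e.g. ldc_disconnect()
 *			// followed by ldc_connect()
 *		else if (event == LDC_EVENT_UP)
 *			// mark the link up
 *		else if (event == LDC_EVENT_DATA_READY)
 *			// drain the channel with ldc_read()
 *	}
 */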
static irqreturn_t ldc_tx(int irq, void *dev_id)
{
	struct ldc_channel *lp = dev_id;
	unsigned long flags, orig_state;
	unsigned int event_mask = 0;

	spin_lock_irqsave(&lp->lock, flags);

	orig_state = lp->chan_state;

	/* We should probably check for hypervisor errors here and
	 * reset the LDC channel if we get one.
	 */
	sun4v_ldc_tx_get_state(lp->id,
			       &lp->tx_head,
			       &lp->tx_tail,
			       &lp->chan_state);

	ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
	       orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);

	if (lp->cfg.mode == LDC_MODE_RAW &&
	    lp->chan_state == LDC_CHANNEL_UP) {
		lp->hs_state = LDC_HS_COMPLETE;
		ldc_set_state(lp, LDC_STATE_CONNECTED);

		event_mask |= LDC_EVENT_UP;
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	send_events(lp, event_mask);

	return IRQ_HANDLED;
}

/* XXX ldc_alloc() and ldc_free() need to run under a mutex so
 * XXX that addition and removal from the ldc_channel_list has
 * XXX atomicity, otherwise the __ldc_channel_exists() check is
 * XXX totally pointless as another thread can slip into ldc_alloc()
 * XXX and add a channel with the same ID.  There also needs to be
 * XXX a spinlock for ldc_channel_list.
 */
static HLIST_HEAD(ldc_channel_list);

static int __ldc_channel_exists(unsigned long id)
{
	struct ldc_channel *lp;

	hlist_for_each_entry(lp, &ldc_channel_list, list) {
		if (lp->id == id)
			return 1;
	}
	return 0;
}

static int alloc_queue(const char *name, unsigned long num_entries,
		       struct ldc_packet **base, unsigned long *ra)
{
	unsigned long size, order;
	void *q;

	size = num_entries * LDC_PACKET_SIZE;
	order = get_order(size);

	q = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!q) {
		printk(KERN_ERR PFX "Alloc of %s queue failed with "
		       "size=%lu order=%lu\n", name, size, order);
		return -ENOMEM;
	}

	memset(q, 0, PAGE_SIZE << order);

	*base = q;
	*ra = __pa(q);

	return 0;
}

static void free_queue(unsigned long num_entries, struct ldc_packet *q)
{
	unsigned long size, order;

	if (!q)
		return;

	size = num_entries * LDC_PACKET_SIZE;
	order = get_order(size);

	free_pages((unsigned long)q, order);
}
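/* Queue sizing arithmetic: with the default LDC_DEFAULT_NUM_ENTRIES of
 * PAGE_SIZE / 64, each queue is num_entries * 64 == PAGE_SIZE bytes,
 * i.e. exactly one page (order 0) from __get_free_pages().  The queues
 * must be physically contiguous because their real address (*ra) is
 * handed directly to the hypervisor's queue-configure calls.
 */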
/* XXX Make this configurable... XXX */
#define LDC_IOTABLE_SIZE	(8 * 1024)

static int ldc_iommu_init(struct ldc_channel *lp)
{
	unsigned long sz, num_tsb_entries, tsbsize, order;
	struct ldc_iommu *iommu = &lp->iommu;
	struct ldc_mtable_entry *table;
	unsigned long hv_err;
	int err;

	num_tsb_entries = LDC_IOTABLE_SIZE;
	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);

	spin_lock_init(&iommu->lock);

	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR PFX "Alloc of arena map failed, sz=%lu\n", sz);
		return -ENOMEM;
	}

	iommu->arena.limit = num_tsb_entries;

	order = get_order(tsbsize);

	table = (struct ldc_mtable_entry *)
		__get_free_pages(GFP_KERNEL, order);
	err = -ENOMEM;
	if (!table) {
		printk(KERN_ERR PFX "Alloc of MTE table failed, "
		       "size=%lu order=%lu\n", tsbsize, order);
		goto out_free_map;
	}

	memset(table, 0, PAGE_SIZE << order);

	iommu->page_table = table;

	hv_err = sun4v_ldc_set_map_table(lp->id, __pa(table),
					 num_tsb_entries);
	err = -EINVAL;
	if (hv_err)
		goto out_free_table;

	return 0;

out_free_table:
	free_pages((unsigned long) table, order);
	iommu->page_table = NULL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return err;
}

static void ldc_iommu_release(struct ldc_channel *lp)
{
	struct ldc_iommu *iommu = &lp->iommu;
	unsigned long num_tsb_entries, tsbsize, order;

	(void) sun4v_ldc_set_map_table(lp->id, 0, 0);

	num_tsb_entries = iommu->arena.limit;
	tsbsize = num_tsb_entries * sizeof(struct ldc_mtable_entry);
	order = get_order(tsbsize);

	free_pages((unsigned long) iommu->page_table, order);
	iommu->page_table = NULL;

	kfree(iommu->arena.map);
	iommu->arena.map = NULL;
}
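/* Sizing, for reference: LDC_IOTABLE_SIZE is 8192 entries, so the
 * allocation bitmap is 8192 / 8 = 1024 bytes, and the map table itself
 * is 8192 * sizeof(struct ldc_mtable_entry) bytes, allocated as one
 * physically contiguous block because its real address is programmed
 * into the hypervisor via sun4v_ldc_set_map_table().
 */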
struct ldc_channel *ldc_alloc(unsigned long id,
			      const struct ldc_channel_config *cfgp,
			      void *event_arg,
			      const char *name)
{
	struct ldc_channel *lp;
	const struct ldc_mode_ops *mops;
	unsigned long dummy1, dummy2, hv_err;
	u8 mss, *mssbuf;
	int err;

	err = -ENODEV;
	if (!ldom_domaining_enabled)
		goto out_err;

	err = -EINVAL;
	if (!cfgp)
		goto out_err;
	if (!name)
		goto out_err;

	switch (cfgp->mode) {
	case LDC_MODE_RAW:
		mops = &raw_ops;
		mss = LDC_PACKET_SIZE;
		break;

	case LDC_MODE_UNRELIABLE:
		mops = &nonraw_ops;
		mss = LDC_PACKET_SIZE - 8;
		break;

	case LDC_MODE_STREAM:
		mops = &stream_ops;
		mss = LDC_PACKET_SIZE - 8 - 8;
		break;

	default:
		goto out_err;
	}

	if (!cfgp->event || !event_arg || !cfgp->rx_irq || !cfgp->tx_irq)
		goto out_err;

	hv_err = sun4v_ldc_tx_qinfo(id, &dummy1, &dummy2);
	err = -ENODEV;
	if (hv_err == HV_ECHANNEL)
		goto out_err;

	err = -EEXIST;
	if (__ldc_channel_exists(id))
		goto out_err;

	mssbuf = NULL;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	err = -ENOMEM;
	if (!lp)
		goto out_err;

	spin_lock_init(&lp->lock);

	lp->id = id;

	err = ldc_iommu_init(lp);
	if (err)
		goto out_free_ldc;

	lp->mops = mops;
	lp->mss = mss;

	lp->cfg = *cfgp;
	if (!lp->cfg.mtu)
		lp->cfg.mtu = LDC_DEFAULT_MTU;

	if (lp->cfg.mode == LDC_MODE_STREAM) {
		mssbuf = kzalloc(lp->cfg.mtu, GFP_KERNEL);
		if (!mssbuf) {
			err = -ENOMEM;
			goto out_free_iommu;
		}
		lp->mssbuf = mssbuf;
	}

	lp->event_arg = event_arg;

	/* XXX allow setting via ldc_channel_config to override defaults
	 * XXX or use some formula based upon mtu
	 */
	lp->tx_num_entries = LDC_DEFAULT_NUM_ENTRIES;
	lp->rx_num_entries = LDC_DEFAULT_NUM_ENTRIES;

	err = alloc_queue("TX", lp->tx_num_entries,
			  &lp->tx_base, &lp->tx_ra);
	if (err)
		goto out_free_mssbuf;

	err = alloc_queue("RX", lp->rx_num_entries,
			  &lp->rx_base, &lp->rx_ra);
	if (err)
		goto out_free_txq;

	lp->flags |= LDC_FLAG_ALLOCED_QUEUES;

	lp->hs_state = LDC_HS_CLOSED;
	ldc_set_state(lp, LDC_STATE_INIT);

	INIT_HLIST_NODE(&lp->list);
	hlist_add_head(&lp->list, &ldc_channel_list);

	INIT_HLIST_HEAD(&lp->mh_list);

	snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
	snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);

	err = request_irq(lp->cfg.rx_irq, ldc_rx, 0,
			  lp->rx_irq_name, lp);
	if (err)
		goto out_free_txq;

	err = request_irq(lp->cfg.tx_irq, ldc_tx, 0,
			  lp->tx_irq_name, lp);
	if (err) {
		free_irq(lp->cfg.rx_irq, lp);
		goto out_free_txq;
	}

	return lp;

out_free_txq:
	free_queue(lp->tx_num_entries, lp->tx_base);

out_free_mssbuf:
	kfree(mssbuf);

out_free_iommu:
	ldc_iommu_release(lp);

out_free_ldc:
	kfree(lp);

out_err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ldc_alloc);

void ldc_unbind(struct ldc_channel *lp)
{
	if (lp->flags & LDC_FLAG_REGISTERED_IRQS) {
		free_irq(lp->cfg.rx_irq, lp);
		free_irq(lp->cfg.tx_irq, lp);
		lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
	}

	if (lp->flags & LDC_FLAG_REGISTERED_QUEUES) {
		sun4v_ldc_tx_qconf(lp->id, 0, 0);
		sun4v_ldc_rx_qconf(lp->id, 0, 0);
		lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
	}
	if (lp->flags & LDC_FLAG_ALLOCED_QUEUES) {
		free_queue(lp->tx_num_entries, lp->tx_base);
		free_queue(lp->rx_num_entries, lp->rx_base);
		lp->flags &= ~LDC_FLAG_ALLOCED_QUEUES;
	}

	ldc_set_state(lp, LDC_STATE_INIT);
}
EXPORT_SYMBOL(ldc_unbind);

void ldc_free(struct ldc_channel *lp)
{
	ldc_unbind(lp);
	hlist_del(&lp->list);
	kfree(lp->mssbuf);
	ldc_iommu_release(lp);

	kfree(lp);
}
EXPORT_SYMBOL(ldc_free);
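/* Note the teardown ordering above: ldc_unbind() quiesces the IRQs and
 * unregisters the queues with the hypervisor before ldc_free() unlinks
 * the channel and releases the map table, so no interrupt can arrive
 * for a channel whose memory is in the middle of being freed.
 */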
/* Bind the channel.  This registers the LDC queues with
 * the hypervisor and puts the channel into a pseudo-listening
 * state.  This does not initiate a handshake, ldc_connect() does
 * that.
 */
int ldc_bind(struct ldc_channel *lp)
{
	unsigned long hv_err, flags;
	int err = -EINVAL;

	if (lp->state != LDC_STATE_INIT)
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	enable_irq(lp->cfg.rx_irq);
	enable_irq(lp->cfg.tx_irq);

	lp->flags |= LDC_FLAG_REGISTERED_IRQS;

	err = -ENODEV;
	hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_free_irqs;

	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		goto out_free_irqs;

	hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_unmap_tx;

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		goto out_unmap_tx;

	lp->flags |= LDC_FLAG_REGISTERED_QUEUES;

	hv_err = sun4v_ldc_tx_get_state(lp->id,
					&lp->tx_head,
					&lp->tx_tail,
					&lp->chan_state);
	err = -EBUSY;
	if (hv_err)
		goto out_unmap_rx;

	lp->tx_acked = lp->tx_head;

	lp->hs_state = LDC_HS_OPEN;
	ldc_set_state(lp, LDC_STATE_BOUND);

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;

out_unmap_rx:
	lp->flags &= ~LDC_FLAG_REGISTERED_QUEUES;
	sun4v_ldc_rx_qconf(lp->id, 0, 0);

out_unmap_tx:
	sun4v_ldc_tx_qconf(lp->id, 0, 0);

out_free_irqs:
	lp->flags &= ~LDC_FLAG_REGISTERED_IRQS;
	free_irq(lp->cfg.tx_irq, lp);
	free_irq(lp->cfg.rx_irq, lp);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_bind);

int ldc_connect(struct ldc_channel *lp)
{
	unsigned long flags;
	int err;

	if (lp->cfg.mode == LDC_MODE_RAW)
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
	    !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
	    lp->hs_state != LDC_HS_OPEN)
		err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
	else
		err = start_handshake(lp);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_connect);

int ldc_disconnect(struct ldc_channel *lp)
{
	unsigned long hv_err, flags;
	int err;

	if (lp->cfg.mode == LDC_MODE_RAW)
		return -EINVAL;

	if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
	    !(lp->flags & LDC_FLAG_REGISTERED_QUEUES))
		return -EINVAL;

	spin_lock_irqsave(&lp->lock, flags);

	err = -ENODEV;
	hv_err = sun4v_ldc_tx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_tx_qconf(lp->id, lp->tx_ra, lp->tx_num_entries);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_rx_qconf(lp->id, 0, 0);
	if (hv_err)
		goto out_err;

	hv_err = sun4v_ldc_rx_qconf(lp->id, lp->rx_ra, lp->rx_num_entries);
	if (hv_err)
		goto out_err;

	ldc_set_state(lp, LDC_STATE_BOUND);
	lp->hs_state = LDC_HS_OPEN;
	lp->flags |= LDC_FLAG_RESET;

	spin_unlock_irqrestore(&lp->lock, flags);

	return 0;

out_err:
	sun4v_ldc_tx_qconf(lp->id, 0, 0);
	sun4v_ldc_rx_qconf(lp->id, 0, 0);
	free_irq(lp->cfg.tx_irq, lp);
	free_irq(lp->cfg.rx_irq, lp);
	lp->flags &= ~(LDC_FLAG_REGISTERED_IRQS |
		       LDC_FLAG_REGISTERED_QUEUES);
	ldc_set_state(lp, LDC_STATE_INIT);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_disconnect);

int ldc_state(struct ldc_channel *lp)
{
	return lp->state;
}
EXPORT_SYMBOL(ldc_state);
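/* Typical non-RAW channel bring-up, as a sketch (error handling elided;
 * "vdev", "my_event" and "channel_id" are hypothetical names):
 *
 *	struct ldc_channel_config cfg = {
 *		.event	= my_event,
 *		.mode	= LDC_MODE_STREAM,
 *		.rx_irq	= vdev->rx_irq,
 *		.tx_irq	= vdev->tx_irq,
 *	};
 *	struct ldc_channel *lp = ldc_alloc(channel_id, &cfg, vdev, "MYDEV");
 *
 *	ldc_bind(lp);		// registers queues, state -> BOUND
 *	ldc_connect(lp);	// sends VERS INFO, handshake proceeds
 *	// ... wait for LDC_EVENT_UP via the event callback ...
 *	ldc_write(lp, buf, len);
 *	ldc_read(lp, buf, len);
 *	ldc_free(lp);		// unbind plus full teardown
 */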
static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
{
	struct ldc_packet *p;
	unsigned long new_tail;
	int err;

	if (size > LDC_PACKET_SIZE)
		return -EMSGSIZE;

	p = data_get_tx_packet(lp, &new_tail);
	if (!p)
		return -EAGAIN;

	memcpy(p, buf, size);

	err = send_tx_packet(lp, p, new_tail);
	if (!err)
		err = size;

	return err;
}

static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
{
	struct ldc_packet *p;
	unsigned long hv_err, new;
	int err;

	if (size < LDC_PACKET_SIZE)
		return -EINVAL;

	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		return ldc_abort(lp);

	if (lp->chan_state == LDC_CHANNEL_DOWN ||
	    lp->chan_state == LDC_CHANNEL_RESETTING)
		return -ECONNRESET;

	if (lp->rx_head == lp->rx_tail)
		return 0;

	p = lp->rx_base + (lp->rx_head / LDC_PACKET_SIZE);
	memcpy(buf, p, LDC_PACKET_SIZE);

	new = rx_advance(lp, lp->rx_head);
	lp->rx_head = new;

	err = __set_rx_head(lp, new);
	if (err < 0)
		err = -ECONNRESET;
	else
		err = LDC_PACKET_SIZE;

	return err;
}

static const struct ldc_mode_ops raw_ops = {
	.write		=	write_raw,
	.read		=	read_raw,
};
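/* RAW mode, in other words, is a fixed-record transport: every write
 * sends at most one bare 64-byte packet, and every read_raw() call
 * requires a buffer of at least LDC_PACKET_SIZE and consumes exactly
 * one packet.  There are no sequence numbers, ACKs, or fragmentation
 * in this mode.
 */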
static int write_nonraw(struct ldc_channel *lp, const void *buf,
			unsigned int size)
{
	unsigned long hv_err, tail;
	unsigned int copied;
	u32 seq;
	int err;

	hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
					&lp->chan_state);
	if (unlikely(hv_err))
		return -EBUSY;

	if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
		return ldc_abort(lp);

	if (!tx_has_space_for(lp, size))
		return -EAGAIN;

	seq = lp->snd_nxt;
	copied = 0;
	tail = lp->tx_tail;
	while (copied < size) {
		struct ldc_packet *p = lp->tx_base + (tail / LDC_PACKET_SIZE);
		u8 *data = ((lp->cfg.mode == LDC_MODE_UNRELIABLE) ?
			    p->u.u_data :
			    p->u.r.r_data);
		int data_len;

		p->type = LDC_DATA;
		p->stype = LDC_INFO;
		p->ctrl = 0;

		data_len = size - copied;
		if (data_len > lp->mss)
			data_len = lp->mss;

		BUG_ON(data_len > LDC_LEN);

		p->env = (data_len |
			  (copied == 0 ? LDC_START : 0) |
			  (data_len == size - copied ? LDC_STOP : 0));

		p->seqid = ++seq;

		ldcdbg(DATA, "SENT DATA [%02x:%02x:%02x:%02x:%08x]\n",
		       p->type,
		       p->stype,
		       p->ctrl,
		       p->env,
		       p->seqid);

		memcpy(data, buf, data_len);
		buf += data_len;
		copied += data_len;

		tail = tx_advance(lp, tail);
	}

	err = set_tx_tail(lp, tail);
	if (!err) {
		lp->snd_nxt = seq;
		err = size;
	}

	return err;
}
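/* Fragmentation example for write_nonraw(): in UNRELIABLE mode the
 * per-packet payload (lp->mss) is 56 bytes, so a 200-byte write is
 * split into four packets whose env fields are
 *
 *	56 | LDC_START,  56,  56,  32 | LDC_STOP
 *
 * each tagged with the next sequence number.  The tail pointer is only
 * published to the hypervisor once, after all fragments are in the
 * queue.
 */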
static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
		      struct ldc_packet *first_frag)
{
	int err;

	if (first_frag)
		lp->rcv_nxt = first_frag->seqid - 1;

	err = send_data_nack(lp, p);
	if (err)
		return err;

	err = __set_rx_head(lp, lp->rx_tail);
	if (err < 0)
		return ldc_abort(lp);

	return 0;
}

static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
{
	if (p->stype & LDC_ACK) {
		int err = process_data_ack(lp, p);
		if (err)
			return err;
	}
	if (p->stype & LDC_NACK)
		return ldc_abort(lp);

	return 0;
}

static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
{
	unsigned long dummy;
	int limit = 1000;

	ldcdbg(DATA, "DATA WAIT cur_head[%lx] rx_head[%lx] rx_tail[%lx]\n",
	       cur_head, lp->rx_head, lp->rx_tail);
	while (limit-- > 0) {
		unsigned long hv_err;

		hv_err = sun4v_ldc_rx_get_state(lp->id,
						&dummy,
						&lp->rx_tail,
						&lp->chan_state);
		if (hv_err)
			return ldc_abort(lp);

		if (lp->chan_state == LDC_CHANNEL_DOWN ||
		    lp->chan_state == LDC_CHANNEL_RESETTING)
			return -ECONNRESET;

		if (cur_head != lp->rx_tail) {
			ldcdbg(DATA, "DATA WAIT DONE "
			       "head[%lx] tail[%lx] chan_state[%lx]\n",
			       dummy, lp->rx_tail, lp->chan_state);
			return 0;
		}

		udelay(1);
	}
	return -EAGAIN;
}

static int rx_set_head(struct ldc_channel *lp, unsigned long head)
{
	int err = __set_rx_head(lp, head);

	if (err < 0)
		return ldc_abort(lp);

	lp->rx_head = head;
	return 0;
}

static void send_data_ack(struct ldc_channel *lp)
{
	unsigned long new_tail;
	struct ldc_packet *p;

	p = data_get_tx_packet(lp, &new_tail);
	if (likely(p)) {
		int err;

		memset(p, 0, sizeof(*p));
		p->type = LDC_DATA;
		p->stype = LDC_ACK;
		p->ctrl = 0;
		p->seqid = lp->snd_nxt + 1;
		p->u.r.ackid = lp->rcv_nxt;

		err = send_tx_packet(lp, p, new_tail);
		if (!err)
			lp->snd_nxt++;
	}
}

static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
{
	struct ldc_packet *first_frag;
	unsigned long hv_err, new;
	int err, copied;

	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err)
		return ldc_abort(lp);

	if (lp->chan_state == LDC_CHANNEL_DOWN ||
	    lp->chan_state == LDC_CHANNEL_RESETTING)
		return -ECONNRESET;

	if (lp->rx_head == lp->rx_tail)
		return 0;

	first_frag = NULL;
	copied = err = 0;
	new = lp->rx_head;
	while (1) {
		struct ldc_packet *p;
		int pkt_len;

		BUG_ON(new == lp->rx_tail);
		p = lp->rx_base + (new / LDC_PACKET_SIZE);

		ldcdbg(RX, "RX read pkt[%02x:%02x:%02x:%02x:%08x:%08x] "
		       "rcv_nxt[%08x]\n",
		       p->type,
		       p->stype,
		       p->ctrl,
		       p->env,
		       p->seqid,
		       p->u.r.ackid,
		       lp->rcv_nxt);

		if (unlikely(!rx_seq_ok(lp, p->seqid))) {
			err = rx_bad_seq(lp, p, first_frag);
			copied = 0;
			break;
		}

		if (p->type & LDC_CTRL) {
			err = process_control_frame(lp, p);
			if (err < 0)
				break;
			err = 0;
		}

		lp->rcv_nxt = p->seqid;

		if (!(p->type & LDC_DATA)) {
			new = rx_advance(lp, new);
			goto no_data;
		}
		if (p->stype & (LDC_ACK | LDC_NACK)) {
			err = data_ack_nack(lp, p);
			if (err)
				break;
		}
		if (!(p->stype & LDC_INFO)) {
			new = rx_advance(lp, new);
			err = rx_set_head(lp, new);
			if (err)
				break;
			goto no_data;
		}

		pkt_len = p->env & LDC_LEN;

		/* Every initial packet starts with the START bit set.
		 *
		 * Singleton packets will have both START+STOP set.
		 *
		 * Fragments will have START set in the first frame, STOP
		 * set in the last frame, and neither bit set in middle
		 * frames of the packet.
		 *
		 * Therefore if we are at the beginning of a packet and
		 * we don't see START, or we are in the middle of a fragmented
		 * packet and do see START, we are unsynchronized and should
		 * flush the RX queue.
		 */
		if ((first_frag == NULL && !(p->env & LDC_START)) ||
		    (first_frag != NULL &&  (p->env & LDC_START))) {
			if (!first_frag)
				new = rx_advance(lp, new);

			err = rx_set_head(lp, new);
			if (err)
				break;

			if (!first_frag)
				goto no_data;
		}
		if (!first_frag)
			first_frag = p;

		if (pkt_len > size - copied) {
			/* User didn't give us a big enough buffer,
			 * what to do?  This is a pretty serious error.
			 *
			 * Since we haven't updated the RX ring head to
			 * consume any of the packets, signal the error
			 * to the user and just leave the RX ring alone.
			 *
			 * This seems the best behavior because this allows
			 * a user of the LDC layer to start with a small
			 * RX buffer for ldc_read() calls and use -EMSGSIZE
			 * as a cue to enlarge its read buffer.
			 */
			err = -EMSGSIZE;
			break;
		}

		/* Ok, we are gonna eat this one.  */
		new = rx_advance(lp, new);

		memcpy(buf,
		       (lp->cfg.mode == LDC_MODE_UNRELIABLE ?
			p->u.u_data : p->u.r.r_data), pkt_len);
		buf += pkt_len;
		copied += pkt_len;

		if (p->env & LDC_STOP)
			break;

no_data:
		if (new == lp->rx_tail) {
			err = rx_data_wait(lp, new);
			if (err)
				break;
		}
	}

	if (!err)
		err = rx_set_head(lp, new);

	if (err && first_frag)
		lp->rcv_nxt = first_frag->seqid - 1;

	if (!err) {
		err = copied;
		if (err > 0 && lp->cfg.mode != LDC_MODE_UNRELIABLE)
			send_data_ack(lp);
	}

	return err;
}

static const struct ldc_mode_ops nonraw_ops = {
	.write		=	write_nonraw,
	.read		=	read_nonraw,
};

static int write_stream(struct ldc_channel *lp, const void *buf,
			unsigned int size)
{
	if (size > lp->cfg.mtu)
		size = lp->cfg.mtu;
	return write_nonraw(lp, buf, size);
}

static int read_stream(struct ldc_channel *lp, void *buf, unsigned int size)
{
	if (!lp->mssbuf_len) {
		int err = read_nonraw(lp, lp->mssbuf, lp->cfg.mtu);
		if (err < 0)
			return err;

		lp->mssbuf_len = err;
		lp->mssbuf_off = 0;
	}

	if (size > lp->mssbuf_len)
		size = lp->mssbuf_len;
	memcpy(buf, lp->mssbuf + lp->mssbuf_off, size);

	lp->mssbuf_off += size;
	lp->mssbuf_len -= size;

	return size;
}

static const struct ldc_mode_ops stream_ops = {
	.write		=	write_stream,
	.read		=	read_stream,
};
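/* Stream mode thus layers a byte-stream interface on top of the
 * reliable packet engine: read_stream() pulls one complete message (up
 * to lp->cfg.mtu bytes) into lp->mssbuf via read_nonraw(), then
 * satisfies successive caller reads from that buffer until it drains,
 * so a single 200-byte message can be consumed by, say, four 50-byte
 * ldc_read() calls.
 */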
int ldc_write(struct ldc_channel *lp, const void *buf, unsigned int size)
{
	unsigned long flags;
	int err;

	if (!buf)
		return -EINVAL;

	if (!size)
		return 0;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->hs_state != LDC_HS_COMPLETE)
		err = -ENOTCONN;
	else
		err = lp->mops->write(lp, buf, size);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_write);

int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
{
	unsigned long flags;
	int err;

	if (!buf)
		return -EINVAL;

	if (!size)
		return 0;

	spin_lock_irqsave(&lp->lock, flags);

	if (lp->hs_state != LDC_HS_COMPLETE)
		err = -ENOTCONN;
	else
		err = lp->mops->read(lp, buf, size);

	spin_unlock_irqrestore(&lp->lock, flags);

	return err;
}
EXPORT_SYMBOL(ldc_read);

static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long n, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}
	bitmap_set(arena->map, n, npages);

	arena->hint = end;

	return n;
}

#define COOKIE_PGSZ_CODE	0xf000000000000000ULL
#define COOKIE_PGSZ_CODE_SHIFT	60ULL

static u64 pagesize_code(void)
{
	switch (PAGE_SIZE) {
	default:
	case (8ULL * 1024ULL):
		return 0;
	case (64ULL * 1024ULL):
		return 1;
	case (512ULL * 1024ULL):
		return 2;
	case (4ULL * 1024ULL * 1024ULL):
		return 3;
	case (32ULL * 1024ULL * 1024ULL):
		return 4;
	case (256ULL * 1024ULL * 1024ULL):
		return 5;
	}
}

static u64 make_cookie(u64 index, u64 pgsz_code, u64 page_offset)
{
	return ((pgsz_code << COOKIE_PGSZ_CODE_SHIFT) |
		(index << PAGE_SHIFT) |
		page_offset);
}

static u64 cookie_to_index(u64 cookie, unsigned long *shift)
{
	u64 szcode = cookie >> COOKIE_PGSZ_CODE_SHIFT;

	cookie &= ~COOKIE_PGSZ_CODE;

	*shift = szcode * 3;

	return (cookie >> (13ULL + (szcode * 3ULL)));
}
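/* Cookie round trip, assuming 8K pages (szcode 0, PAGE_SHIFT 13):
 * make_cookie(2, 0, 0x100) yields (2 << 13) | 0x100 == 0x4100, and
 * cookie_to_index(0x4100, ...) recovers index 2 by shifting the page
 * offset back out.  For larger page-size codes the top nibble of the
 * cookie carries the code, and each step up multiplies the page size
 * by 8 (8K, 64K, 512K, ...), hence the "szcode * 3" shift adjustments.
 */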
static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
					     unsigned long npages)
{
	long entry;

	entry = arena_alloc(iommu, npages);
	if (unlikely(entry < 0))
		return NULL;

	return iommu->page_table + entry;
}

static u64 perm_to_mte(unsigned int map_perm)
{
	u64 mte_base;

	mte_base = pagesize_code();

	if (map_perm & LDC_MAP_SHADOW) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_COPY_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_COPY_W;
	}
	if (map_perm & LDC_MAP_DIRECT) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_READ;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_WRITE;
		if (map_perm & LDC_MAP_X)
			mte_base |= LDC_MTE_EXEC;
	}
	if (map_perm & LDC_MAP_IO) {
		if (map_perm & LDC_MAP_R)
			mte_base |= LDC_MTE_IOMMU_R;
		if (map_perm & LDC_MAP_W)
			mte_base |= LDC_MTE_IOMMU_W;
	}

	return mte_base;
}

static int pages_in_region(unsigned long base, long len)
{
	int count = 0;

	do {
		unsigned long new = (base + PAGE_SIZE) & PAGE_MASK;

		len -= (new - base);
		base = new;
		count++;
	} while (len > 0);

	return count;
}

struct cookie_state {
	struct ldc_mtable_entry		*page_table;
	struct ldc_trans_cookie		*cookies;
	u64				mte_base;
	u64				prev_cookie;
	u32				pte_idx;
	u32				nc;
};

static void fill_cookies(struct cookie_state *sp, unsigned long pa,
			 unsigned long off, unsigned long len)
{
	do {
		unsigned long tlen, new = pa + PAGE_SIZE;
		u64 this_cookie;

		sp->page_table[sp->pte_idx].mte = sp->mte_base | pa;

		tlen = PAGE_SIZE;
		if (off)
			tlen = PAGE_SIZE - off;
		if (tlen > len)
			tlen = len;

		this_cookie = make_cookie(sp->pte_idx,
					  pagesize_code(), off);

		off = 0;

		if (this_cookie == sp->prev_cookie) {
			sp->cookies[sp->nc - 1].cookie_size += tlen;
		} else {
			sp->cookies[sp->nc].cookie_addr = this_cookie;
			sp->cookies[sp->nc].cookie_size = tlen;
			sp->nc++;
		}
		sp->prev_cookie = this_cookie + tlen;

		sp->pte_idx++;

		len -= tlen;
		pa = new;
	} while (len > 0);
}

static int sg_count_one(struct scatterlist *sg)
{
	unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
	long len = sg->length;

	if ((sg->offset | len) & (8UL - 1))
		return -EFAULT;

	return pages_in_region(base + sg->offset, len);
}

static int sg_count_pages(struct scatterlist *sg, int num_sg)
{
	int count;
	int i;

	count = 0;
	for (i = 0; i < num_sg; i++) {
		int err = sg_count_one(sg + i);
		if (err < 0)
			return err;
		count += err;
	}

	return count;
}

int ldc_map_sg(struct ldc_channel *lp,
	       struct scatterlist *sg, int num_sg,
	       struct ldc_trans_cookie *cookies, int ncookies,
	       unsigned int map_perm)
{
	unsigned long i, npages, flags;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;
	int err;

	if (map_perm & ~LDC_MAP_ALL)
		return -EINVAL;

	err = sg_count_pages(sg, num_sg);
	if (err < 0)
		return err;

	npages = err;
	if (err > ncookies)
		return -EMSGSIZE;

	iommu = &lp->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;

	for (i = 0; i < num_sg; i++)
		fill_cookies(&state, page_to_pfn(sg_page(&sg[i])) << PAGE_SHIFT,
			     sg[i].offset, sg[i].length);

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_sg);
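/* A hypothetical mapping of a two-entry scatterlist might look like
 * this (names illustrative, error handling elided):
 *
 *	struct ldc_trans_cookie cookies[8];
 *	int nc = ldc_map_sg(lp, sg, 2, cookies, 8,
 *			    LDC_MAP_SHADOW | LDC_MAP_R | LDC_MAP_W);
 *
 * On success nc is the number of cookies actually used (adjacent pages
 * that yield contiguous cookies are merged by fill_cookies()), and
 * each cookie's {cookie_addr, cookie_size} pair is what gets handed to
 * the peer for ldc_copy() access.
 */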
int ldc_map_single(struct ldc_channel *lp,
		   void *buf, unsigned int len,
		   struct ldc_trans_cookie *cookies, int ncookies,
		   unsigned int map_perm)
{
	unsigned long npages, pa, flags;
	struct ldc_mtable_entry *base;
	struct cookie_state state;
	struct ldc_iommu *iommu;

	if ((map_perm & ~LDC_MAP_ALL) || (ncookies < 1))
		return -EINVAL;

	pa = __pa(buf);
	if ((pa | len) & (8UL - 1))
		return -EFAULT;

	npages = pages_in_region(pa, len);

	iommu = &lp->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (!base)
		return -ENOMEM;

	state.page_table = iommu->page_table;
	state.cookies = cookies;
	state.mte_base = perm_to_mte(map_perm);
	state.prev_cookie = ~(u64)0;
	state.pte_idx = (base - iommu->page_table);
	state.nc = 0;
	fill_cookies(&state, (pa & PAGE_MASK), (pa & ~PAGE_MASK), len);
	BUG_ON(state.nc > ncookies);

	return state.nc;
}
EXPORT_SYMBOL(ldc_map_single);

static void free_npages(unsigned long id, struct ldc_iommu *iommu,
			u64 cookie, u64 size)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long i, shift, index, npages;
	struct ldc_mtable_entry *base;

	npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
	index = cookie_to_index(cookie, &shift);
	base = iommu->page_table + index;

	BUG_ON(index > arena->limit ||
	       (index + npages) > arena->limit);

	for (i = 0; i < npages; i++) {
		if (base->cookie)
			sun4v_ldc_revoke(id, cookie + (i << shift),
					 base->cookie);
		base->mte = 0;
		__clear_bit(index + i, arena->map);
	}
}

void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
	       int ncookies)
{
	struct ldc_iommu *iommu = &lp->iommu;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	for (i = 0; i < ncookies; i++) {
		u64 addr = cookies[i].cookie_addr;
		u64 size = cookies[i].cookie_size;

		free_npages(lp->id, iommu, addr, size);
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
}
EXPORT_SYMBOL(ldc_unmap);

int ldc_copy(struct ldc_channel *lp, int copy_dir,
	     void *buf, unsigned int len, unsigned long offset,
	     struct ldc_trans_cookie *cookies, int ncookies)
{
	unsigned int orig_len;
	unsigned long ra;
	int i;

	if (copy_dir != LDC_COPY_IN && copy_dir != LDC_COPY_OUT) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Bad copy_dir[%d]\n",
		       lp->id, copy_dir);
		return -EINVAL;
	}

	ra = __pa(buf);
	if ((ra | len | offset) & (8UL - 1)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Unaligned buffer "
		       "ra[%lx] len[%x] offset[%lx]\n",
		       lp->id, ra, len, offset);
		return -EFAULT;
	}

	if (lp->hs_state != LDC_HS_COMPLETE ||
	    (lp->flags & LDC_FLAG_RESET)) {
		printk(KERN_ERR PFX "ldc_copy: ID[%lu] Link down hs_state[%x] "
		       "flags[%x]\n", lp->id, lp->hs_state, lp->flags);
		return -ECONNRESET;
	}

	orig_len = len;
	for (i = 0; i < ncookies; i++) {
		unsigned long cookie_raddr = cookies[i].cookie_addr;
		unsigned long this_len = cookies[i].cookie_size;
		unsigned long actual_len;

		if (unlikely(offset)) {
			unsigned long this_off = offset;

			if (this_off > this_len)
				this_off = this_len;

			offset -= this_off;
			this_len -= this_off;
			if (!this_len)
				continue;
			cookie_raddr += this_off;
		}

		if (this_len > len)
			this_len = len;

		while (1) {
			unsigned long hv_err;

			hv_err = sun4v_ldc_copy(lp->id, copy_dir,
						cookie_raddr, ra,
						this_len, &actual_len);
			if (unlikely(hv_err)) {
				printk(KERN_ERR PFX "ldc_copy: ID[%lu] "
				       "HV error %lu\n",
				       lp->id, hv_err);
				if (lp->hs_state != LDC_HS_COMPLETE ||
				    (lp->flags & LDC_FLAG_RESET))
					return -ECONNRESET;
				else
					return -EFAULT;
			}

			cookie_raddr += actual_len;
			ra += actual_len;
			len -= actual_len;
			if (actual_len == this_len)
				break;

			this_len -= actual_len;
		}

		if (!len)
			break;
	}

	/* It is caller policy what to do about short copies.
	 * For example, a networking driver can declare the
	 * packet a runt and drop it.
	 */

	return orig_len - len;
}
EXPORT_SYMBOL(ldc_copy);
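/* Because the hypervisor may copy less than requested, ldc_copy()
 * returns the number of bytes actually transferred rather than an
 * all-or-nothing status.  A caller that expects a full transfer should
 * therefore compare the return value against len, e.g.:
 *
 *	err = ldc_copy(lp, LDC_COPY_IN, buf, len, 0, cookies, ncookies);
 *	if (err < 0 || err != len)
 *		// handle the error or treat the copy as a runt
 */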
2300 */ 2301 2302 return orig_len - len; 2303 } 2304 EXPORT_SYMBOL(ldc_copy); 2305 2306 void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len, 2307 struct ldc_trans_cookie *cookies, int *ncookies, 2308 unsigned int map_perm) 2309 { 2310 void *buf; 2311 int err; 2312 2313 if (len & (8UL - 1)) 2314 return ERR_PTR(-EINVAL); 2315 2316 buf = kzalloc(len, GFP_KERNEL); 2317 if (!buf) 2318 return ERR_PTR(-ENOMEM); 2319 2320 err = ldc_map_single(lp, buf, len, cookies, *ncookies, map_perm); 2321 if (err < 0) { 2322 kfree(buf); 2323 return ERR_PTR(err); 2324 } 2325 *ncookies = err; 2326 2327 return buf; 2328 } 2329 EXPORT_SYMBOL(ldc_alloc_exp_dring); 2330 2331 void ldc_free_exp_dring(struct ldc_channel *lp, void *buf, unsigned int len, 2332 struct ldc_trans_cookie *cookies, int ncookies) 2333 { 2334 ldc_unmap(lp, cookies, ncookies); 2335 kfree(buf); 2336 } 2337 EXPORT_SYMBOL(ldc_free_exp_dring); 2338 2339 static int __init ldc_init(void) 2340 { 2341 unsigned long major, minor; 2342 struct mdesc_handle *hp; 2343 const u64 *v; 2344 int err; 2345 u64 mp; 2346 2347 hp = mdesc_grab(); 2348 if (!hp) 2349 return -ENODEV; 2350 2351 mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform"); 2352 err = -ENODEV; 2353 if (mp == MDESC_NODE_NULL) 2354 goto out; 2355 2356 v = mdesc_get_property(hp, mp, "domaining-enabled", NULL); 2357 if (!v) 2358 goto out; 2359 2360 major = 1; 2361 minor = 0; 2362 if (sun4v_hvapi_register(HV_GRP_LDOM, major, &minor)) { 2363 printk(KERN_INFO PFX "Could not register LDOM hvapi.\n"); 2364 goto out; 2365 } 2366 2367 printk(KERN_INFO "%s", version); 2368 2369 if (!*v) { 2370 printk(KERN_INFO PFX "Domaining disabled.\n"); 2371 goto out; 2372 } 2373 ldom_domaining_enabled = 1; 2374 err = 0; 2375 2376 out: 2377 mdesc_release(hp); 2378 return err; 2379 } 2380 2381 core_initcall(ldc_init); 2382