// SPDX-License-Identifier: GPL-2.0-only
/* ds.c: Domain Services driver for Logical Domains
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/reboot.h>
#include <linux/cpu.h>

#include <asm/hypervisor.h>
#include <asm/ldc.h>
#include <asm/vio.h>
#include <asm/mdesc.h>
#include <asm/head.h>
#include <asm/irq.h>

#include "kernel.h"

#define DRV_MODULE_NAME		"ds"
#define PFX			DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"Jul 11, 2007"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM domain services driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

struct ds_msg_tag {
	__u32	type;
#define DS_INIT_REQ	0x00
#define DS_INIT_ACK	0x01
#define DS_INIT_NACK	0x02
#define DS_REG_REQ	0x03
#define DS_REG_ACK	0x04
#define DS_REG_NACK	0x05
#define DS_UNREG_REQ	0x06
#define DS_UNREG_ACK	0x07
#define DS_UNREG_NACK	0x08
#define DS_DATA		0x09
#define DS_NACK		0x0a

	__u32	len;
};

/* Result codes */
#define DS_OK		0x00
#define DS_REG_VER_NACK	0x01
#define DS_REG_DUP	0x02
#define DS_INV_HDL	0x03
#define DS_TYPE_UNKNOWN	0x04

struct ds_version {
	__u16	major;
	__u16	minor;
};

struct ds_ver_req {
	struct ds_msg_tag	tag;
	struct ds_version	ver;
};

struct ds_ver_ack {
	struct ds_msg_tag	tag;
	__u16			minor;
};

struct ds_ver_nack {
	struct ds_msg_tag	tag;
	__u16			major;
};

struct ds_reg_req {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			major;
	__u16			minor;
	char			svc_id[0];
};

struct ds_reg_ack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			minor;
};

struct ds_reg_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			major;
};

struct ds_unreg_req {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_unreg_ack {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_unreg_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_data {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_data_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u64			result;
};

struct ds_info;
struct ds_cap_state {
	__u64	handle;

	void	(*data)(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len);

	const char	*service_id;

	u8	state;
#define CAP_STATE_UNKNOWN	0x00
#define CAP_STATE_REG_SENT	0x01
#define CAP_STATE_REGISTERED	0x02
};

static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp,
			   void *buf, int len);
static void domain_shutdown_data(struct ds_info *dp,
				 struct ds_cap_state *cp,
				 void *buf, int len);
static void domain_panic_data(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      void *buf, int len);
#ifdef CONFIG_HOTPLUG_CPU
static void dr_cpu_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len);
#endif
static void ds_pri_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len);
static void ds_var_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len);

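/*
 * Template of the domain services handled by this driver.  ds_probe()
 * gives each domain-services port its own copy of this table (via
 * kmemdup()) so that per-channel registration state can be tracked
 * independently.
 */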
static struct ds_cap_state ds_states_template[] = {
	{
		.service_id	= "md-update",
		.data		= md_update_data,
	},
	{
		.service_id	= "domain-shutdown",
		.data		= domain_shutdown_data,
	},
	{
		.service_id	= "domain-panic",
		.data		= domain_panic_data,
	},
#ifdef CONFIG_HOTPLUG_CPU
	{
		.service_id	= "dr-cpu",
		.data		= dr_cpu_data,
	},
#endif
	{
		.service_id	= "pri",
		.data		= ds_pri_data,
	},
	{
		.service_id	= "var-config",
		.data		= ds_var_data,
	},
	{
		.service_id	= "var-config-backup",
		.data		= ds_var_data,
	},
};

static DEFINE_SPINLOCK(ds_lock);

struct ds_info {
	struct ldc_channel	*lp;
	u8			hs_state;
#define DS_HS_START	0x01
#define DS_HS_DONE	0x02

	u64			id;

	void			*rcv_buf;
	int			rcv_buf_len;

	struct ds_cap_state	*ds_states;
	int			num_ds_states;

	struct ds_info		*next;
};

static struct ds_info *ds_info_list;

static struct ds_cap_state *find_cap(struct ds_info *dp, u64 handle)
{
	unsigned int index = handle >> 32;

	if (index >= dp->num_ds_states)
		return NULL;
	return &dp->ds_states[index];
}

static struct ds_cap_state *find_cap_by_string(struct ds_info *dp,
					       const char *name)
{
	int i;

	for (i = 0; i < dp->num_ds_states; i++) {
		if (strcmp(dp->ds_states[i].service_id, name))
			continue;

		return &dp->ds_states[i];
	}
	return NULL;
}

static int __ds_send(struct ldc_channel *lp, void *data, int len)
{
	int err, limit = 1000;

	err = -EINVAL;
	while (limit-- > 0) {
		err = ldc_write(lp, data, len);
		if (!err || (err != -EAGAIN))
			break;
		udelay(1);
	}

	return err;
}

static int ds_send(struct ldc_channel *lp, void *data, int len)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&ds_lock, flags);
	err = __ds_send(lp, data, len);
	spin_unlock_irqrestore(&ds_lock, flags);

	return err;
}

struct ds_md_update_req {
	__u64	req_num;
};

struct ds_md_update_res {
	__u64	req_num;
	__u32	result;
};

static void md_update_data(struct ds_info *dp,
			   struct ds_cap_state *cp,
			   void *buf, int len)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_data *dpkt = buf;
	struct ds_md_update_req *rp;
	struct {
		struct ds_data		data;
		struct ds_md_update_res	res;
	} pkt;

	rp = (struct ds_md_update_req *) (dpkt + 1);

	printk(KERN_INFO "ds-%llu: Machine description update.\n", dp->id);

	mdesc_update();

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = cp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;

	ds_send(lp, &pkt, sizeof(pkt));
}

struct ds_shutdown_req {
	__u64	req_num;
	__u32	ms_delay;
};

struct ds_shutdown_res {
	__u64	req_num;
	__u32	result;
	char	reason[1];
};

static void domain_shutdown_data(struct ds_info *dp,
				 struct ds_cap_state *cp,
				 void *buf, int len)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_data *dpkt = buf;
	struct ds_shutdown_req *rp;
	struct {
		struct ds_data		data;
		struct ds_shutdown_res	res;
	} pkt;

	rp = (struct ds_shutdown_req *) (dpkt + 1);

	printk(KERN_ALERT "ds-%llu: Shutdown request from "
	       "LDOM manager received.\n", dp->id);

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = cp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;
	pkt.res.reason[0] = 0;

	ds_send(lp, &pkt, sizeof(pkt));

	orderly_poweroff(true);
}

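/*
 * "domain-panic" mirrors "domain-shutdown": the request is acknowledged
 * with DS_OK and then the domain panics on behalf of the LDOM manager.
 */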
struct ds_panic_req {
	__u64	req_num;
};

struct ds_panic_res {
	__u64	req_num;
	__u32	result;
	char	reason[1];
};

static void domain_panic_data(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      void *buf, int len)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_data *dpkt = buf;
	struct ds_panic_req *rp;
	struct {
		struct ds_data		data;
		struct ds_panic_res	res;
	} pkt;

	rp = (struct ds_panic_req *) (dpkt + 1);

	printk(KERN_ALERT "ds-%llu: Panic request from "
	       "LDOM manager received.\n", dp->id);

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = cp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;
	pkt.res.reason[0] = 0;

	ds_send(lp, &pkt, sizeof(pkt));

	panic("PANIC requested by LDOM manager.");
}

#ifdef CONFIG_HOTPLUG_CPU
struct dr_cpu_tag {
	__u64	req_num;
	__u32	type;
#define DR_CPU_CONFIGURE		0x43
#define DR_CPU_UNCONFIGURE		0x55
#define DR_CPU_FORCE_UNCONFIGURE	0x46
#define DR_CPU_STATUS			0x53

/* Responses */
#define DR_CPU_OK			0x6f
#define DR_CPU_ERROR			0x65

	__u32	num_records;
};

struct dr_cpu_resp_entry {
	__u32	cpu;
	__u32	result;
#define DR_CPU_RES_OK			0x00
#define DR_CPU_RES_FAILURE		0x01
#define DR_CPU_RES_BLOCKED		0x02
#define DR_CPU_RES_CPU_NOT_RESPONDING	0x03
#define DR_CPU_RES_NOT_IN_MD		0x04

	__u32	stat;
#define DR_CPU_STAT_NOT_PRESENT		0x00
#define DR_CPU_STAT_UNCONFIGURED	0x01
#define DR_CPU_STAT_CONFIGURED		0x02

	__u32	str_off;
};

static void __dr_cpu_send_error(struct ds_info *dp,
				struct ds_cap_state *cp,
				struct ds_data *data)
{
	struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
	struct {
		struct ds_data		data;
		struct dr_cpu_tag	tag;
	} pkt;
	int msg_len;

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.handle = cp->handle;
	pkt.tag.req_num = tag->req_num;
	pkt.tag.type = DR_CPU_ERROR;
	pkt.tag.num_records = 0;

	msg_len = (sizeof(struct ds_data) +
		   sizeof(struct dr_cpu_tag));

	pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag);

	__ds_send(dp->lp, &pkt, msg_len);
}

static void dr_cpu_send_error(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      struct ds_data *data)
{
	unsigned long flags;

	spin_lock_irqsave(&ds_lock, flags);
	__dr_cpu_send_error(dp, cp, data);
	spin_unlock_irqrestore(&ds_lock, flags);
}

#define CPU_SENTINEL	0xffffffff

static void purge_dups(u32 *list, u32 num_ents)
{
	unsigned int i;

	for (i = 0; i < num_ents; i++) {
		u32 cpu = list[i];
		unsigned int j;

		if (cpu == CPU_SENTINEL)
			continue;

		for (j = i + 1; j < num_ents; j++) {
			if (list[j] == cpu)
				list[j] = CPU_SENTINEL;
		}
	}
}

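/*
 * A dr-cpu response message is a ds_data header followed by a
 * dr_cpu_tag and one dr_cpu_resp_entry per CPU named in the request:
 *
 *	struct ds_data | struct dr_cpu_tag | dr_cpu_resp_entry[ncpus]
 *
 * dr_cpu_size_response() computes the size of that layout.
 */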
static int dr_cpu_size_response(int ncpus)
{
	return (sizeof(struct ds_data) +
		sizeof(struct dr_cpu_tag) +
		(sizeof(struct dr_cpu_resp_entry) * ncpus));
}

static void dr_cpu_init_response(struct ds_data *resp, u64 req_num,
				 u64 handle, int resp_len, int ncpus,
				 cpumask_t *mask, u32 default_stat)
{
	struct dr_cpu_resp_entry *ent;
	struct dr_cpu_tag *tag;
	int i, cpu;

	tag = (struct dr_cpu_tag *) (resp + 1);
	ent = (struct dr_cpu_resp_entry *) (tag + 1);

	resp->tag.type = DS_DATA;
	resp->tag.len = resp_len - sizeof(struct ds_msg_tag);
	resp->handle = handle;
	tag->req_num = req_num;
	tag->type = DR_CPU_OK;
	tag->num_records = ncpus;

	i = 0;
	for_each_cpu(cpu, mask) {
		ent[i].cpu = cpu;
		ent[i].result = DR_CPU_RES_OK;
		ent[i].stat = default_stat;
		i++;
	}
	BUG_ON(i != ncpus);
}

static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
			u32 res, u32 stat)
{
	struct dr_cpu_resp_entry *ent;
	struct dr_cpu_tag *tag;
	int i;

	tag = (struct dr_cpu_tag *) (resp + 1);
	ent = (struct dr_cpu_resp_entry *) (tag + 1);

	for (i = 0; i < ncpus; i++) {
		if (ent[i].cpu != cpu)
			continue;
		ent[i].result = res;
		ent[i].stat = stat;
		break;
	}
}

static int dr_cpu_configure(struct ds_info *dp, struct ds_cap_state *cp,
			    u64 req_num, cpumask_t *mask)
{
	struct ds_data *resp;
	int resp_len, ncpus, cpu;
	unsigned long flags;

	ncpus = cpumask_weight(mask);
	resp_len = dr_cpu_size_response(ncpus);
	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	dr_cpu_init_response(resp, req_num, cp->handle,
			     resp_len, ncpus, mask,
			     DR_CPU_STAT_CONFIGURED);

	mdesc_populate_present_mask(mask);
	mdesc_fill_in_cpu_data(mask);

	for_each_cpu(cpu, mask) {
		int err;

		printk(KERN_INFO "ds-%llu: Starting cpu %d...\n",
		       dp->id, cpu);
		err = add_cpu(cpu);
		if (err) {
			__u32 res = DR_CPU_RES_FAILURE;
			__u32 stat = DR_CPU_STAT_UNCONFIGURED;

			if (!cpu_present(cpu)) {
				/* CPU not present in MD */
				res = DR_CPU_RES_NOT_IN_MD;
				stat = DR_CPU_STAT_NOT_PRESENT;
			} else if (err == -ENODEV) {
				/* CPU did not call in successfully */
				res = DR_CPU_RES_CPU_NOT_RESPONDING;
			}

			printk(KERN_INFO "ds-%llu: CPU startup failed err=%d\n",
			       dp->id, err);
			dr_cpu_mark(resp, cpu, ncpus, res, stat);
		}
	}

	spin_lock_irqsave(&ds_lock, flags);
	__ds_send(dp->lp, resp, resp_len);
	spin_unlock_irqrestore(&ds_lock, flags);

	kfree(resp);

	/* Redistribute IRQs, taking into account the new cpus.  */
	fixup_irqs();

	return 0;
}

static int dr_cpu_unconfigure(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      u64 req_num,
			      cpumask_t *mask)
{
	struct ds_data *resp;
	int resp_len, ncpus, cpu;
	unsigned long flags;

	ncpus = cpumask_weight(mask);
	resp_len = dr_cpu_size_response(ncpus);
	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	dr_cpu_init_response(resp, req_num, cp->handle,
			     resp_len, ncpus, mask,
			     DR_CPU_STAT_UNCONFIGURED);

	for_each_cpu(cpu, mask) {
		int err;

		printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n",
		       dp->id, cpu);
		err = remove_cpu(cpu);
		if (err)
			dr_cpu_mark(resp, cpu, ncpus,
				    DR_CPU_RES_FAILURE,
				    DR_CPU_STAT_CONFIGURED);
	}

	spin_lock_irqsave(&ds_lock, flags);
	__ds_send(dp->lp, resp, resp_len);
	spin_unlock_irqrestore(&ds_lock, flags);

	kfree(resp);

	return 0;
}

static void dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf,
			int len)
{
	struct ds_data *data = buf;
	struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
	u32 *cpu_list = (u32 *) (tag + 1);
	u64 req_num = tag->req_num;
	cpumask_t mask;
	unsigned int i;
	int err;

	switch (tag->type) {
	case DR_CPU_CONFIGURE:
	case DR_CPU_UNCONFIGURE:
	case DR_CPU_FORCE_UNCONFIGURE:
		break;

	default:
		dr_cpu_send_error(dp, cp, data);
		return;
	}

	purge_dups(cpu_list, tag->num_records);

	cpumask_clear(&mask);
	for (i = 0; i < tag->num_records; i++) {
		if (cpu_list[i] == CPU_SENTINEL)
			continue;

		if (cpu_list[i] < nr_cpu_ids)
			cpumask_set_cpu(cpu_list[i], &mask);
	}

	if (tag->type == DR_CPU_CONFIGURE)
		err = dr_cpu_configure(dp, cp, req_num, &mask);
	else
		err = dr_cpu_unconfigure(dp, cp, req_num, &mask);

	if (err)
		dr_cpu_send_error(dp, cp, data);
}
#endif /* CONFIG_HOTPLUG_CPU */

struct ds_pri_msg {
	__u64	req_num;
	__u64	type;
#define DS_PRI_REQUEST	0x00
#define DS_PRI_DATA	0x01
#define DS_PRI_UPDATE	0x02
};

static void ds_pri_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len)
{
	struct ds_data *dpkt = buf;
	struct ds_pri_msg *rp;

	rp = (struct ds_pri_msg *) (dpkt + 1);

	printk(KERN_INFO "ds-%llu: PRI REQ [%llx:%llx], len=%d\n",
	       dp->id, rp->req_num, rp->type, len);
}

struct ds_var_hdr {
	__u32	type;
#define DS_VAR_SET_REQ		0x00
#define DS_VAR_DELETE_REQ	0x01
#define DS_VAR_SET_RESP		0x02
#define DS_VAR_DELETE_RESP	0x03
};

struct ds_var_set_msg {
	struct ds_var_hdr	hdr;
	char			name_and_value[0];
};

struct ds_var_delete_msg {
	struct ds_var_hdr	hdr;
	char			name[0];
};

struct ds_var_resp {
	struct ds_var_hdr	hdr;
	__u32			result;
#define DS_VAR_SUCCESS		0x00
#define DS_VAR_NO_SPACE		0x01
#define DS_VAR_INVALID_VAR	0x02
#define DS_VAR_INVALID_VAL	0x03
#define DS_VAR_NOT_PRESENT	0x04
};

static DEFINE_MUTEX(ds_var_mutex);
static int ds_var_doorbell;
static int ds_var_response;

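/*
 * Variable updates are synchronous from the caller's point of view:
 * ldom_set_var() sends a DS_VAR_SET_REQ and then polls (for up to
 * roughly 100ms) until ds_var_data() posts the result through
 * ds_var_doorbell and ds_var_response.  ds_var_mutex serializes
 * concurrent setters.
 */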
static void ds_var_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len)
{
	struct ds_data *dpkt = buf;
	struct ds_var_resp *rp;

	rp = (struct ds_var_resp *) (dpkt + 1);

	if (rp->hdr.type != DS_VAR_SET_RESP &&
	    rp->hdr.type != DS_VAR_DELETE_RESP)
		return;

	ds_var_response = rp->result;
	wmb();
	ds_var_doorbell = 1;
}

void ldom_set_var(const char *var, const char *value)
{
	struct ds_cap_state *cp;
	struct ds_info *dp;
	unsigned long flags;

	spin_lock_irqsave(&ds_lock, flags);
	cp = NULL;
	for (dp = ds_info_list; dp; dp = dp->next) {
		struct ds_cap_state *tmp;

		tmp = find_cap_by_string(dp, "var-config");
		if (tmp && tmp->state == CAP_STATE_REGISTERED) {
			cp = tmp;
			break;
		}
	}
	if (!cp) {
		for (dp = ds_info_list; dp; dp = dp->next) {
			struct ds_cap_state *tmp;

			tmp = find_cap_by_string(dp, "var-config-backup");
			if (tmp && tmp->state == CAP_STATE_REGISTERED) {
				cp = tmp;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&ds_lock, flags);

	if (cp) {
		union {
			struct {
				struct ds_data		data;
				struct ds_var_set_msg	msg;
			} header;
			char	all[512];
		} pkt;
		char *base, *p;
		int msg_len, loops;

		if (strlen(var) + strlen(value) + 2 >
		    sizeof(pkt) - sizeof(pkt.header)) {
			printk(KERN_ERR PFX
			       "contents length: %zu, which is more than max: %lu, "
			       "so could not set (%s) variable to (%s).\n",
			       strlen(var) + strlen(value) + 2,
			       sizeof(pkt) - sizeof(pkt.header), var, value);
			return;
		}

		memset(&pkt, 0, sizeof(pkt));
		pkt.header.data.tag.type = DS_DATA;
		pkt.header.data.handle = cp->handle;
		pkt.header.msg.hdr.type = DS_VAR_SET_REQ;
		base = p = &pkt.header.msg.name_and_value[0];
		strcpy(p, var);
		p += strlen(var) + 1;
		strcpy(p, value);
		p += strlen(value) + 1;

		msg_len = (sizeof(struct ds_data) +
			   sizeof(struct ds_var_set_msg) +
			   (p - base));
		msg_len = (msg_len + 3) & ~3;
		pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag);

		mutex_lock(&ds_var_mutex);

		spin_lock_irqsave(&ds_lock, flags);
		ds_var_doorbell = 0;
		ds_var_response = -1;

		__ds_send(dp->lp, &pkt, msg_len);
		spin_unlock_irqrestore(&ds_lock, flags);

		loops = 1000;
		while (ds_var_doorbell == 0) {
			if (loops-- < 0)
				break;
			barrier();
			udelay(100);
		}

		mutex_unlock(&ds_var_mutex);

		if (ds_var_doorbell == 0 ||
		    ds_var_response != DS_VAR_SUCCESS)
			printk(KERN_ERR "ds-%llu: var-config [%s:%s] "
			       "failed, response(%d).\n",
			       dp->id, var, value,
			       ds_var_response);
	} else {
		printk(KERN_ERR PFX "var-config not registered so "
		       "could not set (%s) variable to (%s).\n",
		       var, value);
	}
}

static char full_boot_str[256] __attribute__((aligned(32)));
static int reboot_data_supported;

void ldom_reboot(const char *boot_command)
{
	/* Don't bother with any of this if the boot_command
	 * is empty.
	 */
	if (boot_command && strlen(boot_command)) {
		unsigned long len;

		snprintf(full_boot_str, sizeof(full_boot_str), "boot %s",
			 boot_command);
		len = strlen(full_boot_str);

		if (reboot_data_supported) {
			unsigned long ra = kimage_addr_to_ra(full_boot_str);
			unsigned long hv_ret;

			hv_ret = sun4v_reboot_data_set(ra, len);
			if (hv_ret != HV_EOK)
				pr_err("SUN4V: Unable to set reboot data "
				       "hv_ret=%lu\n", hv_ret);
		} else {
			ldom_set_var("reboot-command", full_boot_str);
		}
	}
	sun4v_mach_sir();
}

void ldom_power_off(void)
{
	sun4v_mach_exit(0);
}

static void ds_conn_reset(struct ds_info *dp)
{
	printk(KERN_ERR "ds-%llu: ds_conn_reset() from %ps\n",
	       dp->id, __builtin_return_address(0));
}

static int register_services(struct ds_info *dp)
{
	struct ldc_channel *lp = dp->lp;
	int i;

	for (i = 0; i < dp->num_ds_states; i++) {
		struct {
			struct ds_reg_req req;
			u8 id_buf[256];
		} pbuf;
		struct ds_cap_state *cp = &dp->ds_states[i];
		int err, msg_len;
		u64 new_count;

		if (cp->state == CAP_STATE_REGISTERED)
			continue;

		new_count = sched_clock() & 0xffffffff;
		cp->handle = ((u64) i << 32) | new_count;

		msg_len = (sizeof(struct ds_reg_req) +
			   strlen(cp->service_id));

		memset(&pbuf, 0, sizeof(pbuf));
		pbuf.req.tag.type = DS_REG_REQ;
		pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag));
		pbuf.req.handle = cp->handle;
		pbuf.req.major = 1;
		pbuf.req.minor = 0;
		strcpy(pbuf.id_buf, cp->service_id);

		err = __ds_send(lp, &pbuf, msg_len);
		if (err > 0)
			cp->state = CAP_STATE_REG_SENT;
	}
	return 0;
}

static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt)
{
	if (dp->hs_state == DS_HS_START) {
		if (pkt->type != DS_INIT_ACK)
			goto conn_reset;

		dp->hs_state = DS_HS_DONE;

		return register_services(dp);
	}

	if (dp->hs_state != DS_HS_DONE)
		goto conn_reset;

	if (pkt->type == DS_REG_ACK) {
		struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt;
		struct ds_cap_state *cp = find_cap(dp, ap->handle);

		if (!cp) {
			printk(KERN_ERR "ds-%llu: REG ACK for unknown "
			       "handle %llx\n", dp->id, ap->handle);
			return 0;
		}
		printk(KERN_INFO "ds-%llu: Registered %s service.\n",
		       dp->id, cp->service_id);
		cp->state = CAP_STATE_REGISTERED;
	} else if (pkt->type == DS_REG_NACK) {
		struct ds_reg_nack *np = (struct ds_reg_nack *) pkt;
		struct ds_cap_state *cp = find_cap(dp, np->handle);

		if (!cp) {
			printk(KERN_ERR "ds-%llu: REG NACK for "
			       "unknown handle %llx\n",
			       dp->id, np->handle);
			return 0;
		}
		cp->state = CAP_STATE_UNKNOWN;
	}

	return 0;

conn_reset:
	ds_conn_reset(dp);
	return -ECONNRESET;
}

static void __send_ds_nack(struct ds_info *dp, u64 handle)
{
	struct ds_data_nack nack = {
		.tag = {
			.type = DS_NACK,
			.len = (sizeof(struct ds_data_nack) -
				sizeof(struct ds_msg_tag)),
		},
		.handle = handle,
		.result = DS_INV_HDL,
	};

	__ds_send(dp->lp, &nack, sizeof(nack));
}

static LIST_HEAD(ds_work_list);
static DECLARE_WAIT_QUEUE_HEAD(ds_wait);

struct ds_queue_entry {
	struct list_head	list;
	struct ds_info		*dp;
	int			req_len;
	int			__pad;
	u64			req[0];
};

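/*
 * DS_DATA packets arrive in ds_event(), which runs with ds_lock held
 * from the LDC interrupt path.  They are queued on ds_work_list and
 * handed to the "kldomd" kthread, because the service handlers may
 * sleep (GFP_KERNEL allocations, CPU hotplug, orderly_poweroff(), ...).
 */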
static void process_ds_work(void)
{
	struct ds_queue_entry *qp, *tmp;
	unsigned long flags;
	LIST_HEAD(todo);

	spin_lock_irqsave(&ds_lock, flags);
	list_splice_init(&ds_work_list, &todo);
	spin_unlock_irqrestore(&ds_lock, flags);

	list_for_each_entry_safe(qp, tmp, &todo, list) {
		struct ds_data *dpkt = (struct ds_data *) qp->req;
		struct ds_info *dp = qp->dp;
		struct ds_cap_state *cp = find_cap(dp, dpkt->handle);
		int req_len = qp->req_len;

		if (!cp) {
			printk(KERN_ERR "ds-%llu: Data for unknown "
			       "handle %llu\n",
			       dp->id, dpkt->handle);

			spin_lock_irqsave(&ds_lock, flags);
			__send_ds_nack(dp, dpkt->handle);
			spin_unlock_irqrestore(&ds_lock, flags);
		} else {
			cp->data(dp, cp, dpkt, req_len);
		}

		list_del(&qp->list);
		kfree(qp);
	}
}

static int ds_thread(void *__unused)
{
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&ds_wait, &wait, TASK_INTERRUPTIBLE);
		if (list_empty(&ds_work_list))
			schedule();
		finish_wait(&ds_wait, &wait);

		if (kthread_should_stop())
			break;

		process_ds_work();
	}

	return 0;
}

static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len)
{
	struct ds_data *dpkt = (struct ds_data *) pkt;
	struct ds_queue_entry *qp;

	qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC);
	if (!qp) {
		__send_ds_nack(dp, dpkt->handle);
	} else {
		qp->dp = dp;
		/* Record the request length so process_ds_work() passes
		 * the right size to the service handler.
		 */
		qp->req_len = len;
		memcpy(&qp->req, pkt, len);
		list_add_tail(&qp->list, &ds_work_list);
		wake_up(&ds_wait);
	}
	return 0;
}

static void ds_up(struct ds_info *dp)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_ver_req req;
	int err;

	req.tag.type = DS_INIT_REQ;
	req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag);
	req.ver.major = 1;
	req.ver.minor = 0;

	err = __ds_send(lp, &req, sizeof(req));
	if (err > 0)
		dp->hs_state = DS_HS_START;
}

static void ds_reset(struct ds_info *dp)
{
	int i;

	dp->hs_state = 0;

	for (i = 0; i < dp->num_ds_states; i++) {
		struct ds_cap_state *cp = &dp->ds_states[i];

		cp->state = CAP_STATE_UNKNOWN;
	}
}

static void ds_event(void *arg, int event)
{
	struct ds_info *dp = arg;
	struct ldc_channel *lp = dp->lp;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&ds_lock, flags);

	if (event == LDC_EVENT_UP) {
		ds_up(dp);
		spin_unlock_irqrestore(&ds_lock, flags);
		return;
	}

	if (event == LDC_EVENT_RESET) {
		ds_reset(dp);
		spin_unlock_irqrestore(&ds_lock, flags);
		return;
	}

	if (event != LDC_EVENT_DATA_READY) {
		printk(KERN_WARNING "ds-%llu: Unexpected LDC event %d\n",
		       dp->id, event);
		spin_unlock_irqrestore(&ds_lock, flags);
		return;
	}

	err = 0;
	while (1) {
		struct ds_msg_tag *tag;

		err = ldc_read(lp, dp->rcv_buf, sizeof(*tag));

		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				ds_conn_reset(dp);
			break;
		}
		if (err == 0)
			break;

		tag = dp->rcv_buf;
		err = ldc_read(lp, tag + 1, tag->len);

		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				ds_conn_reset(dp);
			break;
		}
		if (err < tag->len)
			break;

		if (tag->type < DS_DATA)
			err = ds_handshake(dp, dp->rcv_buf);
		else
			err = ds_data(dp, dp->rcv_buf,
				      sizeof(*tag) + err);
		if (err == -ECONNRESET)
			break;
	}

	spin_unlock_irqrestore(&ds_lock, flags);
}

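/*
 * One ds_info is created per "domain-services-port" machine description
 * node; each gets its own LDC channel and its own copy of the service
 * state table.
 */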
static int ds_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	static int ds_version_printed;
	struct ldc_channel_config ds_cfg = {
		.event		= ds_event,
		.mtu		= 4096,
		.mode		= LDC_MODE_STREAM,
	};
	struct mdesc_handle *hp;
	struct ldc_channel *lp;
	struct ds_info *dp;
	const u64 *val;
	int err, i;

	if (ds_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	err = -ENOMEM;
	if (!dp)
		goto out_err;

	hp = mdesc_grab();
	val = mdesc_get_property(hp, vdev->mp, "id", NULL);
	if (val)
		dp->id = *val;
	mdesc_release(hp);

	dp->rcv_buf = kzalloc(4096, GFP_KERNEL);
	if (!dp->rcv_buf)
		goto out_free_dp;

	dp->rcv_buf_len = 4096;

	dp->ds_states = kmemdup(ds_states_template,
				sizeof(ds_states_template), GFP_KERNEL);
	if (!dp->ds_states)
		goto out_free_rcv_buf;

	dp->num_ds_states = ARRAY_SIZE(ds_states_template);

	for (i = 0; i < dp->num_ds_states; i++)
		dp->ds_states[i].handle = ((u64)i << 32);

	ds_cfg.tx_irq = vdev->tx_irq;
	ds_cfg.rx_irq = vdev->rx_irq;

	lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp, "DS");
	if (IS_ERR(lp)) {
		err = PTR_ERR(lp);
		goto out_free_ds_states;
	}
	dp->lp = lp;

	err = ldc_bind(lp);
	if (err)
		goto out_free_ldc;

	spin_lock_irq(&ds_lock);
	dp->next = ds_info_list;
	ds_info_list = dp;
	spin_unlock_irq(&ds_lock);

	return err;

out_free_ldc:
	ldc_free(dp->lp);

out_free_ds_states:
	kfree(dp->ds_states);

out_free_rcv_buf:
	kfree(dp->rcv_buf);

out_free_dp:
	kfree(dp);

out_err:
	return err;
}

static int ds_remove(struct vio_dev *vdev)
{
	return 0;
}

static const struct vio_device_id ds_match[] = {
	{
		.type = "domain-services-port",
	},
	{},
};

static struct vio_driver ds_driver = {
	.id_table	= ds_match,
	.probe		= ds_probe,
	.remove		= ds_remove,
	.name		= "ds",
};

static int __init ds_init(void)
{
	unsigned long hv_ret, major, minor;

	if (tlb_type == hypervisor) {
		hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor);
		if (hv_ret == HV_EOK) {
			pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n",
				major, minor);
			reboot_data_supported = 1;
		}
	}
	kthread_run(ds_thread, NULL, "kldomd");

	return vio_register_driver(&ds_driver);
}

fs_initcall(ds_init);