// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
#define dev_fmt pr_fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(unsigned long);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);

#ifdef DEBUG
static void ipmi_debug_msg(const char *title, unsigned char *data,
			   unsigned int len)
{
	unsigned int i;
	int pos;
	char buf[100];

	pos = snprintf(buf, sizeof(buf), "%s: ", title);
	for (i = 0; i < len; i++) {
		/* Stop once the buffer is full to avoid a negative size. */
		if (pos >= sizeof(buf))
			break;
		pos += snprintf(buf + pos, sizeof(buf) - pos,
				" %2.2x", data[i]);
	}
	pr_debug("%s\n", buf);
}
#else
static void ipmi_debug_msg(const char *title, unsigned char *data,
			   unsigned int len)
{ }
#endif

static bool initialized;
static bool drvregistered;

enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING
};
#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif
static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	char *s;

	strncpy(valcp, val, 15);
	valcp[15] = '\0';

	s = strstrip(valcp);

	if (strcmp(s, "none") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
	else if (strcmp(s, "event") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
	else if (strcmp(s, "string") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
	else
		return -EINVAL;

	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	switch (ipmi_send_panic_event) {
	case IPMI_SEND_PANIC_EVENT_NONE:
		strcpy(buffer, "none");
		break;

	case IPMI_SEND_PANIC_EVENT:
		strcpy(buffer, "event");
		break;

	case IPMI_SEND_PANIC_EVENT_STRING:
		strcpy(buffer, "string");
		break;

	default:
		strcpy(buffer, "???");
		break;
	}

	return strlen(buffer);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");


#define MAX_EVENTS_IN_QUEUE	25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at least
 * the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The number of times to retry sending a message before giving up");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	/*
	 * Set to NULL when the user is destroyed, a pointer to myself
	 * so srcu_dereference can be used on it.
	 */
	struct ipmi_user *self;
	struct srcu_struct release_barrier;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;
};

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
	__acquires(user->release_barrier)
{
	struct ipmi_user *ruser;

	*index = srcu_read_lock(&user->release_barrier);
	ruser = srcu_dereference(user->self, &user->release_barrier);
	if (!ruser)
		srcu_read_unlock(&user->release_barrier, *index);
	return ruser;
}

static void release_ipmi_user(struct ipmi_user *user, int index)
{
	srcu_read_unlock(&user->release_barrier, index);
}
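
/*
 * Typical use of the acquire/release pair (a minimal sketch, not code
 * from this file): every function that works on behalf of a user first
 * pins it against destruction, and bails out if the user is already
 * being torn down.
 *
 *	int hypothetical_op(struct ipmi_user *user)
 *	{
 *		int index;
 *
 *		user = acquire_ipmi_user(user, &index);
 *		if (!user)
 *			return -ENODEV;	// user is being destroyed
 *		// ... safely use user->intf here ...
 *		release_ipmi_user(user, index);
 *		return 0;
 *	}
 */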

struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do {						\
		seq = (((msgid) >> 26) & 0x3f);		\
		seqid = ((msgid) & 0x3ffffff);		\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
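
/*
 * Worked example (values are illustrative only): sequence table slot
 * seq = 5 (6 bits) and rolling seqid = 0x123456 (26 bits) pack into
 *
 *	STORE_SEQ_IN_MSGID(5, 0x123456) == (5 << 26) | 0x123456
 *	                                == 0x14123456
 *
 * and GET_SEQ_FROM_MSGID(0x14123456, seq, seqid) recovers seq = 5 and
 * seqid = 0x123456.  NEXT_SEQID simply increments modulo 2^26.
 */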

#define IPMI_MAX_CHANNELS	16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id id;
	struct ipmi_device_id fetch_id;
	int dyn_id_set;
	unsigned long dyn_id_expiry;
	struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t guid;
	guid_t fetch_guid;
	int dyn_guid_set;
	struct kref usecount;
	struct work_struct remove_work;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
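
/*
 * to_bmc_device() is the usual container_of pattern: given the struct
 * device embedded in pdev, it recovers the enclosing bmc_device.  A
 * minimal sketch of how a driver-model callback might use it (the
 * callback name here is hypothetical):
 *
 *	static void hypothetical_release(struct device *dev)
 *	{
 *		struct bmc_device *bmc = to_bmc_device(dev);
 *		// bmc now points at the structure containing dev
 *	}
 */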

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out on the LAN interface. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent on the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};


#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.  seq_lock write
	 * protects this.  Read protection is with srcu.
	 */
	struct list_head users;
	struct srcu_struct users_srcu;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void *send_info;

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery.  If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
	 * periodic timer interrupt.  The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	spinlock_t xmit_msgs_lock;
	struct list_head xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int waiting_events_count; /* How many events in queue? */
	char delivering_events;
	char event_msg_printed;
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;
	int last_needs_timer;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures.  Used to decrease numbers of
	 * parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);


/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
struct srcu_struct ipmi_interfaces_srcu;

/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
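
/*
 * The stat macros paste the short name onto the IPMI_STAT_ prefix, so
 * callers use the bare counter name.  For example:
 *
 *	ipmi_inc_stat(intf, sent_ipmb_commands);
 *
 * expands to
 *
 *	atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands]);
 */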

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void clean_up_interface_data(struct ipmi_smi *intf)
{
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;
	struct list_head list;

	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);

	clean_up_interface_data(intf);
	kfree(intf);
}

struct watcher_entry {
	int intf_num;
	struct ipmi_smi *intf;
	struct list_head link;
};

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	int index, rv;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		int intf_num = READ_ONCE(intf->intf_num);

		if (intf_num == -1)
			continue;
		watcher->new_smi(intf_num, intf->si_dev);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	mutex_unlock(&smi_watchers_mutex);

	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
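
/*
 * A minimal sketch of registering a watcher (hypothetical client code,
 * assuming my_new_smi/my_smi_gone callbacks exist in the caller):
 *
 *	static struct ipmi_smi_watcher my_watcher = {
 *		.owner    = THIS_MODULE,
 *		.new_smi  = my_new_smi,
 *		.smi_gone = my_smi_gone,
 *	};
 *
 *	rv = ipmi_smi_watcher_register(&my_watcher);
 *
 * new_smi() is called immediately for every interface that already
 * exists, then again as interfaces come and go.
 */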

/*
 * This does *not* need to be called with smi_watchers_mutex held; it
 * takes and releases the mutex itself.
 */
static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
	mutex_unlock(&smi_watchers_mutex);
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
		    = (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
		    = (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
		    = (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
		    = (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
		    = (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);
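
/*
 * A minimal sketch of building and validating a system-interface
 * address (illustrative only):
 *
 *	struct ipmi_system_interface_addr addr = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *
 *	rv = ipmi_validate_addr((struct ipmi_addr *) &addr, sizeof(addr));
 *
 * Any other channel value is rejected with -EINVAL for this address
 * type.
 */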

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (!oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk.  At this moment, simply skip it in
		 * that case.
		 */
		int index;
		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);

		if (user) {
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
			release_ipmi_user(user, index);
		} else {
			/* User went away, give up. */
			ipmi_free_recv_msg(msg);
			rv = -EINVAL;
		}
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}
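
/*
 * In IPMI, a response uses the request's netfn with the low bit set,
 * so ORing in 1 above turns a request into a matching error response.
 * For example, an App request (netfn 0x06) becomes an App response
 * (netfn 0x07) whose single data byte carries the error code.
 */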

/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
			 int retries,
			 int broadcast,
			 unsigned char *seq,
			 long *seqid)
{
	int rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}

/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi *intf,
			 unsigned char seq,
			 short channel,
			 unsigned char cmd,
			 unsigned char netfn,
			 struct ipmi_addr *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long msgid)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long msgid,
			unsigned int err)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}


int ipmi_create_user(unsigned int if_num,
		     const struct ipmi_user_hndl *handler,
		     void *handler_data,
		     struct ipmi_user **user)
{
	unsigned long flags;
	struct ipmi_user *new_user;
	int rv, index;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
	if (!new_user)
		return -ENOMEM;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

 found:
	rv = init_srcu_struct(&new_user->release_barrier);
	if (rv)
		goto out_kfree;

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	rcu_assign_pointer(new_user->self, new_user);
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	if (handler->ipmi_watchdog_pretimeout) {
		/* User wants pretimeouts, so make sure to watch for them. */
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	*user = new_user;
	return 0;

out_kfree:
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	kfree(new_user);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);

int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv, index;
	struct ipmi_smi *intf;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	/* Not found, return an error */
	return -EINVAL;

found:
	if (!intf->handlers->get_smi_info)
		rv = -ENOTTY;
	else
		rv = intf->handlers->get_smi_info(intf->send_info, data);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

static void free_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
	cleanup_srcu_struct(&user->release_barrier);
	kfree(user);
}

static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;
	int i;
	unsigned long flags;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;

	if (!acquire_ipmi_user(user, &i)) {
		/*
		 * The user has already been cleaned up, just make sure
		 * nothing is using it and return.
		 */
		synchronize_srcu(&user->release_barrier);
		return;
	}

	rcu_assign_pointer(user->self, NULL);
	release_ipmi_user(user, i);

	synchronize_srcu(&user->release_barrier);

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		atomic_dec(&intf->event_waiters);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_srcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	kref_put(&intf->refcount, intf_free);
}

int ipmi_destroy_user(struct ipmi_user *user)
{
	_ipmi_destroy_user(user);

	kref_put(&user->refcount, free_user);

	return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);
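
/*
 * A minimal user lifecycle sketch (hypothetical caller; my_hndl and
 * my_recv are assumed to be defined by the client module):
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *	struct ipmi_user *user;
 *
 *	rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *	if (rv)
 *		return rv;
 *	// ... send/receive messages ...
 *	ipmi_destroy_user(user);
 */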

int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char LUN)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);

int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode, index;
	unsigned long flags;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}

int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0, index;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_enable = false;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_enable = true;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
 out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);

int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;
	int index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_local_response(intf, msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);
	release_ipmi_user(user, index);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);
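
/*
 * Event delivery is opt-in per user: after ipmi_create_user(), a
 * client that wants BMC events calls (sketch):
 *
 *	rv = ipmi_set_gets_events(user, true);
 *
 * Any events that were parked on intf->waiting_events (up to
 * MAX_EVENTS_IN_QUEUE) are handed to the user's receive handler at
 * that point.
 */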

static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & chans))
			return 0;
	}
	return 1;
}

int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	if (atomic_inc_return(&intf->event_waiters) == 1)
		need_waiter(intf);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
out_release:
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);
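
/*
 * chans is a bitmask of channel numbers.  A sketch of claiming a
 * command on channels 0 and 1 only (the cmd value is illustrative):
 *
 *	rv = ipmi_register_for_cmd(user, IPMI_NETFN_APP_REQUEST,
 *				   0x01, (1 << 0) | (1 << 1));
 *
 * Registration is exclusive per (netfn, cmd, channel); overlapping
 * claims fail with -EBUSY.
 */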

int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user, index);
	while (rcvrs) {
		atomic_dec(&intf->event_waiters);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
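
/*
 * The IPMB checksum is a two's-complement sum: adding the returned
 * byte to the sum of the covered bytes yields 0 (mod 256).  Worked
 * example: for bytes { 0x20, 0x18 } the sum is 0x38, so the checksum
 * is (0x100 - 0x38) & 0xff = 0xc8, and 0x20 + 0x18 + 0xc8 == 0x100,
 * i.e. 0x00 in eight bits.
 */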

static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}

static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  long msgid,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
{
	/* Format the LAN header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/* Add on the checksum size. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}
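
/*
 * Resulting Send Message payload layout for the LAN case above (byte
 * offsets into smi_msg->data; derived directly from the code, shown
 * here only as a reading aid):
 *
 *	0: netfn/LUN of the Send Message command itself
 *	1: IPMI_SEND_MSG_CMD
 *	2: channel
 *	3: session handle
 *	4: remote SWID           \
 *	5: netfn/LUN of payload   > bytes 4..5 covered by checksum at 6
 *	6: checksum over 4..5    /
 *	7: local SWID
 *	8: (ipmb_seq << 2) | source_lun
 *	9: payload command
 *	10..: payload data, then a trailing checksum over bytes 7..end
 */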

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}


static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = intf->run_to_completion;

	if (run_to_completion) {
		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
	} else {
		unsigned long flags;

		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
	}

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
			      struct ipmi_addr *addr,
			      long msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg *smi_msg,
			      struct ipmi_recv_msg *recv_msg,
			      int retries,
			      unsigned int retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout
			= maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && !intf->maintenance_mode_enable) {
			intf->maintenance_mode_enable = true;
			maintenance_mode_update(intf);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->user_data = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}
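
/*
 * Worked example of the local (SMI) framing above: a Get Device ID
 * request is netfn 0x06 (App), cmd 0x01, LUN 0, so the message sent
 * to the system interface is just
 *
 *	data[0] = (0x06 << 2) | 0 = 0x18
 *	data[1] = 0x01
 *
 * with no further payload.
 */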

static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
			   struct ipmi_addr *addr,
			   long msgid,
			   struct kernel_ipmi_msg *msg,
			   struct ipmi_smi_msg *smi_msg,
			   struct ipmi_recv_msg *recv_msg,
			   unsigned char source_address,
			   unsigned char source_lun,
			   int retries,
			   unsigned int retry_time_ms)
{
	struct ipmi_ipmb_addr *ipmb_addr;
	unsigned char ipmb_seq;
	long seqid;
	int broadcast = 0;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
		/*
		 * Broadcasts add a zero at the beginning of the
		 * message, but otherwise it is the same as an IPMB
		 * address.
		 */
		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
		broadcast = 1;
		retries = 0; /* Don't retry broadcasts. */
	}

	/*
	 * 9 for the header and 1 for the checksum, plus
	 * possibly one for the broadcast.
	 */
	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
	if (ipmb_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_ipmb_responses);
		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
				msgid, broadcast,
				source_address, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
		unsigned long flags;

		spin_lock_irqsave(&intf->seq_lock, flags);

		if (is_maintenance_mode_cmd(msg))
			intf->ipmb_maintenance_mode_timeout =
				maintenance_mode_timeout_ms;

		if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
			/* Different default in maintenance mode */
			retry_time_ms = default_maintenance_retry_ms;

		/*
		 * Create a sequence number with the given (or default)
		 * timeout and retry count.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   broadcast,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_ipmb_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_ipmb_msg(smi_msg, msg, ipmb_addr,
				STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
				ipmb_seq, broadcast,
				source_address, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be correct.
		 */
out_err:
		spin_unlock_irqrestore(&intf->seq_lock, flags);
	}

	return rv;
}
*/ 2038 unsigned long flags; 2039 2040 spin_lock_irqsave(&intf->seq_lock, flags); 2041 2042 /* 2043 * Create an entry in the sequence table with the 2044 * given retry timeout and retry count. 2045 */ 2046 rv = intf_next_seq(intf, 2047 recv_msg, 2048 retry_time_ms, 2049 retries, 2050 0, 2051 &ipmb_seq, 2052 &seqid); 2053 if (rv) 2054 /* 2055 * We have used up all the sequence numbers, 2056 * probably, so abort. 2057 */ 2058 goto out_err; 2059 2060 ipmi_inc_stat(intf, sent_lan_commands); 2061 2062 /* 2063 * Store the sequence number in the message, 2064 * so that when the send message response 2065 * comes back we can start the timer. 2066 */ 2067 format_lan_msg(smi_msg, msg, lan_addr, 2068 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2069 ipmb_seq, source_lun); 2070 2071 /* 2072 * Copy the message into the recv message data, so we 2073 * can retransmit it later if necessary. 2074 */ 2075 memcpy(recv_msg->msg_data, smi_msg->data, 2076 smi_msg->data_size); 2077 recv_msg->msg.data = recv_msg->msg_data; 2078 recv_msg->msg.data_len = smi_msg->data_size; 2079 2080 /* 2081 * We don't unlock until here, because we need 2082 * to copy the completed message into the 2083 * recv_msg before we release the lock. 2084 * Otherwise, race conditions may bite us. I 2085 * know that's pretty paranoid, but I prefer 2086 * to be correct. 2087 */ 2088 out_err: 2089 spin_unlock_irqrestore(&intf->seq_lock, flags); 2090 } 2091 2092 return rv; 2093 } 2094 2095 /* 2096 * Separate from ipmi_request so that the user does not have to be 2097 * supplied in certain circumstances (mainly at panic time). If 2098 * messages are supplied, they will be freed, even if an error 2099 * occurs. 2100 */ 2101 static int i_ipmi_request(struct ipmi_user *user, 2102 struct ipmi_smi *intf, 2103 struct ipmi_addr *addr, 2104 long msgid, 2105 struct kernel_ipmi_msg *msg, 2106 void *user_msg_data, 2107 void *supplied_smi, 2108 struct ipmi_recv_msg *supplied_recv, 2109 int priority, 2110 unsigned char source_address, 2111 unsigned char source_lun, 2112 int retries, 2113 unsigned int retry_time_ms) 2114 { 2115 struct ipmi_smi_msg *smi_msg; 2116 struct ipmi_recv_msg *recv_msg; 2117 int rv = 0; 2118 2119 if (supplied_recv) 2120 recv_msg = supplied_recv; 2121 else { 2122 recv_msg = ipmi_alloc_recv_msg(); 2123 if (recv_msg == NULL) { 2124 rv = -ENOMEM; 2125 goto out; 2126 } 2127 } 2128 recv_msg->user_msg_data = user_msg_data; 2129 2130 if (supplied_smi) 2131 smi_msg = (struct ipmi_smi_msg *) supplied_smi; 2132 else { 2133 smi_msg = ipmi_alloc_smi_msg(); 2134 if (smi_msg == NULL) { 2135 ipmi_free_recv_msg(recv_msg); 2136 rv = -ENOMEM; 2137 goto out; 2138 } 2139 } 2140 2141 rcu_read_lock(); 2142 if (intf->in_shutdown) { 2143 rv = -ENODEV; 2144 goto out_err; 2145 } 2146 2147 recv_msg->user = user; 2148 if (user) 2149 /* The put happens when the message is freed. */ 2150 kref_get(&user->refcount); 2151 recv_msg->msgid = msgid; 2152 /* 2153 * Store the message to send in the receive message so timeout 2154 * responses can get the proper response data.
2155 */ 2156 recv_msg->msg = *msg; 2157 2158 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 2159 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, 2160 recv_msg, retries, retry_time_ms); 2161 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 2162 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, 2163 source_address, source_lun, 2164 retries, retry_time_ms); 2165 } else if (is_lan_addr(addr)) { 2166 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, 2167 source_lun, retries, retry_time_ms); 2168 } else { 2169 /* Unknown address type. */ 2170 ipmi_inc_stat(intf, sent_invalid_commands); 2171 rv = -EINVAL; 2172 } 2173 2174 if (rv) { 2175 out_err: 2176 ipmi_free_smi_msg(smi_msg); 2177 ipmi_free_recv_msg(recv_msg); 2178 } else { 2179 ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size); 2180 2181 smi_send(intf, intf->handlers, smi_msg, priority); 2182 } 2183 rcu_read_unlock(); 2184 2185 out: 2186 return rv; 2187 } 2188 2189 static int check_addr(struct ipmi_smi *intf, 2190 struct ipmi_addr *addr, 2191 unsigned char *saddr, 2192 unsigned char *lun) 2193 { 2194 if (addr->channel >= IPMI_MAX_CHANNELS) 2195 return -EINVAL; 2196 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); 2197 *lun = intf->addrinfo[addr->channel].lun; 2198 *saddr = intf->addrinfo[addr->channel].address; 2199 return 0; 2200 } 2201 2202 int ipmi_request_settime(struct ipmi_user *user, 2203 struct ipmi_addr *addr, 2204 long msgid, 2205 struct kernel_ipmi_msg *msg, 2206 void *user_msg_data, 2207 int priority, 2208 int retries, 2209 unsigned int retry_time_ms) 2210 { 2211 unsigned char saddr = 0, lun = 0; 2212 int rv, index; 2213 2214 if (!user) 2215 return -EINVAL; 2216 2217 user = acquire_ipmi_user(user, &index); 2218 if (!user) 2219 return -ENODEV; 2220 2221 rv = check_addr(user->intf, addr, &saddr, &lun); 2222 if (!rv) 2223 rv = i_ipmi_request(user, 2224 user->intf, 2225 addr, 2226 msgid, 2227 msg, 2228 user_msg_data, 2229 NULL, NULL, 2230 priority, 2231 saddr, 2232 lun, 2233 retries, 2234 retry_time_ms); 2235 2236 release_ipmi_user(user, index); 2237 return rv; 2238 } 2239 EXPORT_SYMBOL(ipmi_request_settime); 2240 2241 int ipmi_request_supply_msgs(struct ipmi_user *user, 2242 struct ipmi_addr *addr, 2243 long msgid, 2244 struct kernel_ipmi_msg *msg, 2245 void *user_msg_data, 2246 void *supplied_smi, 2247 struct ipmi_recv_msg *supplied_recv, 2248 int priority) 2249 { 2250 unsigned char saddr = 0, lun = 0; 2251 int rv, index; 2252 2253 if (!user) 2254 return -EINVAL; 2255 2256 user = acquire_ipmi_user(user, &index); 2257 if (!user) 2258 return -ENODEV; 2259 2260 rv = check_addr(user->intf, addr, &saddr, &lun); 2261 if (!rv) 2262 rv = i_ipmi_request(user, 2263 user->intf, 2264 addr, 2265 msgid, 2266 msg, 2267 user_msg_data, 2268 supplied_smi, 2269 supplied_recv, 2270 priority, 2271 saddr, 2272 lun, 2273 -1, 0); 2274 2275 release_ipmi_user(user, index); 2276 return rv; 2277 } 2278 EXPORT_SYMBOL(ipmi_request_supply_msgs); 2279 2280 static void bmc_device_id_handler(struct ipmi_smi *intf, 2281 struct ipmi_recv_msg *msg) 2282 { 2283 int rv; 2284 2285 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2286 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2287 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { 2288 dev_warn(intf->si_dev, 2289 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", 2290 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); 2291 return; 2292 } 2293 2294 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, 2295 
msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); 2296 if (rv) { 2297 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); 2298 intf->bmc->dyn_id_set = 0; 2299 } else { 2300 /* 2301 * Make sure the id data is available before setting 2302 * dyn_id_set. 2303 */ 2304 smp_wmb(); 2305 intf->bmc->dyn_id_set = 1; 2306 } 2307 2308 wake_up(&intf->waitq); 2309 } 2310 2311 static int 2312 send_get_device_id_cmd(struct ipmi_smi *intf) 2313 { 2314 struct ipmi_system_interface_addr si; 2315 struct kernel_ipmi_msg msg; 2316 2317 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2318 si.channel = IPMI_BMC_CHANNEL; 2319 si.lun = 0; 2320 2321 msg.netfn = IPMI_NETFN_APP_REQUEST; 2322 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 2323 msg.data = NULL; 2324 msg.data_len = 0; 2325 2326 return i_ipmi_request(NULL, 2327 intf, 2328 (struct ipmi_addr *) &si, 2329 0, 2330 &msg, 2331 intf, 2332 NULL, 2333 NULL, 2334 0, 2335 intf->addrinfo[0].address, 2336 intf->addrinfo[0].lun, 2337 -1, 0); 2338 } 2339 2340 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) 2341 { 2342 int rv; 2343 2344 bmc->dyn_id_set = 2; 2345 2346 intf->null_user_handler = bmc_device_id_handler; 2347 2348 rv = send_get_device_id_cmd(intf); 2349 if (rv) 2350 return rv; 2351 2352 wait_event(intf->waitq, bmc->dyn_id_set != 2); 2353 2354 if (!bmc->dyn_id_set) 2355 rv = -EIO; /* Something went wrong in the fetch. */ 2356 2357 /* dyn_id_set makes the id data available. */ 2358 smp_rmb(); 2359 2360 intf->null_user_handler = NULL; 2361 2362 return rv; 2363 } 2364 2365 /* 2366 * Fetch the device id for the bmc/interface. You must pass in either 2367 * bmc or intf; this code will get the other one. If the data has 2368 * been recently fetched, this will just use the cached data. Otherwise 2369 * it will run a new fetch. 2370 * 2371 * Except for the first time this is called (in ipmi_register_smi()), 2372 * this will always return good data. 2373 */ 2374 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2375 struct ipmi_device_id *id, 2376 bool *guid_set, guid_t *guid, int intf_num) 2377 { 2378 int rv = 0; 2379 int prev_dyn_id_set, prev_guid_set; 2380 bool intf_set = intf != NULL; 2381 2382 if (!intf) { 2383 mutex_lock(&bmc->dyn_mutex); 2384 retry_bmc_lock: 2385 if (list_empty(&bmc->intfs)) { 2386 mutex_unlock(&bmc->dyn_mutex); 2387 return -ENOENT; 2388 } 2389 intf = list_first_entry(&bmc->intfs, struct ipmi_smi, 2390 bmc_link); 2391 kref_get(&intf->refcount); 2392 mutex_unlock(&bmc->dyn_mutex); 2393 mutex_lock(&intf->bmc_reg_mutex); 2394 mutex_lock(&bmc->dyn_mutex); 2395 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi, 2396 bmc_link)) { 2397 mutex_unlock(&intf->bmc_reg_mutex); 2398 kref_put(&intf->refcount, intf_free); 2399 goto retry_bmc_lock; 2400 } 2401 } else { 2402 mutex_lock(&intf->bmc_reg_mutex); 2403 bmc = intf->bmc; 2404 mutex_lock(&bmc->dyn_mutex); 2405 kref_get(&intf->refcount); 2406 } 2407 2408 /* If we have a valid and current ID, just return that. */ 2409 if (intf->in_bmc_register || 2410 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry))) 2411 goto out_noprocessing; 2412 2413 prev_guid_set = bmc->dyn_guid_set; 2414 __get_guid(intf); 2415 2416 prev_dyn_id_set = bmc->dyn_id_set; 2417 rv = __get_device_id(intf, bmc); 2418 if (rv) 2419 goto out; 2420 2421 /* 2422 * The guid, device id, manufacturer id, and product id should 2423 * not change on a BMC. If they do, we have to do some dancing.
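 * The dance: unregister the current bmc_device and register a
 * fresh one using the newly fetched identity, as done below.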
2424 */ 2425 if (!intf->bmc_registered 2426 || (!prev_guid_set && bmc->dyn_guid_set) 2427 || (!prev_dyn_id_set && bmc->dyn_id_set) 2428 || (prev_guid_set && bmc->dyn_guid_set 2429 && !guid_equal(&bmc->guid, &bmc->fetch_guid)) 2430 || bmc->id.device_id != bmc->fetch_id.device_id 2431 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id 2432 || bmc->id.product_id != bmc->fetch_id.product_id) { 2433 struct ipmi_device_id id = bmc->fetch_id; 2434 int guid_set = bmc->dyn_guid_set; 2435 guid_t guid; 2436 2437 guid = bmc->fetch_guid; 2438 mutex_unlock(&bmc->dyn_mutex); 2439 2440 __ipmi_bmc_unregister(intf); 2441 /* Fill in the temporary BMC for good measure. */ 2442 intf->bmc->id = id; 2443 intf->bmc->dyn_guid_set = guid_set; 2444 intf->bmc->guid = guid; 2445 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num)) 2446 need_waiter(intf); /* Retry later on an error. */ 2447 else 2448 __scan_channels(intf, &id); 2449 2450 2451 if (!intf_set) { 2452 /* 2453 * We weren't given an interface by the 2454 * caller, so restart the operation on 2455 * the next interface for the BMC. 2456 */ 2457 mutex_unlock(&intf->bmc_reg_mutex); 2458 mutex_lock(&bmc->dyn_mutex); 2459 goto retry_bmc_lock; 2460 } 2461 2462 /* We have a new BMC, set it up. */ 2463 bmc = intf->bmc; 2464 mutex_lock(&bmc->dyn_mutex); 2465 goto out_noprocessing; 2466 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id))) 2467 /* Version info changed; scan the channels again. */ 2468 __scan_channels(intf, &bmc->fetch_id); 2469 2470 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 2471 2472 out: 2473 if (rv && prev_dyn_id_set) { 2474 rv = 0; /* Ignore failures if we have previous data. */ 2475 bmc->dyn_id_set = prev_dyn_id_set; 2476 } 2477 if (!rv) { 2478 bmc->id = bmc->fetch_id; 2479 if (bmc->dyn_guid_set) 2480 bmc->guid = bmc->fetch_guid; 2481 else if (prev_guid_set) 2482 /* 2483 * The GUID used to be valid but this fetch failed; 2484 * keep using the cached value.
2485 */ 2486 bmc->dyn_guid_set = prev_guid_set; 2487 } 2488 out_noprocessing: 2489 if (!rv) { 2490 if (id) 2491 *id = bmc->id; 2492 2493 if (guid_set) 2494 *guid_set = bmc->dyn_guid_set; 2495 2496 if (guid && bmc->dyn_guid_set) 2497 *guid = bmc->guid; 2498 } 2499 2500 mutex_unlock(&bmc->dyn_mutex); 2501 mutex_unlock(&intf->bmc_reg_mutex); 2502 2503 kref_put(&intf->refcount, intf_free); 2504 return rv; 2505 } 2506 2507 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2508 struct ipmi_device_id *id, 2509 bool *guid_set, guid_t *guid) 2510 { 2511 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); 2512 } 2513 2514 static ssize_t device_id_show(struct device *dev, 2515 struct device_attribute *attr, 2516 char *buf) 2517 { 2518 struct bmc_device *bmc = to_bmc_device(dev); 2519 struct ipmi_device_id id; 2520 int rv; 2521 2522 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2523 if (rv) 2524 return rv; 2525 2526 return snprintf(buf, 10, "%u\n", id.device_id); 2527 } 2528 static DEVICE_ATTR_RO(device_id); 2529 2530 static ssize_t provides_device_sdrs_show(struct device *dev, 2531 struct device_attribute *attr, 2532 char *buf) 2533 { 2534 struct bmc_device *bmc = to_bmc_device(dev); 2535 struct ipmi_device_id id; 2536 int rv; 2537 2538 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2539 if (rv) 2540 return rv; 2541 2542 return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7); 2543 } 2544 static DEVICE_ATTR_RO(provides_device_sdrs); 2545 2546 static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2547 char *buf) 2548 { 2549 struct bmc_device *bmc = to_bmc_device(dev); 2550 struct ipmi_device_id id; 2551 int rv; 2552 2553 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2554 if (rv) 2555 return rv; 2556 2557 return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F); 2558 } 2559 static DEVICE_ATTR_RO(revision); 2560 2561 static ssize_t firmware_revision_show(struct device *dev, 2562 struct device_attribute *attr, 2563 char *buf) 2564 { 2565 struct bmc_device *bmc = to_bmc_device(dev); 2566 struct ipmi_device_id id; 2567 int rv; 2568 2569 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2570 if (rv) 2571 return rv; 2572 2573 return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1, 2574 id.firmware_revision_2); 2575 } 2576 static DEVICE_ATTR_RO(firmware_revision); 2577 2578 static ssize_t ipmi_version_show(struct device *dev, 2579 struct device_attribute *attr, 2580 char *buf) 2581 { 2582 struct bmc_device *bmc = to_bmc_device(dev); 2583 struct ipmi_device_id id; 2584 int rv; 2585 2586 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2587 if (rv) 2588 return rv; 2589 2590 return snprintf(buf, 20, "%u.%u\n", 2591 ipmi_version_major(&id), 2592 ipmi_version_minor(&id)); 2593 } 2594 static DEVICE_ATTR_RO(ipmi_version); 2595 2596 static ssize_t add_dev_support_show(struct device *dev, 2597 struct device_attribute *attr, 2598 char *buf) 2599 { 2600 struct bmc_device *bmc = to_bmc_device(dev); 2601 struct ipmi_device_id id; 2602 int rv; 2603 2604 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2605 if (rv) 2606 return rv; 2607 2608 return snprintf(buf, 10, "0x%02x\n", id.additional_device_support); 2609 } 2610 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, 2611 NULL); 2612 2613 static ssize_t manufacturer_id_show(struct device *dev, 2614 struct device_attribute *attr, 2615 char *buf) 2616 { 2617 struct bmc_device *bmc = to_bmc_device(dev); 2618 struct ipmi_device_id id; 2619 int 
rv; 2620 2621 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2622 if (rv) 2623 return rv; 2624 2625 return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id); 2626 } 2627 static DEVICE_ATTR_RO(manufacturer_id); 2628 2629 static ssize_t product_id_show(struct device *dev, 2630 struct device_attribute *attr, 2631 char *buf) 2632 { 2633 struct bmc_device *bmc = to_bmc_device(dev); 2634 struct ipmi_device_id id; 2635 int rv; 2636 2637 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2638 if (rv) 2639 return rv; 2640 2641 return snprintf(buf, 10, "0x%4.4x\n", id.product_id); 2642 } 2643 static DEVICE_ATTR_RO(product_id); 2644 2645 static ssize_t aux_firmware_rev_show(struct device *dev, 2646 struct device_attribute *attr, 2647 char *buf) 2648 { 2649 struct bmc_device *bmc = to_bmc_device(dev); 2650 struct ipmi_device_id id; 2651 int rv; 2652 2653 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2654 if (rv) 2655 return rv; 2656 2657 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2658 id.aux_firmware_revision[3], 2659 id.aux_firmware_revision[2], 2660 id.aux_firmware_revision[1], 2661 id.aux_firmware_revision[0]); 2662 } 2663 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2664 2665 static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2666 char *buf) 2667 { 2668 struct bmc_device *bmc = to_bmc_device(dev); 2669 bool guid_set; 2670 guid_t guid; 2671 int rv; 2672 2673 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid); 2674 if (rv) 2675 return rv; 2676 if (!guid_set) 2677 return -ENOENT; 2678 2679 return snprintf(buf, 38, "%pUl\n", guid.b); 2680 } 2681 static DEVICE_ATTR_RO(guid); 2682 2683 static struct attribute *bmc_dev_attrs[] = { 2684 &dev_attr_device_id.attr, 2685 &dev_attr_provides_device_sdrs.attr, 2686 &dev_attr_revision.attr, 2687 &dev_attr_firmware_revision.attr, 2688 &dev_attr_ipmi_version.attr, 2689 &dev_attr_additional_device_support.attr, 2690 &dev_attr_manufacturer_id.attr, 2691 &dev_attr_product_id.attr, 2692 &dev_attr_aux_firmware_revision.attr, 2693 &dev_attr_guid.attr, 2694 NULL 2695 }; 2696 2697 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, 2698 struct attribute *attr, int idx) 2699 { 2700 struct device *dev = kobj_to_dev(kobj); 2701 struct bmc_device *bmc = to_bmc_device(dev); 2702 umode_t mode = attr->mode; 2703 int rv; 2704 2705 if (attr == &dev_attr_aux_firmware_revision.attr) { 2706 struct ipmi_device_id id; 2707 2708 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2709 return (!rv && id.aux_firmware_revision_set) ? mode : 0; 2710 } 2711 if (attr == &dev_attr_guid.attr) { 2712 bool guid_set; 2713 2714 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); 2715 return (!rv && guid_set) ? 
mode : 0; 2716 } 2717 return mode; 2718 } 2719 2720 static const struct attribute_group bmc_dev_attr_group = { 2721 .attrs = bmc_dev_attrs, 2722 .is_visible = bmc_dev_attr_is_visible, 2723 }; 2724 2725 static const struct attribute_group *bmc_dev_attr_groups[] = { 2726 &bmc_dev_attr_group, 2727 NULL 2728 }; 2729 2730 static const struct device_type bmc_device_type = { 2731 .groups = bmc_dev_attr_groups, 2732 }; 2733 2734 static int __find_bmc_guid(struct device *dev, void *data) 2735 { 2736 guid_t *guid = data; 2737 struct bmc_device *bmc; 2738 int rv; 2739 2740 if (dev->type != &bmc_device_type) 2741 return 0; 2742 2743 bmc = to_bmc_device(dev); 2744 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid); 2745 if (rv) 2746 rv = kref_get_unless_zero(&bmc->usecount); 2747 return rv; 2748 } 2749 2750 /* 2751 * Returns with the bmc's usecount incremented, if it is non-NULL. 2752 */ 2753 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, 2754 guid_t *guid) 2755 { 2756 struct device *dev; 2757 struct bmc_device *bmc = NULL; 2758 2759 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 2760 if (dev) { 2761 bmc = to_bmc_device(dev); 2762 put_device(dev); 2763 } 2764 return bmc; 2765 } 2766 2767 struct prod_dev_id { 2768 unsigned int product_id; 2769 unsigned char device_id; 2770 }; 2771 2772 static int __find_bmc_prod_dev_id(struct device *dev, void *data) 2773 { 2774 struct prod_dev_id *cid = data; 2775 struct bmc_device *bmc; 2776 int rv; 2777 2778 if (dev->type != &bmc_device_type) 2779 return 0; 2780 2781 bmc = to_bmc_device(dev); 2782 rv = (bmc->id.product_id == cid->product_id 2783 && bmc->id.device_id == cid->device_id); 2784 if (rv) 2785 rv = kref_get_unless_zero(&bmc->usecount); 2786 return rv; 2787 } 2788 2789 /* 2790 * Returns with the bmc's usecount incremented, if it is non-NULL. 2791 */ 2792 static struct bmc_device *ipmi_find_bmc_prod_dev_id( 2793 struct device_driver *drv, 2794 unsigned int product_id, unsigned char device_id) 2795 { 2796 struct prod_dev_id id = { 2797 .product_id = product_id, 2798 .device_id = device_id, 2799 }; 2800 struct device *dev; 2801 struct bmc_device *bmc = NULL; 2802 2803 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 2804 if (dev) { 2805 bmc = to_bmc_device(dev); 2806 put_device(dev); 2807 } 2808 return bmc; 2809 } 2810 2811 static DEFINE_IDA(ipmi_bmc_ida); 2812 2813 static void 2814 release_bmc_device(struct device *dev) 2815 { 2816 kfree(to_bmc_device(dev)); 2817 } 2818 2819 static void cleanup_bmc_work(struct work_struct *work) 2820 { 2821 struct bmc_device *bmc = container_of(work, struct bmc_device, 2822 remove_work); 2823 int id = bmc->pdev.id; /* Unregister overwrites id */ 2824 2825 platform_device_unregister(&bmc->pdev); 2826 ida_simple_remove(&ipmi_bmc_ida, id); 2827 } 2828 2829 static void 2830 cleanup_bmc_device(struct kref *ref) 2831 { 2832 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 2833 2834 /* 2835 * Remove the platform device in a work queue to avoid issues 2836 * with removing the device attributes while reading a device 2837 * attribute. 2838 */ 2839 schedule_work(&bmc->remove_work); 2840 } 2841 2842 /* 2843 * Must be called with intf->bmc_reg_mutex held. 
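 * The mutex serializes updates to intf->bmc and the sysfs links
 * that are torn down here.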
2844 */ 2845 static void __ipmi_bmc_unregister(struct ipmi_smi *intf) 2846 { 2847 struct bmc_device *bmc = intf->bmc; 2848 2849 if (!intf->bmc_registered) 2850 return; 2851 2852 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 2853 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name); 2854 kfree(intf->my_dev_name); 2855 intf->my_dev_name = NULL; 2856 2857 mutex_lock(&bmc->dyn_mutex); 2858 list_del(&intf->bmc_link); 2859 mutex_unlock(&bmc->dyn_mutex); 2860 intf->bmc = &intf->tmp_bmc; 2861 kref_put(&bmc->usecount, cleanup_bmc_device); 2862 intf->bmc_registered = false; 2863 } 2864 2865 static void ipmi_bmc_unregister(struct ipmi_smi *intf) 2866 { 2867 mutex_lock(&intf->bmc_reg_mutex); 2868 __ipmi_bmc_unregister(intf); 2869 mutex_unlock(&intf->bmc_reg_mutex); 2870 } 2871 2872 /* 2873 * Must be called with intf->bmc_reg_mutex held. 2874 */ 2875 static int __ipmi_bmc_register(struct ipmi_smi *intf, 2876 struct ipmi_device_id *id, 2877 bool guid_set, guid_t *guid, int intf_num) 2878 { 2879 int rv; 2880 struct bmc_device *bmc; 2881 struct bmc_device *old_bmc; 2882 2883 /* 2884 * platform_device_register() can cause bmc_reg_mutex to 2885 * be claimed because of the is_visible functions of 2886 * the attributes. Eliminate possible recursion and 2887 * release the lock. 2888 */ 2889 intf->in_bmc_register = true; 2890 mutex_unlock(&intf->bmc_reg_mutex); 2891 2892 /* 2893 * See if there is already a bmc_device struct 2894 * representing this BMC. 2895 */ 2896 mutex_lock(&ipmidriver_mutex); 2897 if (guid_set) 2898 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid); 2899 else 2900 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver, 2901 id->product_id, 2902 id->device_id); 2903 2904 /* 2905 * If there is already a bmc_device, use it; otherwise 2906 * register a new BMC device. 2907 */ 2908 if (old_bmc) { 2909 bmc = old_bmc; 2910 /* 2911 * Note: old_bmc already has usecount incremented by 2912 * the BMC find functions.
2913 */ 2914 intf->bmc = old_bmc; 2915 mutex_lock(&bmc->dyn_mutex); 2916 list_add_tail(&intf->bmc_link, &bmc->intfs); 2917 mutex_unlock(&bmc->dyn_mutex); 2918 2919 dev_info(intf->si_dev, 2920 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 2921 bmc->id.manufacturer_id, 2922 bmc->id.product_id, 2923 bmc->id.device_id); 2924 } else { 2925 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL); 2926 if (!bmc) { 2927 rv = -ENOMEM; 2928 goto out; 2929 } 2930 INIT_LIST_HEAD(&bmc->intfs); 2931 mutex_init(&bmc->dyn_mutex); 2932 INIT_WORK(&bmc->remove_work, cleanup_bmc_work); 2933 2934 bmc->id = *id; 2935 bmc->dyn_id_set = 1; 2936 bmc->dyn_guid_set = guid_set; 2937 bmc->guid = *guid; 2938 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 2939 2940 bmc->pdev.name = "ipmi_bmc"; 2941 2942 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL); 2943 if (rv < 0) 2944 goto out; 2945 bmc->pdev.dev.driver = &ipmidriver.driver; 2946 bmc->pdev.id = rv; 2947 bmc->pdev.dev.release = release_bmc_device; 2948 bmc->pdev.dev.type = &bmc_device_type; 2949 kref_init(&bmc->usecount); 2950 2951 intf->bmc = bmc; 2952 mutex_lock(&bmc->dyn_mutex); 2953 list_add_tail(&intf->bmc_link, &bmc->intfs); 2954 mutex_unlock(&bmc->dyn_mutex); 2955 2956 rv = platform_device_register(&bmc->pdev); 2957 if (rv) { 2958 dev_err(intf->si_dev, 2959 "Unable to register bmc device: %d\n", 2960 rv); 2961 goto out_list_del; 2962 } 2963 2964 dev_info(intf->si_dev, 2965 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 2966 bmc->id.manufacturer_id, 2967 bmc->id.product_id, 2968 bmc->id.device_id); 2969 } 2970 2971 /* 2972 * create symlink from system interface device to bmc device 2973 * and back. 2974 */ 2975 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); 2976 if (rv) { 2977 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); 2978 goto out_put_bmc; 2979 } 2980 2981 if (intf_num == -1) 2982 intf_num = intf->intf_num; 2983 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); 2984 if (!intf->my_dev_name) { 2985 rv = -ENOMEM; 2986 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", 2987 rv); 2988 goto out_unlink1; 2989 } 2990 2991 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, 2992 intf->my_dev_name); 2993 if (rv) { 2994 kfree(intf->my_dev_name); 2995 intf->my_dev_name = NULL; 2996 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", 2997 rv); 2998 goto out_free_my_dev_name; 2999 } 3000 3001 intf->bmc_registered = true; 3002 3003 out: 3004 mutex_unlock(&ipmidriver_mutex); 3005 mutex_lock(&intf->bmc_reg_mutex); 3006 intf->in_bmc_register = false; 3007 return rv; 3008 3009 3010 out_free_my_dev_name: 3011 kfree(intf->my_dev_name); 3012 intf->my_dev_name = NULL; 3013 3014 out_unlink1: 3015 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3016 3017 out_put_bmc: 3018 mutex_lock(&bmc->dyn_mutex); 3019 list_del(&intf->bmc_link); 3020 mutex_unlock(&bmc->dyn_mutex); 3021 intf->bmc = &intf->tmp_bmc; 3022 kref_put(&bmc->usecount, cleanup_bmc_device); 3023 goto out; 3024 3025 out_list_del: 3026 mutex_lock(&bmc->dyn_mutex); 3027 list_del(&intf->bmc_link); 3028 mutex_unlock(&bmc->dyn_mutex); 3029 intf->bmc = &intf->tmp_bmc; 3030 put_device(&bmc->pdev.dev); 3031 goto out; 3032 } 3033 3034 static int 3035 send_guid_cmd(struct ipmi_smi *intf, int chan) 3036 { 3037 struct kernel_ipmi_msg msg; 3038 struct ipmi_system_interface_addr si; 3039 3040 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3041 si.channel = IPMI_BMC_CHANNEL; 3042 
si.lun = 0; 3043 3044 msg.netfn = IPMI_NETFN_APP_REQUEST; 3045 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 3046 msg.data = NULL; 3047 msg.data_len = 0; 3048 return i_ipmi_request(NULL, 3049 intf, 3050 (struct ipmi_addr *) &si, 3051 0, 3052 &msg, 3053 intf, 3054 NULL, 3055 NULL, 3056 0, 3057 intf->addrinfo[0].address, 3058 intf->addrinfo[0].lun, 3059 -1, 0); 3060 } 3061 3062 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3063 { 3064 struct bmc_device *bmc = intf->bmc; 3065 3066 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3067 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 3068 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 3069 /* Not for me */ 3070 return; 3071 3072 if (msg->msg.data[0] != 0) { 3073 /* Error from getting the GUID, the BMC doesn't have one. */ 3074 bmc->dyn_guid_set = 0; 3075 goto out; 3076 } 3077 3078 if (msg->msg.data_len < 17) { 3079 bmc->dyn_guid_set = 0; 3080 dev_warn(intf->si_dev, 3081 "The GUID response from the BMC was too short, it was %d but should have been 17. Assuming GUID is not available.\n", 3082 msg->msg.data_len); 3083 goto out; 3084 } 3085 3086 memcpy(bmc->fetch_guid.b, msg->msg.data + 1, 16); 3087 /* 3088 * Make sure the guid data is available before setting 3089 * dyn_guid_set. 3090 */ 3091 smp_wmb(); 3092 bmc->dyn_guid_set = 1; 3093 out: 3094 wake_up(&intf->waitq); 3095 } 3096 3097 static void __get_guid(struct ipmi_smi *intf) 3098 { 3099 int rv; 3100 struct bmc_device *bmc = intf->bmc; 3101 3102 bmc->dyn_guid_set = 2; 3103 intf->null_user_handler = guid_handler; 3104 rv = send_guid_cmd(intf, 0); 3105 if (rv) 3106 /* Send failed, no GUID available. */ 3107 bmc->dyn_guid_set = 0; 3108 3109 wait_event(intf->waitq, bmc->dyn_guid_set != 2); 3110 3111 /* dyn_guid_set makes the guid data available. */ 3112 smp_rmb(); 3113 3114 intf->null_user_handler = NULL; 3115 } 3116 3117 static int 3118 send_channel_info_cmd(struct ipmi_smi *intf, int chan) 3119 { 3120 struct kernel_ipmi_msg msg; 3121 unsigned char data[1]; 3122 struct ipmi_system_interface_addr si; 3123 3124 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3125 si.channel = IPMI_BMC_CHANNEL; 3126 si.lun = 0; 3127 3128 msg.netfn = IPMI_NETFN_APP_REQUEST; 3129 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 3130 msg.data = data; 3131 msg.data_len = 1; 3132 data[0] = chan; 3133 return i_ipmi_request(NULL, 3134 intf, 3135 (struct ipmi_addr *) &si, 3136 0, 3137 &msg, 3138 intf, 3139 NULL, 3140 NULL, 3141 0, 3142 intf->addrinfo[0].address, 3143 intf->addrinfo[0].lun, 3144 -1, 0); 3145 } 3146 3147 static void 3148 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3149 { 3150 int rv = 0; 3151 int ch; 3152 unsigned int set = intf->curr_working_cset; 3153 struct ipmi_channel *chans; 3154 3155 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3156 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3157 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { 3158 /* It's the one we want */ 3159 if (msg->msg.data[0] != 0) { 3160 /* Got an error from the channel, just go on. */ 3161 3162 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 3163 /* 3164 * If the MC does not support this 3165 * command, that is legal. We just 3166 * assume it has one IPMB at channel 3167 * zero. 
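 * (Get Channel Info is an IPMI 1.5 command; __scan_channels()
 * below only performs this scan for version 1.5 and later.)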
3168 */ 3169 intf->wchannels[set].c[0].medium 3170 = IPMI_CHANNEL_MEDIUM_IPMB; 3171 intf->wchannels[set].c[0].protocol 3172 = IPMI_CHANNEL_PROTOCOL_IPMB; 3173 3174 intf->channel_list = intf->wchannels + set; 3175 intf->channels_ready = true; 3176 wake_up(&intf->waitq); 3177 goto out; 3178 } 3179 goto next_channel; 3180 } 3181 if (msg->msg.data_len < 4) { 3182 /* Message not big enough, just go on. */ 3183 goto next_channel; 3184 } 3185 ch = intf->curr_channel; 3186 chans = intf->wchannels[set].c; 3187 chans[ch].medium = msg->msg.data[2] & 0x7f; 3188 chans[ch].protocol = msg->msg.data[3] & 0x1f; 3189 3190 next_channel: 3191 intf->curr_channel++; 3192 if (intf->curr_channel >= IPMI_MAX_CHANNELS) { 3193 intf->channel_list = intf->wchannels + set; 3194 intf->channels_ready = true; 3195 wake_up(&intf->waitq); 3196 } else { 3197 intf->channel_list = intf->wchannels + set; 3198 intf->channels_ready = true; 3199 rv = send_channel_info_cmd(intf, intf->curr_channel); 3200 } 3201 3202 if (rv) { 3203 /* Got an error somehow, just give up. */ 3204 dev_warn(intf->si_dev, 3205 "Error sending channel information for channel %d: %d\n", 3206 intf->curr_channel, rv); 3207 3208 intf->channel_list = intf->wchannels + set; 3209 intf->channels_ready = true; 3210 wake_up(&intf->waitq); 3211 } 3212 } 3213 out: 3214 return; 3215 } 3216 3217 /* 3218 * Must be holding intf->bmc_reg_mutex to call this. 3219 */ 3220 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) 3221 { 3222 int rv; 3223 3224 if (ipmi_version_major(id) > 1 3225 || (ipmi_version_major(id) == 1 3226 && ipmi_version_minor(id) >= 5)) { 3227 unsigned int set; 3228 3229 /* 3230 * Start scanning the channels to see what is 3231 * available. 3232 */ 3233 set = !intf->curr_working_cset; 3234 intf->curr_working_cset = set; 3235 memset(&intf->wchannels[set], 0, 3236 sizeof(struct ipmi_channel_set)); 3237 3238 intf->null_user_handler = channel_handler; 3239 intf->curr_channel = 0; 3240 rv = send_channel_info_cmd(intf, 0); 3241 if (rv) { 3242 dev_warn(intf->si_dev, 3243 "Error sending channel information for channel 0, %d\n", 3244 rv); 3245 return -EIO; 3246 } 3247 3248 /* Wait for the channel info to be read. */ 3249 wait_event(intf->waitq, intf->channels_ready); 3250 intf->null_user_handler = NULL; 3251 } else { 3252 unsigned int set = intf->curr_working_cset; 3253 3254 /* Assume a single IPMB channel at zero. 
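 * This mirrors the IPMI_INVALID_COMMAND_ERR fallback in
 * channel_handler() above.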
*/ 3255 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 3256 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 3257 intf->channel_list = intf->wchannels + set; 3258 intf->channels_ready = true; 3259 } 3260 3261 return 0; 3262 } 3263 3264 static void ipmi_poll(struct ipmi_smi *intf) 3265 { 3266 if (intf->handlers->poll) 3267 intf->handlers->poll(intf->send_info); 3268 /* In case something came in */ 3269 handle_new_recv_msgs(intf); 3270 } 3271 3272 void ipmi_poll_interface(struct ipmi_user *user) 3273 { 3274 ipmi_poll(user->intf); 3275 } 3276 EXPORT_SYMBOL(ipmi_poll_interface); 3277 3278 static void redo_bmc_reg(struct work_struct *work) 3279 { 3280 struct ipmi_smi *intf = container_of(work, struct ipmi_smi, 3281 bmc_reg_work); 3282 3283 if (!intf->in_shutdown) 3284 bmc_get_device_id(intf, NULL, NULL, NULL, NULL); 3285 3286 kref_put(&intf->refcount, intf_free); 3287 } 3288 3289 int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, 3290 void *send_info, 3291 struct device *si_dev, 3292 unsigned char slave_addr) 3293 { 3294 int i, j; 3295 int rv; 3296 struct ipmi_smi *intf, *tintf; 3297 struct list_head *link; 3298 struct ipmi_device_id id; 3299 3300 /* 3301 * Make sure the driver is actually initialized; this handles 3302 * problems with initialization order. 3303 */ 3304 rv = ipmi_init_msghandler(); 3305 if (rv) 3306 return rv; 3307 3308 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 3309 if (!intf) 3310 return -ENOMEM; 3311 3312 rv = init_srcu_struct(&intf->users_srcu); 3313 if (rv) { 3314 kfree(intf); 3315 return rv; 3316 } 3317 3318 3319 intf->bmc = &intf->tmp_bmc; 3320 INIT_LIST_HEAD(&intf->bmc->intfs); 3321 mutex_init(&intf->bmc->dyn_mutex); 3322 INIT_LIST_HEAD(&intf->bmc_link); 3323 mutex_init(&intf->bmc_reg_mutex); 3324 intf->intf_num = -1; /* Mark it invalid for now. */ 3325 kref_init(&intf->refcount); 3326 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg); 3327 intf->si_dev = si_dev; 3328 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 3329 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR; 3330 intf->addrinfo[j].lun = 2; 3331 } 3332 if (slave_addr != 0) 3333 intf->addrinfo[0].address = slave_addr; 3334 INIT_LIST_HEAD(&intf->users); 3335 intf->handlers = handlers; 3336 intf->send_info = send_info; 3337 spin_lock_init(&intf->seq_lock); 3338 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 3339 intf->seq_table[j].inuse = 0; 3340 intf->seq_table[j].seqid = 0; 3341 } 3342 intf->curr_seq = 0; 3343 spin_lock_init(&intf->waiting_rcv_msgs_lock); 3344 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 3345 tasklet_init(&intf->recv_tasklet, 3346 smi_recv_tasklet, 3347 (unsigned long) intf); 3348 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 3349 spin_lock_init(&intf->xmit_msgs_lock); 3350 INIT_LIST_HEAD(&intf->xmit_msgs); 3351 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 3352 spin_lock_init(&intf->events_lock); 3353 atomic_set(&intf->event_waiters, 0); 3354 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3355 INIT_LIST_HEAD(&intf->waiting_events); 3356 intf->waiting_events_count = 0; 3357 mutex_init(&intf->cmd_rcvrs_mutex); 3358 spin_lock_init(&intf->maintenance_mode_lock); 3359 INIT_LIST_HEAD(&intf->cmd_rcvrs); 3360 init_waitqueue_head(&intf->waitq); 3361 for (i = 0; i < IPMI_NUM_STATS; i++) 3362 atomic_set(&intf->stats[i], 0); 3363 3364 mutex_lock(&ipmi_interfaces_mutex); 3365 /* Look for a hole in the numbers.
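 * Interface numbers are reused: a new interface takes the lowest
 * number freed by a prior unregister.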
*/ 3366 i = 0; 3367 link = &ipmi_interfaces; 3368 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) { 3369 if (tintf->intf_num != i) { 3370 link = &tintf->link; 3371 break; 3372 } 3373 i++; 3374 } 3375 /* Add the new interface in numeric order. */ 3376 if (i == 0) 3377 list_add_rcu(&intf->link, &ipmi_interfaces); 3378 else 3379 list_add_tail_rcu(&intf->link, link); 3380 3381 rv = handlers->start_processing(send_info, intf); 3382 if (rv) 3383 goto out_err; 3384 3385 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3386 if (rv) { 3387 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3388 goto out_err_started; 3389 } 3390 3391 mutex_lock(&intf->bmc_reg_mutex); 3392 rv = __scan_channels(intf, &id); 3393 mutex_unlock(&intf->bmc_reg_mutex); 3394 if (rv) 3395 goto out_err_bmc_reg; 3396 3397 /* 3398 * Keep memory order straight for RCU readers. Make 3399 * sure everything else is committed to memory before 3400 * setting intf_num to mark the interface valid. 3401 */ 3402 smp_wmb(); 3403 intf->intf_num = i; 3404 mutex_unlock(&ipmi_interfaces_mutex); 3405 3406 /* After this point the interface is legal to use. */ 3407 call_smi_watchers(i, intf->si_dev); 3408 3409 return 0; 3410 3411 out_err_bmc_reg: 3412 ipmi_bmc_unregister(intf); 3413 out_err_started: 3414 if (intf->handlers->shutdown) 3415 intf->handlers->shutdown(intf->send_info); 3416 out_err: 3417 list_del_rcu(&intf->link); 3418 mutex_unlock(&ipmi_interfaces_mutex); 3419 synchronize_srcu(&ipmi_interfaces_srcu); 3420 cleanup_srcu_struct(&intf->users_srcu); 3421 kref_put(&intf->refcount, intf_free); 3422 3423 return rv; 3424 } 3425 EXPORT_SYMBOL(ipmi_register_smi); 3426 3427 static void deliver_smi_err_response(struct ipmi_smi *intf, 3428 struct ipmi_smi_msg *msg, 3429 unsigned char err) 3430 { 3431 msg->rsp[0] = msg->data[0] | 4; 3432 msg->rsp[1] = msg->data[1]; 3433 msg->rsp[2] = err; 3434 msg->rsp_size = 3; 3435 /* It's an error, so it will never requeue, no need to check return. */ 3436 handle_one_recv_msg(intf, msg); 3437 } 3438 3439 static void cleanup_smi_msgs(struct ipmi_smi *intf) 3440 { 3441 int i; 3442 struct seq_table *ent; 3443 struct ipmi_smi_msg *msg; 3444 struct list_head *entry; 3445 struct list_head tmplist; 3446 3447 /* Clear out our transmit queues and hold the messages. */ 3448 INIT_LIST_HEAD(&tmplist); 3449 list_splice_tail(&intf->hp_xmit_msgs, &tmplist); 3450 list_splice_tail(&intf->xmit_msgs, &tmplist); 3451 3452 /* Current message first, to preserve order */ 3453 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { 3454 /* Wait for the message to clear out. */ 3455 schedule_timeout(1); 3456 } 3457 3458 /* No need for locks, the interface is down. */ 3459 3460 /* 3461 * Return errors for all pending messages in queue and in the 3462 * tables waiting for remote responses. 
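 * Each one is completed with IPMI_ERR_UNSPECIFIED so that any
 * waiting user sees a response instead of hanging.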
3463 */ 3464 while (!list_empty(&tmplist)) { 3465 entry = tmplist.next; 3466 list_del(entry); 3467 msg = list_entry(entry, struct ipmi_smi_msg, link); 3468 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); 3469 } 3470 3471 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 3472 ent = &intf->seq_table[i]; 3473 if (!ent->inuse) 3474 continue; 3475 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); 3476 } 3477 } 3478 3479 void ipmi_unregister_smi(struct ipmi_smi *intf) 3480 { 3481 struct ipmi_smi_watcher *w; 3482 int intf_num = intf->intf_num, index; 3483 3484 mutex_lock(&ipmi_interfaces_mutex); 3485 intf->intf_num = -1; 3486 intf->in_shutdown = true; 3487 list_del_rcu(&intf->link); 3488 mutex_unlock(&ipmi_interfaces_mutex); 3489 synchronize_srcu(&ipmi_interfaces_srcu); 3490 3491 /* At this point no users can be added to the interface. */ 3492 3493 /* 3494 * Call all the watcher interfaces to tell them that 3495 * an interface is going away. 3496 */ 3497 mutex_lock(&smi_watchers_mutex); 3498 list_for_each_entry(w, &smi_watchers, link) 3499 w->smi_gone(intf_num); 3500 mutex_unlock(&smi_watchers_mutex); 3501 3502 index = srcu_read_lock(&intf->users_srcu); 3503 while (!list_empty(&intf->users)) { 3504 struct ipmi_user *user = 3505 container_of(list_next_rcu(&intf->users), 3506 struct ipmi_user, link); 3507 3508 _ipmi_destroy_user(user); 3509 } 3510 srcu_read_unlock(&intf->users_srcu, index); 3511 3512 if (intf->handlers->shutdown) 3513 intf->handlers->shutdown(intf->send_info); 3514 3515 cleanup_smi_msgs(intf); 3516 3517 ipmi_bmc_unregister(intf); 3518 3519 cleanup_srcu_struct(&intf->users_srcu); 3520 kref_put(&intf->refcount, intf_free); 3521 } 3522 EXPORT_SYMBOL(ipmi_unregister_smi); 3523 3524 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, 3525 struct ipmi_smi_msg *msg) 3526 { 3527 struct ipmi_ipmb_addr ipmb_addr; 3528 struct ipmi_recv_msg *recv_msg; 3529 3530 /* 3531 * This is 11, not 10, because the response must contain a 3532 * completion code. 3533 */ 3534 if (msg->rsp_size < 11) { 3535 /* Message not big enough, just ignore it. */ 3536 ipmi_inc_stat(intf, invalid_ipmb_responses); 3537 return 0; 3538 } 3539 3540 if (msg->rsp[2] != 0) { 3541 /* An error getting the response, just ignore it. */ 3542 return 0; 3543 } 3544 3545 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 3546 ipmb_addr.slave_addr = msg->rsp[6]; 3547 ipmb_addr.channel = msg->rsp[3] & 0x0f; 3548 ipmb_addr.lun = msg->rsp[7] & 3; 3549 3550 /* 3551 * It's a response from a remote entity. Look up the sequence 3552 * number and handle the response. 3553 */ 3554 if (intf_find_seq(intf, 3555 msg->rsp[7] >> 2, 3556 msg->rsp[3] & 0x0f, 3557 msg->rsp[8], 3558 (msg->rsp[4] >> 2) & (~1), 3559 (struct ipmi_addr *) &ipmb_addr, 3560 &recv_msg)) { 3561 /* 3562 * We were unable to find the sequence number, 3563 * so just nuke the message. 3564 */ 3565 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3566 return 0; 3567 } 3568 3569 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); 3570 /* 3571 * The other fields matched, so no need to set them, except 3572 * for netfn, which needs to be the response that was 3573 * returned, not the request value. 
3574 */ 3575 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3576 recv_msg->msg.data = recv_msg->msg_data; 3577 recv_msg->msg.data_len = msg->rsp_size - 10; 3578 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3579 if (deliver_response(intf, recv_msg)) 3580 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3581 else 3582 ipmi_inc_stat(intf, handled_ipmb_responses); 3583 3584 return 0; 3585 } 3586 3587 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, 3588 struct ipmi_smi_msg *msg) 3589 { 3590 struct cmd_rcvr *rcvr; 3591 int rv = 0; 3592 unsigned char netfn; 3593 unsigned char cmd; 3594 unsigned char chan; 3595 struct ipmi_user *user = NULL; 3596 struct ipmi_ipmb_addr *ipmb_addr; 3597 struct ipmi_recv_msg *recv_msg; 3598 3599 if (msg->rsp_size < 10) { 3600 /* Message not big enough, just ignore it. */ 3601 ipmi_inc_stat(intf, invalid_commands); 3602 return 0; 3603 } 3604 3605 if (msg->rsp[2] != 0) { 3606 /* An error getting the response, just ignore it. */ 3607 return 0; 3608 } 3609 3610 netfn = msg->rsp[4] >> 2; 3611 cmd = msg->rsp[8]; 3612 chan = msg->rsp[3] & 0xf; 3613 3614 rcu_read_lock(); 3615 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3616 if (rcvr) { 3617 user = rcvr->user; 3618 kref_get(&user->refcount); 3619 } else 3620 user = NULL; 3621 rcu_read_unlock(); 3622 3623 if (user == NULL) { 3624 /* We didn't find a user, deliver an error response. */ 3625 ipmi_inc_stat(intf, unhandled_commands); 3626 3627 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3628 msg->data[1] = IPMI_SEND_MSG_CMD; 3629 msg->data[2] = msg->rsp[3]; 3630 msg->data[3] = msg->rsp[6]; 3631 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3632 msg->data[5] = ipmb_checksum(&msg->data[3], 2); 3633 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; 3634 /* rqseq/lun */ 3635 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3636 msg->data[8] = msg->rsp[8]; /* cmd */ 3637 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3638 msg->data[10] = ipmb_checksum(&msg->data[6], 4); 3639 msg->data_size = 11; 3640 3641 ipmi_debug_msg("Invalid command:", msg->data, msg->data_size); 3642 3643 rcu_read_lock(); 3644 if (!intf->in_shutdown) { 3645 smi_send(intf, intf->handlers, msg, 0); 3646 /* 3647 * We used the message, so return the value 3648 * that causes it to not be freed or 3649 * queued. 3650 */ 3651 rv = -1; 3652 } 3653 rcu_read_unlock(); 3654 } else { 3655 recv_msg = ipmi_alloc_recv_msg(); 3656 if (!recv_msg) { 3657 /* 3658 * We couldn't allocate memory for the 3659 * message, so requeue it for handling 3660 * later. 3661 */ 3662 rv = 1; 3663 kref_put(&user->refcount, free_user); 3664 } else { 3665 /* Extract the source address from the data. */ 3666 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 3667 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 3668 ipmb_addr->slave_addr = msg->rsp[6]; 3669 ipmb_addr->lun = msg->rsp[7] & 3; 3670 ipmb_addr->channel = msg->rsp[3] & 0xf; 3671 3672 /* 3673 * Extract the rest of the message information 3674 * from the IPMB header. 3675 */ 3676 recv_msg->user = user; 3677 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3678 recv_msg->msgid = msg->rsp[7] >> 2; 3679 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3680 recv_msg->msg.cmd = msg->rsp[8]; 3681 recv_msg->msg.data = recv_msg->msg_data; 3682 3683 /* 3684 * We chop off 10, not 9 bytes because the checksum 3685 * at the end also needs to be removed. 
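 * (The payload runs from rsp[9] up to, but not including, the
 * final byte, which is the trailing IPMB checksum.)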
3686 */ 3687 recv_msg->msg.data_len = msg->rsp_size - 10; 3688 memcpy(recv_msg->msg_data, &msg->rsp[9], 3689 msg->rsp_size - 10); 3690 if (deliver_response(intf, recv_msg)) 3691 ipmi_inc_stat(intf, unhandled_commands); 3692 else 3693 ipmi_inc_stat(intf, handled_commands); 3694 } 3695 } 3696 3697 return rv; 3698 } 3699 3700 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, 3701 struct ipmi_smi_msg *msg) 3702 { 3703 struct ipmi_lan_addr lan_addr; 3704 struct ipmi_recv_msg *recv_msg; 3705 3706 3707 /* 3708 * This is 13, not 12, because the response must contain a 3709 * completion code. 3710 */ 3711 if (msg->rsp_size < 13) { 3712 /* Message not big enough, just ignore it. */ 3713 ipmi_inc_stat(intf, invalid_lan_responses); 3714 return 0; 3715 } 3716 3717 if (msg->rsp[2] != 0) { 3718 /* An error getting the response, just ignore it. */ 3719 return 0; 3720 } 3721 3722 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 3723 lan_addr.session_handle = msg->rsp[4]; 3724 lan_addr.remote_SWID = msg->rsp[8]; 3725 lan_addr.local_SWID = msg->rsp[5]; 3726 lan_addr.channel = msg->rsp[3] & 0x0f; 3727 lan_addr.privilege = msg->rsp[3] >> 4; 3728 lan_addr.lun = msg->rsp[9] & 3; 3729 3730 /* 3731 * It's a response from a remote entity. Look up the sequence 3732 * number and handle the response. 3733 */ 3734 if (intf_find_seq(intf, 3735 msg->rsp[9] >> 2, 3736 msg->rsp[3] & 0x0f, 3737 msg->rsp[10], 3738 (msg->rsp[6] >> 2) & (~1), 3739 (struct ipmi_addr *) &lan_addr, 3740 &recv_msg)) { 3741 /* 3742 * We were unable to find the sequence number, 3743 * so just nuke the message. 3744 */ 3745 ipmi_inc_stat(intf, unhandled_lan_responses); 3746 return 0; 3747 } 3748 3749 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11); 3750 /* 3751 * The other fields matched, so no need to set them, except 3752 * for netfn, which needs to be the response that was 3753 * returned, not the request value. 3754 */ 3755 recv_msg->msg.netfn = msg->rsp[6] >> 2; 3756 recv_msg->msg.data = recv_msg->msg_data; 3757 recv_msg->msg.data_len = msg->rsp_size - 12; 3758 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3759 if (deliver_response(intf, recv_msg)) 3760 ipmi_inc_stat(intf, unhandled_lan_responses); 3761 else 3762 ipmi_inc_stat(intf, handled_lan_responses); 3763 3764 return 0; 3765 } 3766 3767 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, 3768 struct ipmi_smi_msg *msg) 3769 { 3770 struct cmd_rcvr *rcvr; 3771 int rv = 0; 3772 unsigned char netfn; 3773 unsigned char cmd; 3774 unsigned char chan; 3775 struct ipmi_user *user = NULL; 3776 struct ipmi_lan_addr *lan_addr; 3777 struct ipmi_recv_msg *recv_msg; 3778 3779 if (msg->rsp_size < 12) { 3780 /* Message not big enough, just ignore it. */ 3781 ipmi_inc_stat(intf, invalid_commands); 3782 return 0; 3783 } 3784 3785 if (msg->rsp[2] != 0) { 3786 /* An error getting the response, just ignore it. */ 3787 return 0; 3788 } 3789 3790 netfn = msg->rsp[6] >> 2; 3791 cmd = msg->rsp[10]; 3792 chan = msg->rsp[3] & 0xf; 3793 3794 rcu_read_lock(); 3795 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3796 if (rcvr) { 3797 user = rcvr->user; 3798 kref_get(&user->refcount); 3799 } else 3800 user = NULL; 3801 rcu_read_unlock(); 3802 3803 if (user == NULL) { 3804 /* We didn't find a user, just give up. */ 3805 ipmi_inc_stat(intf, unhandled_commands); 3806 3807 /* 3808 * Don't do anything with these messages, just allow 3809 * them to be freed. 
3810 */ 3811 rv = 0; 3812 } else { 3813 recv_msg = ipmi_alloc_recv_msg(); 3814 if (!recv_msg) { 3815 /* 3816 * We couldn't allocate memory for the 3817 * message, so requeue it for handling later. 3818 */ 3819 rv = 1; 3820 kref_put(&user->refcount, free_user); 3821 } else { 3822 /* Extract the source address from the data. */ 3823 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; 3824 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; 3825 lan_addr->session_handle = msg->rsp[4]; 3826 lan_addr->remote_SWID = msg->rsp[8]; 3827 lan_addr->local_SWID = msg->rsp[5]; 3828 lan_addr->lun = msg->rsp[9] & 3; 3829 lan_addr->channel = msg->rsp[3] & 0xf; 3830 lan_addr->privilege = msg->rsp[3] >> 4; 3831 3832 /* 3833 * Extract the rest of the message information 3834 * from the IPMB header. 3835 */ 3836 recv_msg->user = user; 3837 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3838 recv_msg->msgid = msg->rsp[9] >> 2; 3839 recv_msg->msg.netfn = msg->rsp[6] >> 2; 3840 recv_msg->msg.cmd = msg->rsp[10]; 3841 recv_msg->msg.data = recv_msg->msg_data; 3842 3843 /* 3844 * We chop off 12, not 11, bytes because the checksum 3845 * at the end also needs to be removed. 3846 */ 3847 recv_msg->msg.data_len = msg->rsp_size - 12; 3848 memcpy(recv_msg->msg_data, &msg->rsp[11], 3849 msg->rsp_size - 12); 3850 if (deliver_response(intf, recv_msg)) 3851 ipmi_inc_stat(intf, unhandled_commands); 3852 else 3853 ipmi_inc_stat(intf, handled_commands); 3854 } 3855 } 3856 3857 return rv; 3858 } 3859 3860 /* 3861 * This routine will handle "Get Message" command responses on 3862 * channels that use an OEM medium. The message format belongs to 3863 * the OEM. See IPMI 2.0 specification, Chapter 6 and 3864 * Chapter 22, sections 22.6 and 22.24 for more details. 3865 */ 3866 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, 3867 struct ipmi_smi_msg *msg) 3868 { 3869 struct cmd_rcvr *rcvr; 3870 int rv = 0; 3871 unsigned char netfn; 3872 unsigned char cmd; 3873 unsigned char chan; 3874 struct ipmi_user *user = NULL; 3875 struct ipmi_system_interface_addr *smi_addr; 3876 struct ipmi_recv_msg *recv_msg; 3877 3878 /* 3879 * We expect the OEM SW to perform error checking, 3880 * so we just do some basic sanity checks. 3881 */ 3882 if (msg->rsp_size < 4) { 3883 /* Message not big enough, just ignore it. */ 3884 ipmi_inc_stat(intf, invalid_commands); 3885 return 0; 3886 } 3887 3888 if (msg->rsp[2] != 0) { 3889 /* An error getting the response, just ignore it. */ 3890 return 0; 3891 } 3892 3893 /* 3894 * This is an OEM message, so the OEM needs to know how 3895 * to handle it. We do no interpretation. 3896 */ 3897 netfn = msg->rsp[0] >> 2; 3898 cmd = msg->rsp[1]; 3899 chan = msg->rsp[3] & 0xf; 3900 3901 rcu_read_lock(); 3902 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3903 if (rcvr) { 3904 user = rcvr->user; 3905 kref_get(&user->refcount); 3906 } else 3907 user = NULL; 3908 rcu_read_unlock(); 3909 3910 if (user == NULL) { 3911 /* We didn't find a user, just give up. */ 3912 ipmi_inc_stat(intf, unhandled_commands); 3913 3914 /* 3915 * Don't do anything with these messages, just allow 3916 * them to be freed. 3917 */ 3918 3919 rv = 0; 3920 } else { 3921 recv_msg = ipmi_alloc_recv_msg(); 3922 if (!recv_msg) { 3923 /* 3924 * We couldn't allocate memory for the 3925 * message, so requeue it for handling 3926 * later. 3927 */ 3928 rv = 1; 3929 kref_put(&user->refcount, free_user); 3930 } else { 3931 /* 3932 * OEM Messages are expected to be delivered via 3933 * the system interface to SMS software.
We may 3934 * need to revisit this depending on OEM 3935 * requirements. 3936 */ 3937 smi_addr = ((struct ipmi_system_interface_addr *) 3938 &recv_msg->addr); 3939 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3940 smi_addr->channel = IPMI_BMC_CHANNEL; 3941 smi_addr->lun = msg->rsp[0] & 3; 3942 3943 recv_msg->user = user; 3944 recv_msg->user_msg_data = NULL; 3945 recv_msg->recv_type = IPMI_OEM_RECV_TYPE; 3946 recv_msg->msg.netfn = msg->rsp[0] >> 2; 3947 recv_msg->msg.cmd = msg->rsp[1]; 3948 recv_msg->msg.data = recv_msg->msg_data; 3949 3950 /* 3951 * The message starts at byte 4, which follows the 3952 * Channel Byte in the "GET MESSAGE" command. 3953 */ 3954 recv_msg->msg.data_len = msg->rsp_size - 4; 3955 memcpy(recv_msg->msg_data, &msg->rsp[4], 3956 msg->rsp_size - 4); 3957 if (deliver_response(intf, recv_msg)) 3958 ipmi_inc_stat(intf, unhandled_commands); 3959 else 3960 ipmi_inc_stat(intf, handled_commands); 3961 } 3962 } 3963 3964 return rv; 3965 } 3966 3967 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, 3968 struct ipmi_smi_msg *msg) 3969 { 3970 struct ipmi_system_interface_addr *smi_addr; 3971 3972 recv_msg->msgid = 0; 3973 smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr; 3974 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3975 smi_addr->channel = IPMI_BMC_CHANNEL; 3976 smi_addr->lun = msg->rsp[0] & 3; 3977 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE; 3978 recv_msg->msg.netfn = msg->rsp[0] >> 2; 3979 recv_msg->msg.cmd = msg->rsp[1]; 3980 memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3); 3981 recv_msg->msg.data = recv_msg->msg_data; 3982 recv_msg->msg.data_len = msg->rsp_size - 3; 3983 } 3984 3985 static int handle_read_event_rsp(struct ipmi_smi *intf, 3986 struct ipmi_smi_msg *msg) 3987 { 3988 struct ipmi_recv_msg *recv_msg, *recv_msg2; 3989 struct list_head msgs; 3990 struct ipmi_user *user; 3991 int rv = 0, deliver_count = 0, index; 3992 unsigned long flags; 3993 3994 if (msg->rsp_size < 19) { 3995 /* Message is too small to be an IPMB event. */ 3996 ipmi_inc_stat(intf, invalid_events); 3997 return 0; 3998 } 3999 4000 if (msg->rsp[2] != 0) { 4001 /* An error getting the event, just ignore it. */ 4002 return 0; 4003 } 4004 4005 INIT_LIST_HEAD(&msgs); 4006 4007 spin_lock_irqsave(&intf->events_lock, flags); 4008 4009 ipmi_inc_stat(intf, events); 4010 4011 /* 4012 * Allocate and fill in one message for every user that is 4013 * getting events. 4014 */ 4015 index = srcu_read_lock(&intf->users_srcu); 4016 list_for_each_entry_rcu(user, &intf->users, link) { 4017 if (!user->gets_events) 4018 continue; 4019 4020 recv_msg = ipmi_alloc_recv_msg(); 4021 if (!recv_msg) { 4022 srcu_read_unlock(&intf->users_srcu, index); 4023 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, 4024 link) { 4025 list_del(&recv_msg->link); 4026 ipmi_free_recv_msg(recv_msg); 4027 } 4028 /* 4029 * We couldn't allocate memory for the 4030 * message, so requeue it for handling 4031 * later. 4032 */ 4033 rv = 1; 4034 goto out; 4035 } 4036 4037 deliver_count++; 4038 4039 copy_event_into_recv_msg(recv_msg, msg); 4040 recv_msg->user = user; 4041 kref_get(&user->refcount); 4042 list_add_tail(&recv_msg->link, &msgs); 4043 } 4044 srcu_read_unlock(&intf->users_srcu, index); 4045 4046 if (deliver_count) { 4047 /* Now deliver all the messages.
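 * Delivery is deferred until after the allocation loop so an
 * allocation failure can free everything and requeue the event
 * instead of delivering it to only some of the users.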
*/ 4048 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { 4049 list_del(&recv_msg->link); 4050 deliver_local_response(intf, recv_msg); 4051 } 4052 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { 4053 /* 4054 * No one is waiting to receive the message; put it in the 4055 * queue if there aren't already too many things in it. 4056 */ 4057 recv_msg = ipmi_alloc_recv_msg(); 4058 if (!recv_msg) { 4059 /* 4060 * We couldn't allocate memory for the 4061 * message, so requeue it for handling 4062 * later. 4063 */ 4064 rv = 1; 4065 goto out; 4066 } 4067 4068 copy_event_into_recv_msg(recv_msg, msg); 4069 list_add_tail(&recv_msg->link, &intf->waiting_events); 4070 intf->waiting_events_count++; 4071 } else if (!intf->event_msg_printed) { 4072 /* 4073 * There are too many things in the queue; discard this 4074 * message. 4075 */ 4076 dev_warn(intf->si_dev, 4077 "Event queue full, discarding incoming events\n"); 4078 intf->event_msg_printed = 1; 4079 } 4080 4081 out: 4082 spin_unlock_irqrestore(&intf->events_lock, flags); 4083 4084 return rv; 4085 } 4086 4087 static int handle_bmc_rsp(struct ipmi_smi *intf, 4088 struct ipmi_smi_msg *msg) 4089 { 4090 struct ipmi_recv_msg *recv_msg; 4091 struct ipmi_system_interface_addr *smi_addr; 4092 4093 recv_msg = (struct ipmi_recv_msg *) msg->user_data; 4094 if (recv_msg == NULL) { 4095 dev_warn(intf->si_dev, 4096 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); 4097 return 0; 4098 } 4099 4100 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4101 recv_msg->msgid = msg->msgid; 4102 smi_addr = ((struct ipmi_system_interface_addr *) 4103 &recv_msg->addr); 4104 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4105 smi_addr->channel = IPMI_BMC_CHANNEL; 4106 smi_addr->lun = msg->rsp[0] & 3; 4107 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4108 recv_msg->msg.cmd = msg->rsp[1]; 4109 memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2); 4110 recv_msg->msg.data = recv_msg->msg_data; 4111 recv_msg->msg.data_len = msg->rsp_size - 2; 4112 deliver_local_response(intf, recv_msg); 4113 4114 return 0; 4115 } 4116 4117 /* 4118 * Handle a received message. Return 1 if the message should be requeued, 4119 * 0 if the message should be freed, or -1 if the message should not 4120 * be freed or requeued. 4121 */ 4122 static int handle_one_recv_msg(struct ipmi_smi *intf, 4123 struct ipmi_smi_msg *msg) 4124 { 4125 int requeue; 4126 int chan; 4127 4128 ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size); 4129 if (msg->rsp_size < 2) { 4130 /* Message is too small to be correct. */ 4131 dev_warn(intf->si_dev, 4132 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n", 4133 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size); 4134 4135 /* Generate an error response for the message. */ 4136 msg->rsp[0] = msg->data[0] | (1 << 2); 4137 msg->rsp[1] = msg->data[1]; 4138 msg->rsp[2] = IPMI_ERR_UNSPECIFIED; 4139 msg->rsp_size = 3; 4140 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) 4141 || (msg->rsp[1] != msg->data[1])) { 4142 /* 4143 * The NetFN and Command in the response are not even 4144 * marginally correct. 4145 */ 4146 dev_warn(intf->si_dev, 4147 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n", 4148 (msg->data[0] >> 2) | 1, msg->data[1], 4149 msg->rsp[0] >> 2, msg->rsp[1]); 4150 4151 /* Generate an error response for the message.
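 * Setting bit 2 of the netfn/LUN byte converts the request netfn
 * into the matching response netfn; e.g. 0x18 (APP request, LUN 0)
 * becomes 0x1c (APP response).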
/*
 * Handle a received message.  Return 1 if the message should be requeued,
 * 0 if the message should be freed, or -1 if the message should not
 * be freed or requeued.
 */
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue;
	int chan;

	ipmi_debug_msg("Recv", msg->rsp, msg->rsp_size);
	if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		dev_warn(intf->si_dev,
			 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
			 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
		   || (msg->rsp[1] != msg->data[1])) {
		/*
		 * The NetFN and Command in the response are not even
		 * marginally correct.
		 */
		dev_warn(intf->si_dev,
			 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
			 (msg->data[0] >> 2) | 1, msg->data[1],
			 msg->rsp[0] >> 2, msg->rsp[1]);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	}

	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data != NULL)) {
		/*
		 * It's a response to a response we sent.  For this we
		 * deliver a send message response to the user.
		 */
		struct ipmi_recv_msg *recv_msg = msg->user_data;

		requeue = 0;
		if (msg->rsp_size < 2)
			/* Message is too small to be correct. */
			goto out;

		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;

		if (!recv_msg)
			goto out;

		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = 1;
		recv_msg->msg_data[0] = msg->rsp[2];
		deliver_local_response(intf, recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
		struct ipmi_channel *chans;

		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
			requeue = 0;
			goto out;
		}

		/*
		 * We need to make sure the channels have been initialized.
		 * The channel_handler routine will set the "curr_channel"
		 * equal to or greater than IPMI_MAX_CHANNELS when all the
		 * channels for this interface have been initialized.
		 */
		if (!intf->channels_ready) {
			requeue = 0; /* Throw the message away */
			goto out;
		}

		chans = READ_ONCE(intf->channel_list)->c;

		switch (chans[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);
			}
			break;

		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_lan_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_lan_get_msg_cmd(intf, msg);
			}
			break;

		default:
			/*
			 * Check for OEM channels.  Clients had better
			 * register for these commands.
			 */
			if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
			    && (chans[chan].medium
				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
				requeue = handle_oem_get_msg_cmd(intf, msg);
			} else {
				/*
				 * We don't handle the channel type, so just
				 * free the message.
				 */
				requeue = 0;
			}
			break;
		}

	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
		/* It's an asynchronous event. */
		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
	}

out:
	return requeue;
}
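/*
 * The return contract of handle_one_recv_msg() is consumed by
 * handle_new_recv_msgs() below; any other caller would need the same
 * three-way handling, sketched here:
 *
 *	rv = handle_one_recv_msg(intf, smi_msg);
 *	if (rv > 0)
 *		requeue smi_msg and stop, to preserve ordering;
 *	else if (rv == 0)
 *		ipmi_free_smi_msg(smi_msg);
 *	else
 *		fatal error: neither free nor requeue the message.
 */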
/*
 * If there are messages in the queue or pretimeouts, handle them.
 */
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
	struct ipmi_smi_msg *smi_msg;
	unsigned long flags = 0;
	int rv;
	int run_to_completion = intf->run_to_completion;

	/* See if any waiting messages need to be processed. */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	while (!list_empty(&intf->waiting_rcv_msgs)) {
		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
				     struct ipmi_smi_msg, link);
		list_del(&smi_msg->link);
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
					       flags);
		rv = handle_one_recv_msg(intf, smi_msg);
		if (!run_to_completion)
			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
		if (rv > 0) {
			/*
			 * To preserve message order, quit if we
			 * can't handle a message.  Add the message
			 * back at the head; this is safe because this
			 * tasklet is the only thing that pulls the
			 * messages.
			 */
			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
			break;
		} else {
			if (rv == 0)
				/* Message handled */
				ipmi_free_smi_msg(smi_msg);
			/* If rv < 0, fatal error, del but don't free. */
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);

	/*
	 * If the pretimeout count is non-zero, decrement one from it and
	 * deliver pretimeouts to all the users.
	 */
	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
		struct ipmi_user *user;
		int index;

		index = srcu_read_lock(&intf->users_srcu);
		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_watchdog_pretimeout)
				user->handler->ipmi_watchdog_pretimeout(
					user->handler_data);
		}
		srcu_read_unlock(&intf->users_srcu, index);
	}
}

static void smi_recv_tasklet(unsigned long val)
{
	unsigned long flags = 0; /* keep us warning-free. */
	struct ipmi_smi *intf = (struct ipmi_smi *) val;
	int run_to_completion = intf->run_to_completion;
	struct ipmi_smi_msg *newmsg = NULL;

	/*
	 * Start the next message if available.
	 *
	 * Do this here, not in the actual receiver, because the lower
	 * layer is allowed to hold locks while calling message delivery,
	 * so doing it there could deadlock.
	 */

	rcu_read_lock();

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	if (intf->curr_msg == NULL && !intf->in_shutdown) {
		struct list_head *entry = NULL;

		/* Pick the high priority queue first. */
		if (!list_empty(&intf->hp_xmit_msgs))
			entry = intf->hp_xmit_msgs.next;
		else if (!list_empty(&intf->xmit_msgs))
			entry = intf->xmit_msgs.next;

		if (entry) {
			list_del(entry);
			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
			intf->curr_msg = newmsg;
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
	if (newmsg)
		intf->handlers->sender(intf->send_info, newmsg);

	rcu_read_unlock();

	handle_new_recv_msgs(intf);
}
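/*
 * The lower-layer (system interface) driver hands completed messages
 * to this layer via ipmi_smi_msg_received() below; in
 * run_to_completion mode (used at panic time) the tasklet body runs
 * synchronously instead of being scheduled.  A rough sketch of a
 * lower layer's receive path, with hypothetical names (hw_buf, len,
 * my_intf) for illustration only:
 *
 *	msg->rsp_size = len;
 *	memcpy(msg->rsp, hw_buf, len);
 *	ipmi_smi_msg_received(my_intf, msg);
 */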
/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(struct ipmi_smi *intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags = 0; /* keep us warning-free. */
	int run_to_completion = intf->run_to_completion;

	if ((msg->data_size >= 2)
	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data == NULL)) {

		if (intf->in_shutdown)
			goto free_msg;

		/*
		 * This is the local response to a command send, start
		 * the timer for these.  The user_data will not be
		 * NULL if this is a response send, and we will let
		 * response sends just go through.
		 */

		/*
		 * Check for errors.  If we get certain errors (ones
		 * that mean basically we can try again later), we
		 * ignore them and start the timer.  Otherwise we
		 * report the error immediately.
		 */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
		    && (msg->rsp[2] != IPMI_BUS_ERR)
		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
			int ch = msg->rsp[3] & 0xf;
			struct ipmi_channel *chans;

			/* Got an error sending the message, handle it. */

			chans = READ_ONCE(intf->channel_list)->c;
			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
				ipmi_inc_stat(intf, sent_lan_command_errs);
			else
				ipmi_inc_stat(intf, sent_ipmb_command_errs);
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);

free_msg:
		ipmi_free_smi_msg(msg);
	} else {
		/*
		 * To preserve message order, we keep a queue and deliver from
		 * a tasklet.
		 */
		if (!run_to_completion)
			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
		list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
					       flags);
	}

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	/*
	 * We can get an asynchronous event or receive message in addition
	 * to commands we send.
	 */
	if (msg == intf->curr_msg)
		intf->curr_msg = NULL;
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (run_to_completion)
		smi_recv_tasklet((unsigned long) intf);
	else
		tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);

void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
{
	if (intf->in_shutdown)
		return;

	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
	tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
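/*
 * Pretimeouts set here are delivered out of handle_new_recv_msgs() to
 * every user whose handler supplies an ipmi_watchdog_pretimeout
 * callback.  A sketch of such a user (the field names come from
 * struct ipmi_user_hndl in <linux/ipmi.h>; the handler functions are
 * hypothetical):
 *
 *	static const struct ipmi_user_hndl wdog_hndl = {
 *		.ipmi_recv_hndl           = wdog_msg_handler,
 *		.ipmi_watchdog_pretimeout = wdog_pretimeout_handler,
 *	};
 */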
4479 */ 4480 return NULL; 4481 4482 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); 4483 smi_msg->data_size = recv_msg->msg.data_len; 4484 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); 4485 4486 ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size); 4487 4488 return smi_msg; 4489 } 4490 4491 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, 4492 struct list_head *timeouts, 4493 unsigned long timeout_period, 4494 int slot, unsigned long *flags, 4495 unsigned int *waiting_msgs) 4496 { 4497 struct ipmi_recv_msg *msg; 4498 4499 if (intf->in_shutdown) 4500 return; 4501 4502 if (!ent->inuse) 4503 return; 4504 4505 if (timeout_period < ent->timeout) { 4506 ent->timeout -= timeout_period; 4507 (*waiting_msgs)++; 4508 return; 4509 } 4510 4511 if (ent->retries_left == 0) { 4512 /* The message has used all its retries. */ 4513 ent->inuse = 0; 4514 msg = ent->recv_msg; 4515 list_add_tail(&msg->link, timeouts); 4516 if (ent->broadcast) 4517 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); 4518 else if (is_lan_addr(&ent->recv_msg->addr)) 4519 ipmi_inc_stat(intf, timed_out_lan_commands); 4520 else 4521 ipmi_inc_stat(intf, timed_out_ipmb_commands); 4522 } else { 4523 struct ipmi_smi_msg *smi_msg; 4524 /* More retries, send again. */ 4525 4526 (*waiting_msgs)++; 4527 4528 /* 4529 * Start with the max timer, set to normal timer after 4530 * the message is sent. 4531 */ 4532 ent->timeout = MAX_MSG_TIMEOUT; 4533 ent->retries_left--; 4534 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 4535 ent->seqid); 4536 if (!smi_msg) { 4537 if (is_lan_addr(&ent->recv_msg->addr)) 4538 ipmi_inc_stat(intf, 4539 dropped_rexmit_lan_commands); 4540 else 4541 ipmi_inc_stat(intf, 4542 dropped_rexmit_ipmb_commands); 4543 return; 4544 } 4545 4546 spin_unlock_irqrestore(&intf->seq_lock, *flags); 4547 4548 /* 4549 * Send the new message. We send with a zero 4550 * priority. It timed out, I doubt time is that 4551 * critical now, and high priority messages are really 4552 * only for messages to the local MC, which don't get 4553 * resent. 4554 */ 4555 if (intf->handlers) { 4556 if (is_lan_addr(&ent->recv_msg->addr)) 4557 ipmi_inc_stat(intf, 4558 retransmitted_lan_commands); 4559 else 4560 ipmi_inc_stat(intf, 4561 retransmitted_ipmb_commands); 4562 4563 smi_send(intf, intf->handlers, smi_msg, 0); 4564 } else 4565 ipmi_free_smi_msg(smi_msg); 4566 4567 spin_lock_irqsave(&intf->seq_lock, *flags); 4568 } 4569 } 4570 4571 static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf, 4572 unsigned long timeout_period) 4573 { 4574 struct list_head timeouts; 4575 struct ipmi_recv_msg *msg, *msg2; 4576 unsigned long flags; 4577 int i; 4578 unsigned int waiting_msgs = 0; 4579 4580 if (!intf->bmc_registered) { 4581 kref_get(&intf->refcount); 4582 if (!schedule_work(&intf->bmc_reg_work)) { 4583 kref_put(&intf->refcount, intf_free); 4584 waiting_msgs++; 4585 } 4586 } 4587 4588 /* 4589 * Go through the seq table and find any messages that 4590 * have timed out, putting them in the timeouts 4591 * list. 
4592 */ 4593 INIT_LIST_HEAD(&timeouts); 4594 spin_lock_irqsave(&intf->seq_lock, flags); 4595 if (intf->ipmb_maintenance_mode_timeout) { 4596 if (intf->ipmb_maintenance_mode_timeout <= timeout_period) 4597 intf->ipmb_maintenance_mode_timeout = 0; 4598 else 4599 intf->ipmb_maintenance_mode_timeout -= timeout_period; 4600 } 4601 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) 4602 check_msg_timeout(intf, &intf->seq_table[i], 4603 &timeouts, timeout_period, i, 4604 &flags, &waiting_msgs); 4605 spin_unlock_irqrestore(&intf->seq_lock, flags); 4606 4607 list_for_each_entry_safe(msg, msg2, &timeouts, link) 4608 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE); 4609 4610 /* 4611 * Maintenance mode handling. Check the timeout 4612 * optimistically before we claim the lock. It may 4613 * mean a timeout gets missed occasionally, but that 4614 * only means the timeout gets extended by one period 4615 * in that case. No big deal, and it avoids the lock 4616 * most of the time. 4617 */ 4618 if (intf->auto_maintenance_timeout > 0) { 4619 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 4620 if (intf->auto_maintenance_timeout > 0) { 4621 intf->auto_maintenance_timeout 4622 -= timeout_period; 4623 if (!intf->maintenance_mode 4624 && (intf->auto_maintenance_timeout <= 0)) { 4625 intf->maintenance_mode_enable = false; 4626 maintenance_mode_update(intf); 4627 } 4628 } 4629 spin_unlock_irqrestore(&intf->maintenance_mode_lock, 4630 flags); 4631 } 4632 4633 tasklet_schedule(&intf->recv_tasklet); 4634 4635 return waiting_msgs; 4636 } 4637 4638 static void ipmi_request_event(struct ipmi_smi *intf) 4639 { 4640 /* No event requests when in maintenance mode. */ 4641 if (intf->maintenance_mode_enable) 4642 return; 4643 4644 if (!intf->in_shutdown) 4645 intf->handlers->request_events(intf->send_info); 4646 } 4647 4648 static struct timer_list ipmi_timer; 4649 4650 static atomic_t stop_operation; 4651 4652 static void ipmi_timeout(struct timer_list *unused) 4653 { 4654 struct ipmi_smi *intf; 4655 int nt = 0, index; 4656 4657 if (atomic_read(&stop_operation)) 4658 return; 4659 4660 index = srcu_read_lock(&ipmi_interfaces_srcu); 4661 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 4662 int lnt = 0; 4663 4664 if (atomic_read(&intf->event_waiters)) { 4665 intf->ticks_to_req_ev--; 4666 if (intf->ticks_to_req_ev == 0) { 4667 ipmi_request_event(intf); 4668 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 4669 } 4670 lnt++; 4671 } 4672 4673 lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); 4674 4675 lnt = !!lnt; 4676 if (lnt != intf->last_needs_timer && 4677 intf->handlers->set_need_watch) 4678 intf->handlers->set_need_watch(intf->send_info, lnt); 4679 intf->last_needs_timer = lnt; 4680 4681 nt += lnt; 4682 } 4683 srcu_read_unlock(&ipmi_interfaces_srcu, index); 4684 4685 if (nt) 4686 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 4687 } 4688 4689 static void need_waiter(struct ipmi_smi *intf) 4690 { 4691 /* Racy, but worst case we start the timer twice. 
static void need_waiter(struct ipmi_smi *intf)
{
	/* Racy, but worst case we start the timer twice. */
	if (!timer_pending(&ipmi_timer))
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	kfree(msg);
}

struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
	struct ipmi_smi_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
	if (rv) {
		rv->done = free_smi_msg;
		rv->user_data = NULL;
		atomic_inc(&smi_msg_inuse_count);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);

static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	kfree(msg);
}

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
	struct ipmi_recv_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
	if (rv) {
		rv->user = NULL;
		rv->done = free_recv_msg;
		atomic_inc(&recv_msg_inuse_count);
	}
	return rv;
}

void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user)
		kref_put(&msg->user->refcount, free_user);
	msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);

static atomic_t panic_done_count = ATOMIC_INIT(0);

static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}

/*
 * Inside a panic, send a message and wait for a response.
 */
static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
					struct ipmi_addr *addr,
					struct kernel_ipmi_msg *msg)
{
	struct ipmi_smi_msg smi_msg;
	struct ipmi_recv_msg recv_msg;
	int rv;

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;
	atomic_add(2, &panic_done_count);
	rv = i_ipmi_request(NULL,
			    intf,
			    addr,
			    0,
			    msg,
			    intf,
			    &smi_msg,
			    &recv_msg,
			    0,
			    intf->addrinfo[0].address,
			    intf->addrinfo[0].lun,
			    0, 1); /* Don't retry, and don't wait. */
	if (rv)
		atomic_sub(2, &panic_done_count);
	else if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);

	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}

static void event_receiver_fetcher(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}

static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/*
		 * A get device id command, save if we are an event
		 * receiver or generator.
		 */
		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
	}
}
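/*
 * Whether send_panic_events() below does anything at all is governed
 * by ipmi_send_panic_event, settable through the panic_op module
 * parameter.  Assuming the usual sysfs layout for module parameters
 * (path shown for illustration only):
 *
 *	# echo string > /sys/module/ipmi_msghandler/parameters/panic_op
 *
 * selects IPMI_SEND_PANIC_EVENT_STRING, i.e. one panic event plus the
 * panic string spread across OEM SEL records.
 */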
4814 */ 4815 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; 4816 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; 4817 } 4818 } 4819 4820 static void send_panic_events(struct ipmi_smi *intf, char *str) 4821 { 4822 struct kernel_ipmi_msg msg; 4823 unsigned char data[16]; 4824 struct ipmi_system_interface_addr *si; 4825 struct ipmi_addr addr; 4826 char *p = str; 4827 struct ipmi_ipmb_addr *ipmb; 4828 int j; 4829 4830 if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE) 4831 return; 4832 4833 si = (struct ipmi_system_interface_addr *) &addr; 4834 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4835 si->channel = IPMI_BMC_CHANNEL; 4836 si->lun = 0; 4837 4838 /* Fill in an event telling that we have failed. */ 4839 msg.netfn = 0x04; /* Sensor or Event. */ 4840 msg.cmd = 2; /* Platform event command. */ 4841 msg.data = data; 4842 msg.data_len = 8; 4843 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */ 4844 data[1] = 0x03; /* This is for IPMI 1.0. */ 4845 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */ 4846 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ 4847 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ 4848 4849 /* 4850 * Put a few breadcrumbs in. Hopefully later we can add more things 4851 * to make the panic events more useful. 4852 */ 4853 if (str) { 4854 data[3] = str[0]; 4855 data[6] = str[1]; 4856 data[7] = str[2]; 4857 } 4858 4859 /* Send the event announcing the panic. */ 4860 ipmi_panic_request_and_wait(intf, &addr, &msg); 4861 4862 /* 4863 * On every interface, dump a bunch of OEM event holding the 4864 * string. 4865 */ 4866 if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str) 4867 return; 4868 4869 /* 4870 * intf_num is used as an marker to tell if the 4871 * interface is valid. Thus we need a read barrier to 4872 * make sure data fetched before checking intf_num 4873 * won't be used. 4874 */ 4875 smp_rmb(); 4876 4877 /* 4878 * First job here is to figure out where to send the 4879 * OEM events. There's no way in IPMI to send OEM 4880 * events using an event send command, so we have to 4881 * find the SEL to put them in and stick them in 4882 * there. 4883 */ 4884 4885 /* Get capabilities from the get device id. */ 4886 intf->local_sel_device = 0; 4887 intf->local_event_generator = 0; 4888 intf->event_receiver = 0; 4889 4890 /* Request the device info from the local MC. */ 4891 msg.netfn = IPMI_NETFN_APP_REQUEST; 4892 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 4893 msg.data = NULL; 4894 msg.data_len = 0; 4895 intf->null_user_handler = device_id_fetcher; 4896 ipmi_panic_request_and_wait(intf, &addr, &msg); 4897 4898 if (intf->local_event_generator) { 4899 /* Request the event receiver from the local MC. */ 4900 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; 4901 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; 4902 msg.data = NULL; 4903 msg.data_len = 0; 4904 intf->null_user_handler = event_receiver_fetcher; 4905 ipmi_panic_request_and_wait(intf, &addr, &msg); 4906 } 4907 intf->null_user_handler = NULL; 4908 4909 /* 4910 * Validate the event receiver. The low bit must not 4911 * be 1 (it must be a valid IPMB address), it cannot 4912 * be zero, and it must not be my address. 4913 */ 4914 if (((intf->event_receiver & 1) == 0) 4915 && (intf->event_receiver != 0) 4916 && (intf->event_receiver != intf->addrinfo[0].address)) { 4917 /* 4918 * The event receiver is valid, send an IPMB 4919 * message. 4920 */ 4921 ipmb = (struct ipmi_ipmb_addr *) &addr; 4922 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; 4923 ipmb->channel = 0; /* FIXME - is this right? 
	j = 0;
	while (*p) {
		int size = strlen(p);

		if (size > 11)
			size = 11;
		data[0] = 0;
		data[1] = 0;
		data[2] = 0xf0; /* OEM event without timestamp. */
		data[3] = intf->addrinfo[0].address;
		data[4] = j++; /* sequence # */
		/*
		 * Always give 11 bytes, so strncpy will fill
		 * it with zeroes for me.
		 */
		strncpy(data + 5, p, 11);
		p += size;

		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
}

static int has_panicked;

static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	struct ipmi_smi *intf;
	struct ipmi_user *user;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || intf->intf_num == -1)
			/* Interface is not ready. */
			continue;

		if (!intf->handlers->poll)
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted.  In this case, drop items on the list for
		 * safety.
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);

		intf->run_to_completion = 1;
		if (intf->handlers->set_run_to_completion)
			intf->handlers->set_run_to_completion(intf->send_info,
							      1);

		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_panic_handler)
				user->handler->ipmi_panic_handler(
					user->handler_data);
		}

		send_panic_events(intf, ptr);
	}

	return NOTIFY_DONE;
}
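/*
 * Everything in panic_event() runs in single-threaded panic context
 * with interrupts off.  That is why it only ever uses spin_trylock():
 * a lock holder that was interrupted by the panic will never run
 * again, so the only safe choices are to skip the lock or to
 * reinitialize the data it protects.
 */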
/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
	int rv;

	if (drvregistered)
		return 0;

	rv = driver_register(&ipmidriver.driver);
	if (rv)
		pr_err("Could not register IPMI driver\n");
	else
		drvregistered = true;
	return rv;
}

static struct notifier_block panic_block = {
	.notifier_call	= panic_event,
	.next		= NULL,
	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
};

static int ipmi_init_msghandler(void)
{
	int rv;

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	if (rv)
		goto out;
	if (initialized)
		goto out;

	init_srcu_struct(&ipmi_interfaces_srcu);

	timer_setup(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = true;

out:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}

static int __init ipmi_init_msghandler_mod(void)
{
	int rv;

	pr_info("version " IPMI_DRIVER_VERSION "\n");

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}

static void __exit cleanup_ipmi(void)
{
	int count;

	if (initialized) {
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &panic_block);

		/*
		 * This can't be called if any interfaces exist, so no worry
		 * about shutting down the interfaces.
		 */

		/*
		 * Tell the timer to stop, then wait for it to stop.  This
		 * avoids problems with race conditions removing the timer
		 * here.
		 */
		atomic_inc(&stop_operation);
		del_timer_sync(&ipmi_timer);

		initialized = false;

		/* Check for buffer leaks. */
		count = atomic_read(&smi_msg_inuse_count);
		if (count != 0)
			pr_warn("SMI message count %d at exit\n", count);
		count = atomic_read(&recv_msg_inuse_count);
		if (count != 0)
			pr_warn("recv message count %d at exit\n", count);

		cleanup_srcu_struct(&ipmi_interfaces_srcu);
	}
	if (drvregistered)
		driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
		   " interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");