// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
#define dev_fmt pr_fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(unsigned long);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);

#ifdef DEBUG
static void ipmi_debug_msg(const char *title, unsigned char *data,
			   unsigned int len)
{
	int i, pos;
	char buf[100];

	pos = snprintf(buf, sizeof(buf), "%s: ", title);
	/*
	 * Stop appending once the buffer fills; snprintf() returns the
	 * would-be length, so pos could otherwise exceed sizeof(buf)
	 * and make the size argument below wrap around.
	 */
	for (i = 0; i < len && pos < sizeof(buf); i++)
		pos += snprintf(buf + pos, sizeof(buf) - pos,
				" %2.2x", data[i]);
	pr_debug("%s\n", buf);
}
#else
static void ipmi_debug_msg(const char *title, unsigned char *data,
			   unsigned int len)
{ }
#endif

static bool initialized;
static bool drvregistered;

enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING
};
#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif
static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	char *s;

	strncpy(valcp, val, 15);
	valcp[15] = '\0';

	s = strstrip(valcp);

	if (strcmp(s, "none") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
	else if (strcmp(s, "event") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
	else if (strcmp(s, "string") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
	else
		return -EINVAL;

	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	switch (ipmi_send_panic_event) {
	case IPMI_SEND_PANIC_EVENT_NONE:
		strcpy(buffer, "none");
		break;

	case IPMI_SEND_PANIC_EVENT:
		strcpy(buffer, "event");
		break;

	case IPMI_SEND_PANIC_EVENT_STRING:
		strcpy(buffer, "string");
		break;

	default:
		strcpy(buffer, "???");
		break;
	}

	return strlen(buffer);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
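
/*
 * Usage note (illustrative, not from the original source): since the
 * parameter above is registered with mode 0600, it can be set at load
 * time or changed later by root through sysfs, e.g.:
 *
 *	modprobe ipmi_msghandler panic_op=string
 *	echo event > /sys/module/ipmi_msghandler/parameters/panic_op
 */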

#define MAX_EVENTS_IN_QUEUE	25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at
 * least the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The default maximum number of times a message send is retried");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	/*
	 * Set to NULL when the user is destroyed, a pointer to myself
	 * so srcu_dereference can be used on it.
	 */
	struct ipmi_user *self;
	struct srcu_struct release_barrier;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void             *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;

	/* Free must run in process context for RCU cleanup. */
	struct work_struct remove_work;
};

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
	__acquires(user->release_barrier)
{
	struct ipmi_user *ruser;

	*index = srcu_read_lock(&user->release_barrier);
	ruser = srcu_dereference(user->self, &user->release_barrier);
	if (!ruser)
		srcu_read_unlock(&user->release_barrier, *index);
	return ruser;
}

static void release_ipmi_user(struct ipmi_user *user, int index)
{
	srcu_read_unlock(&user->release_barrier, index);
}
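
/*
 * A minimal sketch (not from the original source) of the pattern used
 * by every exported function below to guard against the user going
 * away mid-call:
 *
 *	int index;
 *
 *	user = acquire_ipmi_user(user, &index);
 *	if (!user)
 *		return -ENODEV;	// user is being destroyed
 *	// ... safely use user->intf, user->handler, etc. ...
 *	release_ipmi_user(user, index);
 */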

struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int  chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int         inuse : 1;
	unsigned int         broadcast : 1;

	unsigned long        timeout;
	unsigned long        orig_timeout;
	unsigned int         retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long                 seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)	\
	do {					\
		seq = (((msgid) >> 26) & 0x3f);	\
		seqid = ((msgid) & 0x3ffffff);	\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
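
/*
 * Sketch of how the macros above round-trip: the 6-bit sequence table
 * index lives in bits 26-31 of the msgid and the 26-bit generation
 * counter in bits 0-25.  For example:
 *
 *	long msgid = STORE_SEQ_IN_MSGID(5, 0x2345678);
 *	unsigned char seq;
 *	long seqid;
 *
 *	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
 *	// seq == 5, seqid == 0x2345678
 */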

#define IPMI_MAX_CHANNELS       16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head       intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id  id;
	struct ipmi_device_id  fetch_id;
	int                    dyn_id_set;
	unsigned long          dyn_id_expiry;
	struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t                 guid;
	guid_t                 fetch_guid;
	int                    dyn_guid_set;
	struct kref            usecount;
	struct work_struct     remove_work;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out over the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent over the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};


#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.  seq_lock write
	 * protects this.  Read protection is with srcu.
	 */
	struct list_head users;
	struct srcu_struct users_srcu;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void                     *send_info;

	/* Driver-model device for the system interface. */
	struct device          *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t       seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery.  If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
	 * periodic timer interrupt.  The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t       waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t         watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	spinlock_t             xmit_msgs_lock;
	struct list_head       xmit_msgs;
	struct ipmi_smi_msg    *curr_msg;
	struct list_head       hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex     cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t       events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */
	char             delivering_events;
	char             event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t         event_waiters;
	unsigned int     ticks_to_req_ev;

	spinlock_t       watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int     command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int     watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int     response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs.  Protected by watch_lock.
	 */
	unsigned int     last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures.  Used to decrease numbers of
	 * parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);


/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
static struct srcu_struct ipmi_interfaces_srcu;

/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
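
/*
 * For reference, the token pasting above means that, e.g.:
 *
 *	ipmi_inc_stat(intf, sent_ipmb_commands);
 *
 * expands to
 *
 *	atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands]);
 */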

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void clean_up_interface_data(struct ipmi_smi *intf)
{
	int              i;
	struct cmd_rcvr  *rcvr, *rcvr2;
	struct list_head list;

	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);

	clean_up_interface_data(intf);
	kfree(intf);
}

struct watcher_entry {
	int              intf_num;
	struct ipmi_smi  *intf;
	struct list_head link;
};

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	int index, rv;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		int intf_num = READ_ONCE(intf->intf_num);

		if (intf_num == -1)
			continue;
		watcher->new_smi(intf_num, intf->si_dev);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	mutex_unlock(&smi_watchers_mutex);

	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);

/*
 * Takes smi_watchers_mutex itself, so it must not be called with that
 * mutex already held.
 */
static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
	mutex_unlock(&smi_watchers_mutex);
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
		    = (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
		    = (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
		    = (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
		    = (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
		    = (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);
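
/*
 * Example (illustrative) of an address that passes the checks above:
 * a system-interface address must use IPMI_BMC_CHANNEL, while IPMB
 * and LAN addresses must use a real channel number.
 *
 *	struct ipmi_system_interface_addr addr = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *
 *	rv = ipmi_validate_addr((struct ipmi_addr *) &addr, sizeof(addr));
 */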

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk, so simply skip it in that case.
		 */
		ipmi_free_recv_msg(msg);
	} else {
		int index;
		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);

		if (user) {
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
			release_ipmi_user(user, index);
		} else {
			/* User went away, give up. */
			ipmi_free_recv_msg(msg);
			rv = -EINVAL;
		}
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}

static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}
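
/*
 * Note (added for clarity): smi_add_watch() and smi_remove_watch()
 * calls must balance, since they maintain per-reason counters.  The
 * sequence table code below is a typical pairing:
 *
 *	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);	// on allocate
 *	...
 *	smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);	// on release
 */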

/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi      *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long        timeout,
			 int                  retries,
			 int                  broadcast,
			 unsigned char        *seq,
			 long                 *seqid)
{
	int          rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
					i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}

/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi      *intf,
			 unsigned char        seq,
			 short                channel,
			 unsigned char        cmd,
			 unsigned char        netfn,
			 struct ipmi_addr     *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int           rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
				&& (msg->msg.netfn == netfn)
				&& (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long       msgid)
{
	int           rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
				&& (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long msgid,
			unsigned int err)
{
	int                  rv = -ENODEV;
	unsigned long        flags;
	unsigned char        seq;
	unsigned long        seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
				&& (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}

static void free_user_work(struct work_struct *work)
{
	struct ipmi_user *user = container_of(work, struct ipmi_user,
					      remove_work);

	cleanup_srcu_struct(&user->release_barrier);
	kfree(user);
}

int ipmi_create_user(unsigned int          if_num,
		     const struct ipmi_user_hndl *handler,
		     void                  *handler_data,
		     struct ipmi_user      **user)
{
	unsigned long flags;
	struct ipmi_user *new_user;
	int           rv, index;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
	if (!new_user)
		return -ENOMEM;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

 found:
	INIT_WORK(&new_user->remove_work, free_user_work);

	rv = init_srcu_struct(&new_user->release_barrier);
	if (rv)
		goto out_kfree;

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	rcu_assign_pointer(new_user->self, new_user);
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	*user = new_user;
	return 0;

out_kfree:
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	kfree(new_user);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
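
/*
 * A minimal client sketch (illustrative only; my_recv_handler is a
 * hypothetical callback, not part of this file):
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv_handler,
 *	};
 *	static struct ipmi_user *my_user;
 *
 *	rv = ipmi_create_user(0, &my_hndl, NULL, &my_user);
 *	if (rv)
 *		return rv;
 *	...
 *	ipmi_destroy_user(my_user);
 */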

int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv, index;
	struct ipmi_smi *intf;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	/* Not found, return an error */
	return -EINVAL;

found:
	if (!intf->handlers->get_smi_info)
		rv = -ENOTTY;
	else
		rv = intf->handlers->get_smi_info(intf->send_info, data);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

static void free_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

	/* SRCU cleanup must happen in task context. */
	schedule_work(&user->remove_work);
}

static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi  *intf = user->intf;
	int              i;
	unsigned long    flags;
	struct cmd_rcvr  *rcvr;
	struct cmd_rcvr  *rcvrs = NULL;

	if (!acquire_ipmi_user(user, &i)) {
		/*
		 * The user has already been cleaned up, just make sure
		 * nothing is using it and return.
		 */
		synchronize_srcu(&user->release_barrier);
		return;
	}

	rcu_assign_pointer(user->self, NULL);
	release_ipmi_user(user, i);

	synchronize_srcu(&user->release_barrier);

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	kref_put(&intf->refcount, intf_free);
}

int ipmi_destroy_user(struct ipmi_user *user)
{
	_ipmi_destroy_user(user);

	kref_put(&user->refcount, free_user);

	return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);

int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int  channel,
			unsigned char address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int  channel,
			unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int  channel,
		    unsigned char LUN)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int  channel,
		    unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);

int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode, index;
	unsigned long flags;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}

int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0, index;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_enable = false;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_enable = true;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
 out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
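
/*
 * Illustrative use (not from the original source): a client about to
 * stream firmware commands could pin maintenance mode on, then return
 * to the default automatic behavior afterwards:
 *
 *	rv = ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_ON);
 *	... send firmware/reset commands ...
 *	rv = ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_AUTO);
 */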

int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	unsigned long        flags;
	struct ipmi_smi      *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head     msgs;
	int index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_local_response(intf, msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);
	release_ipmi_user(user, index);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);

static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int  chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & chans))
			return 0;
	}
	return 1;
}
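
/*
 * Note (added for clarity): chans is a bitmask of channel numbers, so
 * a receiver for channels 0 and 2 only would be registered as:
 *
 *	rv = ipmi_register_for_cmd(user, netfn, cmd, (1 << 0) | (1 << 2));
 */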

int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int  chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
out_release:
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);

int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int  chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user, index);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
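
/*
 * Worked example (added for clarity): the checksum byte makes the
 * covered bytes sum to zero mod 256.  For data = { 0x20, 0x18 }:
 *
 *	ipmb_checksum(data, 2) == (unsigned char)-(0x20 + 0x18) == 0xc8
 *
 * and 0x20 + 0x18 + 0xc8 == 0x100 == 0 (mod 256), which is what the
 * receiver verifies.
 */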

static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long                  msgid,
				   unsigned char         ipmb_seq,
				   int                   broadcast,
				   unsigned char         source_address,
				   unsigned char         source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
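
/*
 * Resulting layout for a non-broadcast message (i == 0), added here
 * for reference; it follows directly from the assignments above:
 *
 *	data[0]   netfn (App request) << 2     \  Send Message
 *	data[1]   IPMI_SEND_MSG_CMD            /  wrapper
 *	data[2]   channel
 *	data[3]   rsSA  (ipmb_addr->slave_addr)
 *	data[4]   netfn << 2 | rsLUN
 *	data[5]   checksum over data[3..4]
 *	data[6]   rqSA  (source_address)
 *	data[7]   rqSeq << 2 | rqLUN
 *	data[8]   cmd
 *	data[9..] payload, then a trailing checksum over data[6..end]
 */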

static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr  *lan_addr,
				  long                  msgid,
				  unsigned char         ipmb_seq,
				  unsigned char         source_lun)
{
	/* Format the LAN header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/* Add on the checksum size (there is no broadcast offset here). */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = intf->run_to_completion;
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi        *intf,
			      struct ipmi_addr       *addr,
			      long                   msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg    *smi_msg,
			      struct ipmi_recv_msg   *recv_msg,
			      int                    retries,
			      unsigned int           retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout
			= maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && !intf->maintenance_mode_enable) {
			intf->maintenance_mode_enable = true;
			maintenance_mode_update(intf);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->user_data = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}

static int i_ipmi_req_ipmb(struct ipmi_smi        *intf,
			   struct ipmi_addr       *addr,
			   long                   msgid,
			   struct kernel_ipmi_msg *msg,
			   struct ipmi_smi_msg    *smi_msg,
			   struct ipmi_recv_msg   *recv_msg,
			   unsigned char          source_address,
			   unsigned char          source_lun,
			   int                    retries,
			   unsigned int           retry_time_ms)
{
	struct ipmi_ipmb_addr *ipmb_addr;
	unsigned char ipmb_seq;
	long seqid;
	int broadcast = 0;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
		/*
		 * Broadcasts add a zero at the beginning of the
		 * message, but otherwise is the same as an IPMB
		 * address.
		 */
		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
		broadcast = 1;
		retries = 0; /* Don't retry broadcasts. */
	}

	/*
	 * 9 for the header and 1 for the checksum, plus
	 * possibly one for the broadcast.
	 */
	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
	if (ipmb_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_ipmb_responses);
		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
				msgid, broadcast,
				source_address, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
		unsigned long flags;

		spin_lock_irqsave(&intf->seq_lock, flags);

		if (is_maintenance_mode_cmd(msg))
			intf->ipmb_maintenance_mode_timeout =
				maintenance_mode_timeout_ms;

		if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
			/* Different default in maintenance mode */
			retry_time_ms = default_maintenance_retry_ms;

		/*
		 * Create an entry in the sequence table with the
		 * requested timeout and retry count (zero/negative
		 * values select the defaults).
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   broadcast,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_ipmb_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_ipmb_msg(smi_msg, msg, ipmb_addr,
				STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
				ipmb_seq, broadcast,
				source_address, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be correct.
		 */
out_err:
		spin_unlock_irqrestore(&intf->seq_lock, flags);
	}

	return rv;
}
*/ 2128 unsigned long flags; 2129 2130 spin_lock_irqsave(&intf->seq_lock, flags); 2131 2132 /* 2133 * Create a sequence number with a 1 second 2134 * timeout and 4 retries. 2135 */ 2136 rv = intf_next_seq(intf, 2137 recv_msg, 2138 retry_time_ms, 2139 retries, 2140 0, 2141 &ipmb_seq, 2142 &seqid); 2143 if (rv) 2144 /* 2145 * We have used up all the sequence numbers, 2146 * probably, so abort. 2147 */ 2148 goto out_err; 2149 2150 ipmi_inc_stat(intf, sent_lan_commands); 2151 2152 /* 2153 * Store the sequence number in the message, 2154 * so that when the send message response 2155 * comes back we can start the timer. 2156 */ 2157 format_lan_msg(smi_msg, msg, lan_addr, 2158 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2159 ipmb_seq, source_lun); 2160 2161 /* 2162 * Copy the message into the recv message data, so we 2163 * can retransmit it later if necessary. 2164 */ 2165 memcpy(recv_msg->msg_data, smi_msg->data, 2166 smi_msg->data_size); 2167 recv_msg->msg.data = recv_msg->msg_data; 2168 recv_msg->msg.data_len = smi_msg->data_size; 2169 2170 /* 2171 * We don't unlock until here, because we need 2172 * to copy the completed message into the 2173 * recv_msg before we release the lock. 2174 * Otherwise, race conditions may bite us. I 2175 * know that's pretty paranoid, but I prefer 2176 * to be correct. 2177 */ 2178 out_err: 2179 spin_unlock_irqrestore(&intf->seq_lock, flags); 2180 } 2181 2182 return rv; 2183 } 2184 2185 /* 2186 * Separate from ipmi_request so that the user does not have to be 2187 * supplied in certain circumstances (mainly at panic time). If 2188 * messages are supplied, they will be freed, even if an error 2189 * occurs. 2190 */ 2191 static int i_ipmi_request(struct ipmi_user *user, 2192 struct ipmi_smi *intf, 2193 struct ipmi_addr *addr, 2194 long msgid, 2195 struct kernel_ipmi_msg *msg, 2196 void *user_msg_data, 2197 void *supplied_smi, 2198 struct ipmi_recv_msg *supplied_recv, 2199 int priority, 2200 unsigned char source_address, 2201 unsigned char source_lun, 2202 int retries, 2203 unsigned int retry_time_ms) 2204 { 2205 struct ipmi_smi_msg *smi_msg; 2206 struct ipmi_recv_msg *recv_msg; 2207 int rv = 0; 2208 2209 if (supplied_recv) 2210 recv_msg = supplied_recv; 2211 else { 2212 recv_msg = ipmi_alloc_recv_msg(); 2213 if (recv_msg == NULL) { 2214 rv = -ENOMEM; 2215 goto out; 2216 } 2217 } 2218 recv_msg->user_msg_data = user_msg_data; 2219 2220 if (supplied_smi) 2221 smi_msg = (struct ipmi_smi_msg *) supplied_smi; 2222 else { 2223 smi_msg = ipmi_alloc_smi_msg(); 2224 if (smi_msg == NULL) { 2225 if (!supplied_recv) 2226 ipmi_free_recv_msg(recv_msg); 2227 rv = -ENOMEM; 2228 goto out; 2229 } 2230 } 2231 2232 rcu_read_lock(); 2233 if (intf->in_shutdown) { 2234 rv = -ENODEV; 2235 goto out_err; 2236 } 2237 2238 recv_msg->user = user; 2239 if (user) 2240 /* The put happens when the message is freed. */ 2241 kref_get(&user->refcount); 2242 recv_msg->msgid = msgid; 2243 /* 2244 * Store the message to send in the receive message so timeout 2245 * responses can get the proper response data. 
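 *
 * (For example, if every retry times out, the error response that is
 * eventually delivered can still echo the original netfn and cmd
 * from this stored copy.)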
2246 */ 2247 recv_msg->msg = *msg; 2248 2249 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 2250 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, 2251 recv_msg, retries, retry_time_ms); 2252 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 2253 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, 2254 source_address, source_lun, 2255 retries, retry_time_ms); 2256 } else if (is_lan_addr(addr)) { 2257 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, 2258 source_lun, retries, retry_time_ms); 2259 } else { 2260 /* Unknown address type. */ 2261 ipmi_inc_stat(intf, sent_invalid_commands); 2262 rv = -EINVAL; 2263 } 2264 2265 if (rv) { 2266 out_err: 2267 ipmi_free_smi_msg(smi_msg); 2268 ipmi_free_recv_msg(recv_msg); 2269 } else { 2270 ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size); 2271 2272 smi_send(intf, intf->handlers, smi_msg, priority); 2273 } 2274 rcu_read_unlock(); 2275 2276 out: 2277 return rv; 2278 } 2279 2280 static int check_addr(struct ipmi_smi *intf, 2281 struct ipmi_addr *addr, 2282 unsigned char *saddr, 2283 unsigned char *lun) 2284 { 2285 if (addr->channel >= IPMI_MAX_CHANNELS) 2286 return -EINVAL; 2287 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); 2288 *lun = intf->addrinfo[addr->channel].lun; 2289 *saddr = intf->addrinfo[addr->channel].address; 2290 return 0; 2291 } 2292 2293 int ipmi_request_settime(struct ipmi_user *user, 2294 struct ipmi_addr *addr, 2295 long msgid, 2296 struct kernel_ipmi_msg *msg, 2297 void *user_msg_data, 2298 int priority, 2299 int retries, 2300 unsigned int retry_time_ms) 2301 { 2302 unsigned char saddr = 0, lun = 0; 2303 int rv, index; 2304 2305 if (!user) 2306 return -EINVAL; 2307 2308 user = acquire_ipmi_user(user, &index); 2309 if (!user) 2310 return -ENODEV; 2311 2312 rv = check_addr(user->intf, addr, &saddr, &lun); 2313 if (!rv) 2314 rv = i_ipmi_request(user, 2315 user->intf, 2316 addr, 2317 msgid, 2318 msg, 2319 user_msg_data, 2320 NULL, NULL, 2321 priority, 2322 saddr, 2323 lun, 2324 retries, 2325 retry_time_ms); 2326 2327 release_ipmi_user(user, index); 2328 return rv; 2329 } 2330 EXPORT_SYMBOL(ipmi_request_settime); 2331 2332 int ipmi_request_supply_msgs(struct ipmi_user *user, 2333 struct ipmi_addr *addr, 2334 long msgid, 2335 struct kernel_ipmi_msg *msg, 2336 void *user_msg_data, 2337 void *supplied_smi, 2338 struct ipmi_recv_msg *supplied_recv, 2339 int priority) 2340 { 2341 unsigned char saddr = 0, lun = 0; 2342 int rv, index; 2343 2344 if (!user) 2345 return -EINVAL; 2346 2347 user = acquire_ipmi_user(user, &index); 2348 if (!user) 2349 return -ENODEV; 2350 2351 rv = check_addr(user->intf, addr, &saddr, &lun); 2352 if (!rv) 2353 rv = i_ipmi_request(user, 2354 user->intf, 2355 addr, 2356 msgid, 2357 msg, 2358 user_msg_data, 2359 supplied_smi, 2360 supplied_recv, 2361 priority, 2362 saddr, 2363 lun, 2364 -1, 0); 2365 2366 release_ipmi_user(user, index); 2367 return rv; 2368 } 2369 EXPORT_SYMBOL(ipmi_request_supply_msgs); 2370 2371 static void bmc_device_id_handler(struct ipmi_smi *intf, 2372 struct ipmi_recv_msg *msg) 2373 { 2374 int rv; 2375 2376 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2377 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2378 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { 2379 dev_warn(intf->si_dev, 2380 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", 2381 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); 2382 return; 2383 } 2384 2385 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, 2386 
msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); 2387 if (rv) { 2388 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); 2389 intf->bmc->dyn_id_set = 0; 2390 } else { 2391 /* 2392 * Make sure the id data is available before setting 2393 * dyn_id_set. 2394 */ 2395 smp_wmb(); 2396 intf->bmc->dyn_id_set = 1; 2397 } 2398 2399 wake_up(&intf->waitq); 2400 } 2401 2402 static int 2403 send_get_device_id_cmd(struct ipmi_smi *intf) 2404 { 2405 struct ipmi_system_interface_addr si; 2406 struct kernel_ipmi_msg msg; 2407 2408 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2409 si.channel = IPMI_BMC_CHANNEL; 2410 si.lun = 0; 2411 2412 msg.netfn = IPMI_NETFN_APP_REQUEST; 2413 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 2414 msg.data = NULL; 2415 msg.data_len = 0; 2416 2417 return i_ipmi_request(NULL, 2418 intf, 2419 (struct ipmi_addr *) &si, 2420 0, 2421 &msg, 2422 intf, 2423 NULL, 2424 NULL, 2425 0, 2426 intf->addrinfo[0].address, 2427 intf->addrinfo[0].lun, 2428 -1, 0); 2429 } 2430 2431 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) 2432 { 2433 int rv; 2434 2435 bmc->dyn_id_set = 2; 2436 2437 intf->null_user_handler = bmc_device_id_handler; 2438 2439 rv = send_get_device_id_cmd(intf); 2440 if (rv) 2441 return rv; 2442 2443 wait_event(intf->waitq, bmc->dyn_id_set != 2); 2444 2445 if (!bmc->dyn_id_set) 2446 rv = -EIO; /* Something went wrong in the fetch. */ 2447 2448 /* dyn_id_set makes the id data available. */ 2449 smp_rmb(); 2450 2451 intf->null_user_handler = NULL; 2452 2453 return rv; 2454 } 2455 2456 /* 2457 * Fetch the device id for the bmc/interface. You must pass in either 2458 * bmc or intf, this code will get the other one. If the data has 2459 * been recently fetched, this will just use the cached data. Otherwise 2460 * it will run a new fetch. 2461 * 2462 * Except for the first time this is called (in ipmi_register_smi()), 2463 * this will always return good data; 2464 */ 2465 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2466 struct ipmi_device_id *id, 2467 bool *guid_set, guid_t *guid, int intf_num) 2468 { 2469 int rv = 0; 2470 int prev_dyn_id_set, prev_guid_set; 2471 bool intf_set = intf != NULL; 2472 2473 if (!intf) { 2474 mutex_lock(&bmc->dyn_mutex); 2475 retry_bmc_lock: 2476 if (list_empty(&bmc->intfs)) { 2477 mutex_unlock(&bmc->dyn_mutex); 2478 return -ENOENT; 2479 } 2480 intf = list_first_entry(&bmc->intfs, struct ipmi_smi, 2481 bmc_link); 2482 kref_get(&intf->refcount); 2483 mutex_unlock(&bmc->dyn_mutex); 2484 mutex_lock(&intf->bmc_reg_mutex); 2485 mutex_lock(&bmc->dyn_mutex); 2486 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi, 2487 bmc_link)) { 2488 mutex_unlock(&intf->bmc_reg_mutex); 2489 kref_put(&intf->refcount, intf_free); 2490 goto retry_bmc_lock; 2491 } 2492 } else { 2493 mutex_lock(&intf->bmc_reg_mutex); 2494 bmc = intf->bmc; 2495 mutex_lock(&bmc->dyn_mutex); 2496 kref_get(&intf->refcount); 2497 } 2498 2499 /* If we have a valid and current ID, just return that. */ 2500 if (intf->in_bmc_register || 2501 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry))) 2502 goto out_noprocessing; 2503 2504 prev_guid_set = bmc->dyn_guid_set; 2505 __get_guid(intf); 2506 2507 prev_dyn_id_set = bmc->dyn_id_set; 2508 rv = __get_device_id(intf, bmc); 2509 if (rv) 2510 goto out; 2511 2512 /* 2513 * The guid, device id, manufacturer id, and product id should 2514 * not change on a BMC. If it does we have to do some dancing. 
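 *
 * The "dancing", concretely: unregister the stale bmc_device, seed
 * the temporary BMC with the freshly fetched id/guid, then register
 * a new bmc_device and rescan the channels (all done below).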
2515 */ 2516 if (!intf->bmc_registered 2517 || (!prev_guid_set && bmc->dyn_guid_set) 2518 || (!prev_dyn_id_set && bmc->dyn_id_set) 2519 || (prev_guid_set && bmc->dyn_guid_set 2520 && !guid_equal(&bmc->guid, &bmc->fetch_guid)) 2521 || bmc->id.device_id != bmc->fetch_id.device_id 2522 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id 2523 || bmc->id.product_id != bmc->fetch_id.product_id) { 2524 struct ipmi_device_id id = bmc->fetch_id; 2525 int guid_set = bmc->dyn_guid_set; 2526 guid_t guid; 2527 2528 guid = bmc->fetch_guid; 2529 mutex_unlock(&bmc->dyn_mutex); 2530 2531 __ipmi_bmc_unregister(intf); 2532 /* Fill in the temporary BMC for good measure. */ 2533 intf->bmc->id = id; 2534 intf->bmc->dyn_guid_set = guid_set; 2535 intf->bmc->guid = guid; 2536 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num)) 2537 need_waiter(intf); /* Retry later on an error. */ 2538 else 2539 __scan_channels(intf, &id); 2540 2541 2542 if (!intf_set) { 2543 /* 2544 * We weren't given the interface on the 2545 * command line, so restart the operation on 2546 * the next interface for the BMC. 2547 */ 2548 mutex_unlock(&intf->bmc_reg_mutex); 2549 mutex_lock(&bmc->dyn_mutex); 2550 goto retry_bmc_lock; 2551 } 2552 2553 /* We have a new BMC, set it up. */ 2554 bmc = intf->bmc; 2555 mutex_lock(&bmc->dyn_mutex); 2556 goto out_noprocessing; 2557 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id))) 2558 /* Version info changes, scan the channels again. */ 2559 __scan_channels(intf, &bmc->fetch_id); 2560 2561 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 2562 2563 out: 2564 if (rv && prev_dyn_id_set) { 2565 rv = 0; /* Ignore failures if we have previous data. */ 2566 bmc->dyn_id_set = prev_dyn_id_set; 2567 } 2568 if (!rv) { 2569 bmc->id = bmc->fetch_id; 2570 if (bmc->dyn_guid_set) 2571 bmc->guid = bmc->fetch_guid; 2572 else if (prev_guid_set) 2573 /* 2574 * The guid used to be valid and it failed to fetch, 2575 * just use the cached value. 
2576 */ 2577 bmc->dyn_guid_set = prev_guid_set; 2578 } 2579 out_noprocessing: 2580 if (!rv) { 2581 if (id) 2582 *id = bmc->id; 2583 2584 if (guid_set) 2585 *guid_set = bmc->dyn_guid_set; 2586 2587 if (guid && bmc->dyn_guid_set) 2588 *guid = bmc->guid; 2589 } 2590 2591 mutex_unlock(&bmc->dyn_mutex); 2592 mutex_unlock(&intf->bmc_reg_mutex); 2593 2594 kref_put(&intf->refcount, intf_free); 2595 return rv; 2596 } 2597 2598 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2599 struct ipmi_device_id *id, 2600 bool *guid_set, guid_t *guid) 2601 { 2602 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); 2603 } 2604 2605 static ssize_t device_id_show(struct device *dev, 2606 struct device_attribute *attr, 2607 char *buf) 2608 { 2609 struct bmc_device *bmc = to_bmc_device(dev); 2610 struct ipmi_device_id id; 2611 int rv; 2612 2613 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2614 if (rv) 2615 return rv; 2616 2617 return snprintf(buf, 10, "%u\n", id.device_id); 2618 } 2619 static DEVICE_ATTR_RO(device_id); 2620 2621 static ssize_t provides_device_sdrs_show(struct device *dev, 2622 struct device_attribute *attr, 2623 char *buf) 2624 { 2625 struct bmc_device *bmc = to_bmc_device(dev); 2626 struct ipmi_device_id id; 2627 int rv; 2628 2629 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2630 if (rv) 2631 return rv; 2632 2633 return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7); 2634 } 2635 static DEVICE_ATTR_RO(provides_device_sdrs); 2636 2637 static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2638 char *buf) 2639 { 2640 struct bmc_device *bmc = to_bmc_device(dev); 2641 struct ipmi_device_id id; 2642 int rv; 2643 2644 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2645 if (rv) 2646 return rv; 2647 2648 return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F); 2649 } 2650 static DEVICE_ATTR_RO(revision); 2651 2652 static ssize_t firmware_revision_show(struct device *dev, 2653 struct device_attribute *attr, 2654 char *buf) 2655 { 2656 struct bmc_device *bmc = to_bmc_device(dev); 2657 struct ipmi_device_id id; 2658 int rv; 2659 2660 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2661 if (rv) 2662 return rv; 2663 2664 return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1, 2665 id.firmware_revision_2); 2666 } 2667 static DEVICE_ATTR_RO(firmware_revision); 2668 2669 static ssize_t ipmi_version_show(struct device *dev, 2670 struct device_attribute *attr, 2671 char *buf) 2672 { 2673 struct bmc_device *bmc = to_bmc_device(dev); 2674 struct ipmi_device_id id; 2675 int rv; 2676 2677 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2678 if (rv) 2679 return rv; 2680 2681 return snprintf(buf, 20, "%u.%u\n", 2682 ipmi_version_major(&id), 2683 ipmi_version_minor(&id)); 2684 } 2685 static DEVICE_ATTR_RO(ipmi_version); 2686 2687 static ssize_t add_dev_support_show(struct device *dev, 2688 struct device_attribute *attr, 2689 char *buf) 2690 { 2691 struct bmc_device *bmc = to_bmc_device(dev); 2692 struct ipmi_device_id id; 2693 int rv; 2694 2695 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2696 if (rv) 2697 return rv; 2698 2699 return snprintf(buf, 10, "0x%02x\n", id.additional_device_support); 2700 } 2701 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, 2702 NULL); 2703 2704 static ssize_t manufacturer_id_show(struct device *dev, 2705 struct device_attribute *attr, 2706 char *buf) 2707 { 2708 struct bmc_device *bmc = to_bmc_device(dev); 2709 struct ipmi_device_id id; 2710 int 
rv; 2711 2712 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2713 if (rv) 2714 return rv; 2715 2716 return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id); 2717 } 2718 static DEVICE_ATTR_RO(manufacturer_id); 2719 2720 static ssize_t product_id_show(struct device *dev, 2721 struct device_attribute *attr, 2722 char *buf) 2723 { 2724 struct bmc_device *bmc = to_bmc_device(dev); 2725 struct ipmi_device_id id; 2726 int rv; 2727 2728 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2729 if (rv) 2730 return rv; 2731 2732 return snprintf(buf, 10, "0x%4.4x\n", id.product_id); 2733 } 2734 static DEVICE_ATTR_RO(product_id); 2735 2736 static ssize_t aux_firmware_rev_show(struct device *dev, 2737 struct device_attribute *attr, 2738 char *buf) 2739 { 2740 struct bmc_device *bmc = to_bmc_device(dev); 2741 struct ipmi_device_id id; 2742 int rv; 2743 2744 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2745 if (rv) 2746 return rv; 2747 2748 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2749 id.aux_firmware_revision[3], 2750 id.aux_firmware_revision[2], 2751 id.aux_firmware_revision[1], 2752 id.aux_firmware_revision[0]); 2753 } 2754 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2755 2756 static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2757 char *buf) 2758 { 2759 struct bmc_device *bmc = to_bmc_device(dev); 2760 bool guid_set; 2761 guid_t guid; 2762 int rv; 2763 2764 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid); 2765 if (rv) 2766 return rv; 2767 if (!guid_set) 2768 return -ENOENT; 2769 2770 return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid); 2771 } 2772 static DEVICE_ATTR_RO(guid); 2773 2774 static struct attribute *bmc_dev_attrs[] = { 2775 &dev_attr_device_id.attr, 2776 &dev_attr_provides_device_sdrs.attr, 2777 &dev_attr_revision.attr, 2778 &dev_attr_firmware_revision.attr, 2779 &dev_attr_ipmi_version.attr, 2780 &dev_attr_additional_device_support.attr, 2781 &dev_attr_manufacturer_id.attr, 2782 &dev_attr_product_id.attr, 2783 &dev_attr_aux_firmware_revision.attr, 2784 &dev_attr_guid.attr, 2785 NULL 2786 }; 2787 2788 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, 2789 struct attribute *attr, int idx) 2790 { 2791 struct device *dev = kobj_to_dev(kobj); 2792 struct bmc_device *bmc = to_bmc_device(dev); 2793 umode_t mode = attr->mode; 2794 int rv; 2795 2796 if (attr == &dev_attr_aux_firmware_revision.attr) { 2797 struct ipmi_device_id id; 2798 2799 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2800 return (!rv && id.aux_firmware_revision_set) ? mode : 0; 2801 } 2802 if (attr == &dev_attr_guid.attr) { 2803 bool guid_set; 2804 2805 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); 2806 return (!rv && guid_set) ? 
mode : 0; 2807 } 2808 return mode; 2809 } 2810 2811 static const struct attribute_group bmc_dev_attr_group = { 2812 .attrs = bmc_dev_attrs, 2813 .is_visible = bmc_dev_attr_is_visible, 2814 }; 2815 2816 static const struct attribute_group *bmc_dev_attr_groups[] = { 2817 &bmc_dev_attr_group, 2818 NULL 2819 }; 2820 2821 static const struct device_type bmc_device_type = { 2822 .groups = bmc_dev_attr_groups, 2823 }; 2824 2825 static int __find_bmc_guid(struct device *dev, const void *data) 2826 { 2827 const guid_t *guid = data; 2828 struct bmc_device *bmc; 2829 int rv; 2830 2831 if (dev->type != &bmc_device_type) 2832 return 0; 2833 2834 bmc = to_bmc_device(dev); 2835 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid); 2836 if (rv) 2837 rv = kref_get_unless_zero(&bmc->usecount); 2838 return rv; 2839 } 2840 2841 /* 2842 * Returns with the bmc's usecount incremented, if it is non-NULL. 2843 */ 2844 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, 2845 guid_t *guid) 2846 { 2847 struct device *dev; 2848 struct bmc_device *bmc = NULL; 2849 2850 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 2851 if (dev) { 2852 bmc = to_bmc_device(dev); 2853 put_device(dev); 2854 } 2855 return bmc; 2856 } 2857 2858 struct prod_dev_id { 2859 unsigned int product_id; 2860 unsigned char device_id; 2861 }; 2862 2863 static int __find_bmc_prod_dev_id(struct device *dev, const void *data) 2864 { 2865 const struct prod_dev_id *cid = data; 2866 struct bmc_device *bmc; 2867 int rv; 2868 2869 if (dev->type != &bmc_device_type) 2870 return 0; 2871 2872 bmc = to_bmc_device(dev); 2873 rv = (bmc->id.product_id == cid->product_id 2874 && bmc->id.device_id == cid->device_id); 2875 if (rv) 2876 rv = kref_get_unless_zero(&bmc->usecount); 2877 return rv; 2878 } 2879 2880 /* 2881 * Returns with the bmc's usecount incremented, if it is non-NULL. 2882 */ 2883 static struct bmc_device *ipmi_find_bmc_prod_dev_id( 2884 struct device_driver *drv, 2885 unsigned int product_id, unsigned char device_id) 2886 { 2887 struct prod_dev_id id = { 2888 .product_id = product_id, 2889 .device_id = device_id, 2890 }; 2891 struct device *dev; 2892 struct bmc_device *bmc = NULL; 2893 2894 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 2895 if (dev) { 2896 bmc = to_bmc_device(dev); 2897 put_device(dev); 2898 } 2899 return bmc; 2900 } 2901 2902 static DEFINE_IDA(ipmi_bmc_ida); 2903 2904 static void 2905 release_bmc_device(struct device *dev) 2906 { 2907 kfree(to_bmc_device(dev)); 2908 } 2909 2910 static void cleanup_bmc_work(struct work_struct *work) 2911 { 2912 struct bmc_device *bmc = container_of(work, struct bmc_device, 2913 remove_work); 2914 int id = bmc->pdev.id; /* Unregister overwrites id */ 2915 2916 platform_device_unregister(&bmc->pdev); 2917 ida_simple_remove(&ipmi_bmc_ida, id); 2918 } 2919 2920 static void 2921 cleanup_bmc_device(struct kref *ref) 2922 { 2923 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 2924 2925 /* 2926 * Remove the platform device in a work queue to avoid issues 2927 * with removing the device attributes while reading a device 2928 * attribute. 2929 */ 2930 schedule_work(&bmc->remove_work); 2931 } 2932 2933 /* 2934 * Must be called with intf->bmc_reg_mutex held. 
2935 */ 2936 static void __ipmi_bmc_unregister(struct ipmi_smi *intf) 2937 { 2938 struct bmc_device *bmc = intf->bmc; 2939 2940 if (!intf->bmc_registered) 2941 return; 2942 2943 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 2944 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name); 2945 kfree(intf->my_dev_name); 2946 intf->my_dev_name = NULL; 2947 2948 mutex_lock(&bmc->dyn_mutex); 2949 list_del(&intf->bmc_link); 2950 mutex_unlock(&bmc->dyn_mutex); 2951 intf->bmc = &intf->tmp_bmc; 2952 kref_put(&bmc->usecount, cleanup_bmc_device); 2953 intf->bmc_registered = false; 2954 } 2955 2956 static void ipmi_bmc_unregister(struct ipmi_smi *intf) 2957 { 2958 mutex_lock(&intf->bmc_reg_mutex); 2959 __ipmi_bmc_unregister(intf); 2960 mutex_unlock(&intf->bmc_reg_mutex); 2961 } 2962 2963 /* 2964 * Must be called with intf->bmc_reg_mutex held. 2965 */ 2966 static int __ipmi_bmc_register(struct ipmi_smi *intf, 2967 struct ipmi_device_id *id, 2968 bool guid_set, guid_t *guid, int intf_num) 2969 { 2970 int rv; 2971 struct bmc_device *bmc; 2972 struct bmc_device *old_bmc; 2973 2974 /* 2975 * platform_device_register() can cause bmc_reg_mutex to 2976 * be claimed because of the is_visible functions of 2977 * the attributes. Eliminate possible recursion and 2978 * release the lock. 2979 */ 2980 intf->in_bmc_register = true; 2981 mutex_unlock(&intf->bmc_reg_mutex); 2982 2983 /* 2984 * Try to find if there is a bmc_device struct 2985 * already representing this BMC. 2986 */ 2987 mutex_lock(&ipmidriver_mutex); 2988 if (guid_set) 2989 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid); 2990 else 2991 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver, 2992 id->product_id, 2993 id->device_id); 2994 2995 /* 2996 * If there is already a bmc_device, use it; otherwise 2997 * register a new BMC device. 2998 */ 2999 if (old_bmc) { 3000 bmc = old_bmc; 3001 /* 3002 * Note: old_bmc already has usecount incremented by 3003 * the BMC find functions.
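 *
 * (They only return a device after kref_get_unless_zero() succeeds,
 * so the reference cannot race with cleanup_bmc_device().)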
3004 */ 3005 intf->bmc = old_bmc; 3006 mutex_lock(&bmc->dyn_mutex); 3007 list_add_tail(&intf->bmc_link, &bmc->intfs); 3008 mutex_unlock(&bmc->dyn_mutex); 3009 3010 dev_info(intf->si_dev, 3011 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3012 bmc->id.manufacturer_id, 3013 bmc->id.product_id, 3014 bmc->id.device_id); 3015 } else { 3016 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL); 3017 if (!bmc) { 3018 rv = -ENOMEM; 3019 goto out; 3020 } 3021 INIT_LIST_HEAD(&bmc->intfs); 3022 mutex_init(&bmc->dyn_mutex); 3023 INIT_WORK(&bmc->remove_work, cleanup_bmc_work); 3024 3025 bmc->id = *id; 3026 bmc->dyn_id_set = 1; 3027 bmc->dyn_guid_set = guid_set; 3028 bmc->guid = *guid; 3029 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 3030 3031 bmc->pdev.name = "ipmi_bmc"; 3032 3033 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL); 3034 if (rv < 0) 3035 goto out; 3036 bmc->pdev.dev.driver = &ipmidriver.driver; 3037 bmc->pdev.id = rv; 3038 bmc->pdev.dev.release = release_bmc_device; 3039 bmc->pdev.dev.type = &bmc_device_type; 3040 kref_init(&bmc->usecount); 3041 3042 intf->bmc = bmc; 3043 mutex_lock(&bmc->dyn_mutex); 3044 list_add_tail(&intf->bmc_link, &bmc->intfs); 3045 mutex_unlock(&bmc->dyn_mutex); 3046 3047 rv = platform_device_register(&bmc->pdev); 3048 if (rv) { 3049 dev_err(intf->si_dev, 3050 "Unable to register bmc device: %d\n", 3051 rv); 3052 goto out_list_del; 3053 } 3054 3055 dev_info(intf->si_dev, 3056 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3057 bmc->id.manufacturer_id, 3058 bmc->id.product_id, 3059 bmc->id.device_id); 3060 } 3061 3062 /* 3063 * create symlink from system interface device to bmc device 3064 * and back. 3065 */ 3066 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); 3067 if (rv) { 3068 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); 3069 goto out_put_bmc; 3070 } 3071 3072 if (intf_num == -1) 3073 intf_num = intf->intf_num; 3074 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); 3075 if (!intf->my_dev_name) { 3076 rv = -ENOMEM; 3077 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", 3078 rv); 3079 goto out_unlink1; 3080 } 3081 3082 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, 3083 intf->my_dev_name); 3084 if (rv) { 3085 kfree(intf->my_dev_name); 3086 intf->my_dev_name = NULL; 3087 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", 3088 rv); 3089 goto out_free_my_dev_name; 3090 } 3091 3092 intf->bmc_registered = true; 3093 3094 out: 3095 mutex_unlock(&ipmidriver_mutex); 3096 mutex_lock(&intf->bmc_reg_mutex); 3097 intf->in_bmc_register = false; 3098 return rv; 3099 3100 3101 out_free_my_dev_name: 3102 kfree(intf->my_dev_name); 3103 intf->my_dev_name = NULL; 3104 3105 out_unlink1: 3106 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3107 3108 out_put_bmc: 3109 mutex_lock(&bmc->dyn_mutex); 3110 list_del(&intf->bmc_link); 3111 mutex_unlock(&bmc->dyn_mutex); 3112 intf->bmc = &intf->tmp_bmc; 3113 kref_put(&bmc->usecount, cleanup_bmc_device); 3114 goto out; 3115 3116 out_list_del: 3117 mutex_lock(&bmc->dyn_mutex); 3118 list_del(&intf->bmc_link); 3119 mutex_unlock(&bmc->dyn_mutex); 3120 intf->bmc = &intf->tmp_bmc; 3121 put_device(&bmc->pdev.dev); 3122 goto out; 3123 } 3124 3125 static int 3126 send_guid_cmd(struct ipmi_smi *intf, int chan) 3127 { 3128 struct kernel_ipmi_msg msg; 3129 struct ipmi_system_interface_addr si; 3130 3131 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3132 si.channel = IPMI_BMC_CHANNEL; 3133 
si.lun = 0; 3134 3135 msg.netfn = IPMI_NETFN_APP_REQUEST; 3136 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 3137 msg.data = NULL; 3138 msg.data_len = 0; 3139 return i_ipmi_request(NULL, 3140 intf, 3141 (struct ipmi_addr *) &si, 3142 0, 3143 &msg, 3144 intf, 3145 NULL, 3146 NULL, 3147 0, 3148 intf->addrinfo[0].address, 3149 intf->addrinfo[0].lun, 3150 -1, 0); 3151 } 3152 3153 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3154 { 3155 struct bmc_device *bmc = intf->bmc; 3156 3157 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3158 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 3159 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 3160 /* Not for me */ 3161 return; 3162 3163 if (msg->msg.data[0] != 0) { 3164 /* Error from getting the GUID, the BMC doesn't have one. */ 3165 bmc->dyn_guid_set = 0; 3166 goto out; 3167 } 3168 3169 if (msg->msg.data_len < UUID_SIZE + 1) { 3170 bmc->dyn_guid_set = 0; 3171 dev_warn(intf->si_dev, 3172 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", 3173 msg->msg.data_len, UUID_SIZE + 1); 3174 goto out; 3175 } 3176 3177 guid_copy(&bmc->fetch_guid, (guid_t *)(msg->msg.data + 1)); 3178 /* 3179 * Make sure the guid data is available before setting 3180 * dyn_guid_set. 3181 */ 3182 smp_wmb(); 3183 bmc->dyn_guid_set = 1; 3184 out: 3185 wake_up(&intf->waitq); 3186 } 3187 3188 static void __get_guid(struct ipmi_smi *intf) 3189 { 3190 int rv; 3191 struct bmc_device *bmc = intf->bmc; 3192 3193 bmc->dyn_guid_set = 2; 3194 intf->null_user_handler = guid_handler; 3195 rv = send_guid_cmd(intf, 0); 3196 if (rv) 3197 /* Send failed, no GUID available. */ 3198 bmc->dyn_guid_set = 0; 3199 3200 wait_event(intf->waitq, bmc->dyn_guid_set != 2); 3201 3202 /* dyn_guid_set makes the guid data available. */ 3203 smp_rmb(); 3204 3205 intf->null_user_handler = NULL; 3206 } 3207 3208 static int 3209 send_channel_info_cmd(struct ipmi_smi *intf, int chan) 3210 { 3211 struct kernel_ipmi_msg msg; 3212 unsigned char data[1]; 3213 struct ipmi_system_interface_addr si; 3214 3215 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3216 si.channel = IPMI_BMC_CHANNEL; 3217 si.lun = 0; 3218 3219 msg.netfn = IPMI_NETFN_APP_REQUEST; 3220 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 3221 msg.data = data; 3222 msg.data_len = 1; 3223 data[0] = chan; 3224 return i_ipmi_request(NULL, 3225 intf, 3226 (struct ipmi_addr *) &si, 3227 0, 3228 &msg, 3229 intf, 3230 NULL, 3231 NULL, 3232 0, 3233 intf->addrinfo[0].address, 3234 intf->addrinfo[0].lun, 3235 -1, 0); 3236 } 3237 3238 static void 3239 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3240 { 3241 int rv = 0; 3242 int ch; 3243 unsigned int set = intf->curr_working_cset; 3244 struct ipmi_channel *chans; 3245 3246 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3247 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3248 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { 3249 /* It's the one we want */ 3250 if (msg->msg.data[0] != 0) { 3251 /* Got an error from the channel, just go on. */ 3252 3253 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 3254 /* 3255 * If the MC does not support this 3256 * command, that is legal. We just 3257 * assume it has one IPMB at channel 3258 * zero. 
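 *
 * (When the command is supported, the reply is decoded below as:
 * data[0] = completion code, data[2] bits 6:0 = channel medium,
 * data[3] bits 4:0 = channel protocol.)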
3259 */ 3260 intf->wchannels[set].c[0].medium 3261 = IPMI_CHANNEL_MEDIUM_IPMB; 3262 intf->wchannels[set].c[0].protocol 3263 = IPMI_CHANNEL_PROTOCOL_IPMB; 3264 3265 intf->channel_list = intf->wchannels + set; 3266 intf->channels_ready = true; 3267 wake_up(&intf->waitq); 3268 goto out; 3269 } 3270 goto next_channel; 3271 } 3272 if (msg->msg.data_len < 4) { 3273 /* Message not big enough, just go on. */ 3274 goto next_channel; 3275 } 3276 ch = intf->curr_channel; 3277 chans = intf->wchannels[set].c; 3278 chans[ch].medium = msg->msg.data[2] & 0x7f; 3279 chans[ch].protocol = msg->msg.data[3] & 0x1f; 3280 3281 next_channel: 3282 intf->curr_channel++; 3283 if (intf->curr_channel >= IPMI_MAX_CHANNELS) { 3284 intf->channel_list = intf->wchannels + set; 3285 intf->channels_ready = true; 3286 wake_up(&intf->waitq); 3287 } else { 3288 intf->channel_list = intf->wchannels + set; 3289 intf->channels_ready = true; 3290 rv = send_channel_info_cmd(intf, intf->curr_channel); 3291 } 3292 3293 if (rv) { 3294 /* Got an error somehow, just give up. */ 3295 dev_warn(intf->si_dev, 3296 "Error sending channel information for channel %d: %d\n", 3297 intf->curr_channel, rv); 3298 3299 intf->channel_list = intf->wchannels + set; 3300 intf->channels_ready = true; 3301 wake_up(&intf->waitq); 3302 } 3303 } 3304 out: 3305 return; 3306 } 3307 3308 /* 3309 * Must be holding intf->bmc_reg_mutex to call this. 3310 */ 3311 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) 3312 { 3313 int rv; 3314 3315 if (ipmi_version_major(id) > 1 3316 || (ipmi_version_major(id) == 1 3317 && ipmi_version_minor(id) >= 5)) { 3318 unsigned int set; 3319 3320 /* 3321 * Start scanning the channels to see what is 3322 * available. 3323 */ 3324 set = !intf->curr_working_cset; 3325 intf->curr_working_cset = set; 3326 memset(&intf->wchannels[set], 0, 3327 sizeof(struct ipmi_channel_set)); 3328 3329 intf->null_user_handler = channel_handler; 3330 intf->curr_channel = 0; 3331 rv = send_channel_info_cmd(intf, 0); 3332 if (rv) { 3333 dev_warn(intf->si_dev, 3334 "Error sending channel information for channel 0, %d\n", 3335 rv); 3336 return -EIO; 3337 } 3338 3339 /* Wait for the channel info to be read. */ 3340 wait_event(intf->waitq, intf->channels_ready); 3341 intf->null_user_handler = NULL; 3342 } else { 3343 unsigned int set = intf->curr_working_cset; 3344 3345 /* Assume a single IPMB channel at zero. 
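 *
 * (Get Channel Info only exists in IPMI 1.5 and later, which is what
 * the version check above guards; a 1.0 BMC ends up with the same
 * single-IPMB assumption as an MC that rejects the command.)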
*/ 3346 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 3347 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 3348 intf->channel_list = intf->wchannels + set; 3349 intf->channels_ready = true; 3350 } 3351 3352 return 0; 3353 } 3354 3355 static void ipmi_poll(struct ipmi_smi *intf) 3356 { 3357 if (intf->handlers->poll) 3358 intf->handlers->poll(intf->send_info); 3359 /* In case something came in */ 3360 handle_new_recv_msgs(intf); 3361 } 3362 3363 void ipmi_poll_interface(struct ipmi_user *user) 3364 { 3365 ipmi_poll(user->intf); 3366 } 3367 EXPORT_SYMBOL(ipmi_poll_interface); 3368 3369 static void redo_bmc_reg(struct work_struct *work) 3370 { 3371 struct ipmi_smi *intf = container_of(work, struct ipmi_smi, 3372 bmc_reg_work); 3373 3374 if (!intf->in_shutdown) 3375 bmc_get_device_id(intf, NULL, NULL, NULL, NULL); 3376 3377 kref_put(&intf->refcount, intf_free); 3378 } 3379 3380 int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, 3381 void *send_info, 3382 struct device *si_dev, 3383 unsigned char slave_addr) 3384 { 3385 int i, j; 3386 int rv; 3387 struct ipmi_smi *intf, *tintf; 3388 struct list_head *link; 3389 struct ipmi_device_id id; 3390 3391 /* 3392 * Make sure the driver is actually initialized, this handles 3393 * problems with initialization order. 3394 */ 3395 rv = ipmi_init_msghandler(); 3396 if (rv) 3397 return rv; 3398 3399 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 3400 if (!intf) 3401 return -ENOMEM; 3402 3403 rv = init_srcu_struct(&intf->users_srcu); 3404 if (rv) { 3405 kfree(intf); 3406 return rv; 3407 } 3408 3409 3410 intf->bmc = &intf->tmp_bmc; 3411 INIT_LIST_HEAD(&intf->bmc->intfs); 3412 mutex_init(&intf->bmc->dyn_mutex); 3413 INIT_LIST_HEAD(&intf->bmc_link); 3414 mutex_init(&intf->bmc_reg_mutex); 3415 intf->intf_num = -1; /* Mark it invalid for now. */ 3416 kref_init(&intf->refcount); 3417 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg); 3418 intf->si_dev = si_dev; 3419 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 3420 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR; 3421 intf->addrinfo[j].lun = 2; 3422 } 3423 if (slave_addr != 0) 3424 intf->addrinfo[0].address = slave_addr; 3425 INIT_LIST_HEAD(&intf->users); 3426 intf->handlers = handlers; 3427 intf->send_info = send_info; 3428 spin_lock_init(&intf->seq_lock); 3429 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 3430 intf->seq_table[j].inuse = 0; 3431 intf->seq_table[j].seqid = 0; 3432 } 3433 intf->curr_seq = 0; 3434 spin_lock_init(&intf->waiting_rcv_msgs_lock); 3435 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 3436 tasklet_init(&intf->recv_tasklet, 3437 smi_recv_tasklet, 3438 (unsigned long) intf); 3439 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 3440 spin_lock_init(&intf->xmit_msgs_lock); 3441 INIT_LIST_HEAD(&intf->xmit_msgs); 3442 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 3443 spin_lock_init(&intf->events_lock); 3444 spin_lock_init(&intf->watch_lock); 3445 atomic_set(&intf->event_waiters, 0); 3446 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3447 INIT_LIST_HEAD(&intf->waiting_events); 3448 intf->waiting_events_count = 0; 3449 mutex_init(&intf->cmd_rcvrs_mutex); 3450 spin_lock_init(&intf->maintenance_mode_lock); 3451 INIT_LIST_HEAD(&intf->cmd_rcvrs); 3452 init_waitqueue_head(&intf->waitq); 3453 for (i = 0; i < IPMI_NUM_STATS; i++) 3454 atomic_set(&intf->stats[i], 0); 3455 3456 mutex_lock(&ipmi_interfaces_mutex); 3457 /* Look for a hole in the numbers. 
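 *
 * (Illustration: with interfaces 0, 1 and 3 already registered, the
 * scan below stops at the entry numbered 3 and the new interface is
 * inserted before it as number 2.)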
*/ 3458 i = 0; 3459 link = &ipmi_interfaces; 3460 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) { 3461 if (tintf->intf_num != i) { 3462 link = &tintf->link; 3463 break; 3464 } 3465 i++; 3466 } 3467 /* Add the new interface in numeric order. */ 3468 if (i == 0) 3469 list_add_rcu(&intf->link, &ipmi_interfaces); 3470 else 3471 list_add_tail_rcu(&intf->link, link); 3472 3473 rv = handlers->start_processing(send_info, intf); 3474 if (rv) 3475 goto out_err; 3476 3477 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3478 if (rv) { 3479 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3480 goto out_err_started; 3481 } 3482 3483 mutex_lock(&intf->bmc_reg_mutex); 3484 rv = __scan_channels(intf, &id); 3485 mutex_unlock(&intf->bmc_reg_mutex); 3486 if (rv) 3487 goto out_err_bmc_reg; 3488 3489 /* 3490 * Keep memory order straight for RCU readers. Make 3491 * sure everything else is committed to memory before 3492 * setting intf_num to mark the interface valid. 3493 */ 3494 smp_wmb(); 3495 intf->intf_num = i; 3496 mutex_unlock(&ipmi_interfaces_mutex); 3497 3498 /* After this point the interface is legal to use. */ 3499 call_smi_watchers(i, intf->si_dev); 3500 3501 return 0; 3502 3503 out_err_bmc_reg: 3504 ipmi_bmc_unregister(intf); 3505 out_err_started: 3506 if (intf->handlers->shutdown) 3507 intf->handlers->shutdown(intf->send_info); 3508 out_err: 3509 list_del_rcu(&intf->link); 3510 mutex_unlock(&ipmi_interfaces_mutex); 3511 synchronize_srcu(&ipmi_interfaces_srcu); 3512 cleanup_srcu_struct(&intf->users_srcu); 3513 kref_put(&intf->refcount, intf_free); 3514 3515 return rv; 3516 } 3517 EXPORT_SYMBOL(ipmi_register_smi); 3518 3519 static void deliver_smi_err_response(struct ipmi_smi *intf, 3520 struct ipmi_smi_msg *msg, 3521 unsigned char err) 3522 { 3523 msg->rsp[0] = msg->data[0] | 4; 3524 msg->rsp[1] = msg->data[1]; 3525 msg->rsp[2] = err; 3526 msg->rsp_size = 3; 3527 /* It's an error, so it will never requeue, no need to check return. */ 3528 handle_one_recv_msg(intf, msg); 3529 } 3530 3531 static void cleanup_smi_msgs(struct ipmi_smi *intf) 3532 { 3533 int i; 3534 struct seq_table *ent; 3535 struct ipmi_smi_msg *msg; 3536 struct list_head *entry; 3537 struct list_head tmplist; 3538 3539 /* Clear out our transmit queues and hold the messages. */ 3540 INIT_LIST_HEAD(&tmplist); 3541 list_splice_tail(&intf->hp_xmit_msgs, &tmplist); 3542 list_splice_tail(&intf->xmit_msgs, &tmplist); 3543 3544 /* Current message first, to preserve order */ 3545 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { 3546 /* Wait for the message to clear out. */ 3547 schedule_timeout(1); 3548 } 3549 3550 /* No need for locks, the interface is down. */ 3551 3552 /* 3553 * Return errors for all pending messages in queue and in the 3554 * tables waiting for remote responses. 
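 *
 * (deliver_smi_err_response() above builds the reply in place:
 * rsp[0] = data[0] | 4 flips the netfn to its response form, e.g. an
 * App request byte 0x18 becomes 0x1c, and rsp[2] carries the
 * completion code, here IPMI_ERR_UNSPECIFIED.)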
3555 */ 3556 while (!list_empty(&tmplist)) { 3557 entry = tmplist.next; 3558 list_del(entry); 3559 msg = list_entry(entry, struct ipmi_smi_msg, link); 3560 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); 3561 } 3562 3563 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 3564 ent = &intf->seq_table[i]; 3565 if (!ent->inuse) 3566 continue; 3567 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); 3568 } 3569 } 3570 3571 void ipmi_unregister_smi(struct ipmi_smi *intf) 3572 { 3573 struct ipmi_smi_watcher *w; 3574 int intf_num = intf->intf_num, index; 3575 3576 mutex_lock(&ipmi_interfaces_mutex); 3577 intf->intf_num = -1; 3578 intf->in_shutdown = true; 3579 list_del_rcu(&intf->link); 3580 mutex_unlock(&ipmi_interfaces_mutex); 3581 synchronize_srcu(&ipmi_interfaces_srcu); 3582 3583 /* At this point no users can be added to the interface. */ 3584 3585 /* 3586 * Call all the watcher interfaces to tell them that 3587 * an interface is going away. 3588 */ 3589 mutex_lock(&smi_watchers_mutex); 3590 list_for_each_entry(w, &smi_watchers, link) 3591 w->smi_gone(intf_num); 3592 mutex_unlock(&smi_watchers_mutex); 3593 3594 index = srcu_read_lock(&intf->users_srcu); 3595 while (!list_empty(&intf->users)) { 3596 struct ipmi_user *user = 3597 container_of(list_next_rcu(&intf->users), 3598 struct ipmi_user, link); 3599 3600 _ipmi_destroy_user(user); 3601 } 3602 srcu_read_unlock(&intf->users_srcu, index); 3603 3604 if (intf->handlers->shutdown) 3605 intf->handlers->shutdown(intf->send_info); 3606 3607 cleanup_smi_msgs(intf); 3608 3609 ipmi_bmc_unregister(intf); 3610 3611 cleanup_srcu_struct(&intf->users_srcu); 3612 kref_put(&intf->refcount, intf_free); 3613 } 3614 EXPORT_SYMBOL(ipmi_unregister_smi); 3615 3616 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, 3617 struct ipmi_smi_msg *msg) 3618 { 3619 struct ipmi_ipmb_addr ipmb_addr; 3620 struct ipmi_recv_msg *recv_msg; 3621 3622 /* 3623 * This is 11, not 10, because the response must contain a 3624 * completion code. 3625 */ 3626 if (msg->rsp_size < 11) { 3627 /* Message not big enough, just ignore it. */ 3628 ipmi_inc_stat(intf, invalid_ipmb_responses); 3629 return 0; 3630 } 3631 3632 if (msg->rsp[2] != 0) { 3633 /* An error getting the response, just ignore it. */ 3634 return 0; 3635 } 3636 3637 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 3638 ipmb_addr.slave_addr = msg->rsp[6]; 3639 ipmb_addr.channel = msg->rsp[3] & 0x0f; 3640 ipmb_addr.lun = msg->rsp[7] & 3; 3641 3642 /* 3643 * It's a response from a remote entity. Look up the sequence 3644 * number and handle the response. 3645 */ 3646 if (intf_find_seq(intf, 3647 msg->rsp[7] >> 2, 3648 msg->rsp[3] & 0x0f, 3649 msg->rsp[8], 3650 (msg->rsp[4] >> 2) & (~1), 3651 (struct ipmi_addr *) &ipmb_addr, 3652 &recv_msg)) { 3653 /* 3654 * We were unable to find the sequence number, 3655 * so just nuke the message. 3656 */ 3657 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3658 return 0; 3659 } 3660 3661 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); 3662 /* 3663 * The other fields matched, so no need to set them, except 3664 * for netfn, which needs to be the response that was 3665 * returned, not the request value. 
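 *
 * (For reference, the Get Message response bytes as indexed in this
 * function: rsp[2] completion code, rsp[3] low nibble channel,
 * rsp[4] responder netfn/LUN, rsp[6] responder slave address,
 * rsp[7] rqSeq/LUN, rsp[8] cmd, rsp[9].. data, and a trailing
 * checksum in the last byte.)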
3666 */ 3667 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3668 recv_msg->msg.data = recv_msg->msg_data; 3669 recv_msg->msg.data_len = msg->rsp_size - 10; 3670 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3671 if (deliver_response(intf, recv_msg)) 3672 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3673 else 3674 ipmi_inc_stat(intf, handled_ipmb_responses); 3675 3676 return 0; 3677 } 3678 3679 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, 3680 struct ipmi_smi_msg *msg) 3681 { 3682 struct cmd_rcvr *rcvr; 3683 int rv = 0; 3684 unsigned char netfn; 3685 unsigned char cmd; 3686 unsigned char chan; 3687 struct ipmi_user *user = NULL; 3688 struct ipmi_ipmb_addr *ipmb_addr; 3689 struct ipmi_recv_msg *recv_msg; 3690 3691 if (msg->rsp_size < 10) { 3692 /* Message not big enough, just ignore it. */ 3693 ipmi_inc_stat(intf, invalid_commands); 3694 return 0; 3695 } 3696 3697 if (msg->rsp[2] != 0) { 3698 /* An error getting the response, just ignore it. */ 3699 return 0; 3700 } 3701 3702 netfn = msg->rsp[4] >> 2; 3703 cmd = msg->rsp[8]; 3704 chan = msg->rsp[3] & 0xf; 3705 3706 rcu_read_lock(); 3707 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3708 if (rcvr) { 3709 user = rcvr->user; 3710 kref_get(&user->refcount); 3711 } else 3712 user = NULL; 3713 rcu_read_unlock(); 3714 3715 if (user == NULL) { 3716 /* We didn't find a user, deliver an error response. */ 3717 ipmi_inc_stat(intf, unhandled_commands); 3718 3719 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3720 msg->data[1] = IPMI_SEND_MSG_CMD; 3721 msg->data[2] = msg->rsp[3]; 3722 msg->data[3] = msg->rsp[6]; 3723 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3724 msg->data[5] = ipmb_checksum(&msg->data[3], 2); 3725 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; 3726 /* rqseq/lun */ 3727 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3728 msg->data[8] = msg->rsp[8]; /* cmd */ 3729 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3730 msg->data[10] = ipmb_checksum(&msg->data[6], 4); 3731 msg->data_size = 11; 3732 3733 ipmi_debug_msg("Invalid command:", msg->data, msg->data_size); 3734 3735 rcu_read_lock(); 3736 if (!intf->in_shutdown) { 3737 smi_send(intf, intf->handlers, msg, 0); 3738 /* 3739 * We used the message, so return the value 3740 * that causes it to not be freed or 3741 * queued. 3742 */ 3743 rv = -1; 3744 } 3745 rcu_read_unlock(); 3746 } else { 3747 recv_msg = ipmi_alloc_recv_msg(); 3748 if (!recv_msg) { 3749 /* 3750 * We couldn't allocate memory for the 3751 * message, so requeue it for handling 3752 * later. 3753 */ 3754 rv = 1; 3755 kref_put(&user->refcount, free_user); 3756 } else { 3757 /* Extract the source address from the data. */ 3758 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 3759 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 3760 ipmb_addr->slave_addr = msg->rsp[6]; 3761 ipmb_addr->lun = msg->rsp[7] & 3; 3762 ipmb_addr->channel = msg->rsp[3] & 0xf; 3763 3764 /* 3765 * Extract the rest of the message information 3766 * from the IPMB header. 3767 */ 3768 recv_msg->user = user; 3769 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3770 recv_msg->msgid = msg->rsp[7] >> 2; 3771 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3772 recv_msg->msg.cmd = msg->rsp[8]; 3773 recv_msg->msg.data = recv_msg->msg_data; 3774 3775 /* 3776 * We chop off 10, not 9 bytes because the checksum 3777 * at the end also needs to be removed. 
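 *
 * (E.g. a 15-byte rsp yields 15 - 10 = 5 bytes of command data once
 * the trailing checksum is dropped.)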
3778 */ 3779 recv_msg->msg.data_len = msg->rsp_size - 10; 3780 memcpy(recv_msg->msg_data, &msg->rsp[9], 3781 msg->rsp_size - 10); 3782 if (deliver_response(intf, recv_msg)) 3783 ipmi_inc_stat(intf, unhandled_commands); 3784 else 3785 ipmi_inc_stat(intf, handled_commands); 3786 } 3787 } 3788 3789 return rv; 3790 } 3791 3792 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, 3793 struct ipmi_smi_msg *msg) 3794 { 3795 struct ipmi_lan_addr lan_addr; 3796 struct ipmi_recv_msg *recv_msg; 3797 3798 3799 /* 3800 * This is 13, not 12, because the response must contain a 3801 * completion code. 3802 */ 3803 if (msg->rsp_size < 13) { 3804 /* Message not big enough, just ignore it. */ 3805 ipmi_inc_stat(intf, invalid_lan_responses); 3806 return 0; 3807 } 3808 3809 if (msg->rsp[2] != 0) { 3810 /* An error getting the response, just ignore it. */ 3811 return 0; 3812 } 3813 3814 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 3815 lan_addr.session_handle = msg->rsp[4]; 3816 lan_addr.remote_SWID = msg->rsp[8]; 3817 lan_addr.local_SWID = msg->rsp[5]; 3818 lan_addr.channel = msg->rsp[3] & 0x0f; 3819 lan_addr.privilege = msg->rsp[3] >> 4; 3820 lan_addr.lun = msg->rsp[9] & 3; 3821 3822 /* 3823 * It's a response from a remote entity. Look up the sequence 3824 * number and handle the response. 3825 */ 3826 if (intf_find_seq(intf, 3827 msg->rsp[9] >> 2, 3828 msg->rsp[3] & 0x0f, 3829 msg->rsp[10], 3830 (msg->rsp[6] >> 2) & (~1), 3831 (struct ipmi_addr *) &lan_addr, 3832 &recv_msg)) { 3833 /* 3834 * We were unable to find the sequence number, 3835 * so just nuke the message. 3836 */ 3837 ipmi_inc_stat(intf, unhandled_lan_responses); 3838 return 0; 3839 } 3840 3841 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11); 3842 /* 3843 * The other fields matched, so no need to set them, except 3844 * for netfn, which needs to be the response that was 3845 * returned, not the request value. 3846 */ 3847 recv_msg->msg.netfn = msg->rsp[6] >> 2; 3848 recv_msg->msg.data = recv_msg->msg_data; 3849 recv_msg->msg.data_len = msg->rsp_size - 12; 3850 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3851 if (deliver_response(intf, recv_msg)) 3852 ipmi_inc_stat(intf, unhandled_lan_responses); 3853 else 3854 ipmi_inc_stat(intf, handled_lan_responses); 3855 3856 return 0; 3857 } 3858 3859 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, 3860 struct ipmi_smi_msg *msg) 3861 { 3862 struct cmd_rcvr *rcvr; 3863 int rv = 0; 3864 unsigned char netfn; 3865 unsigned char cmd; 3866 unsigned char chan; 3867 struct ipmi_user *user = NULL; 3868 struct ipmi_lan_addr *lan_addr; 3869 struct ipmi_recv_msg *recv_msg; 3870 3871 if (msg->rsp_size < 12) { 3872 /* Message not big enough, just ignore it. */ 3873 ipmi_inc_stat(intf, invalid_commands); 3874 return 0; 3875 } 3876 3877 if (msg->rsp[2] != 0) { 3878 /* An error getting the response, just ignore it. */ 3879 return 0; 3880 } 3881 3882 netfn = msg->rsp[6] >> 2; 3883 cmd = msg->rsp[10]; 3884 chan = msg->rsp[3] & 0xf; 3885 3886 rcu_read_lock(); 3887 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3888 if (rcvr) { 3889 user = rcvr->user; 3890 kref_get(&user->refcount); 3891 } else 3892 user = NULL; 3893 rcu_read_unlock(); 3894 3895 if (user == NULL) { 3896 /* We didn't find a user, just give up. */ 3897 ipmi_inc_stat(intf, unhandled_commands); 3898 3899 /* 3900 * Don't do anything with these messages, just allow 3901 * them to be freed. 
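 *
 * (When a user is registered, the LAN framing is unpacked below:
 * rsp[3] channel/privilege, rsp[4] session handle, rsp[5] local
 * SWID, rsp[8] remote SWID, rsp[9] rqSeq/LUN, rsp[10] cmd,
 * rsp[11].. data, minus the trailing checksum.)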
3902 */ 3903 rv = 0; 3904 } else { 3905 recv_msg = ipmi_alloc_recv_msg(); 3906 if (!recv_msg) { 3907 /* 3908 * We couldn't allocate memory for the 3909 * message, so requeue it for handling later. 3910 */ 3911 rv = 1; 3912 kref_put(&user->refcount, free_user); 3913 } else { 3914 /* Extract the source address from the data. */ 3915 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; 3916 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; 3917 lan_addr->session_handle = msg->rsp[4]; 3918 lan_addr->remote_SWID = msg->rsp[8]; 3919 lan_addr->local_SWID = msg->rsp[5]; 3920 lan_addr->lun = msg->rsp[9] & 3; 3921 lan_addr->channel = msg->rsp[3] & 0xf; 3922 lan_addr->privilege = msg->rsp[3] >> 4; 3923 3924 /* 3925 * Extract the rest of the message information 3926 * from the LAN message header. 3927 */ 3928 recv_msg->user = user; 3929 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3930 recv_msg->msgid = msg->rsp[9] >> 2; 3931 recv_msg->msg.netfn = msg->rsp[6] >> 2; 3932 recv_msg->msg.cmd = msg->rsp[10]; 3933 recv_msg->msg.data = recv_msg->msg_data; 3934 3935 /* 3936 * We chop off 12, not 11 bytes because the checksum 3937 * at the end also needs to be removed. 3938 */ 3939 recv_msg->msg.data_len = msg->rsp_size - 12; 3940 memcpy(recv_msg->msg_data, &msg->rsp[11], 3941 msg->rsp_size - 12); 3942 if (deliver_response(intf, recv_msg)) 3943 ipmi_inc_stat(intf, unhandled_commands); 3944 else 3945 ipmi_inc_stat(intf, handled_commands); 3946 } 3947 } 3948 3949 return rv; 3950 } 3951 3952 /* 3953 * This routine will handle "Get Message" command responses with 3954 * channels that use an OEM Medium. The message format belongs to 3955 * the OEM. See IPMI 2.0 specification, Chapter 6 and 3956 * Chapter 22, sections 22.6 and 22.24 for more details. 3957 */ 3958 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, 3959 struct ipmi_smi_msg *msg) 3960 { 3961 struct cmd_rcvr *rcvr; 3962 int rv = 0; 3963 unsigned char netfn; 3964 unsigned char cmd; 3965 unsigned char chan; 3966 struct ipmi_user *user = NULL; 3967 struct ipmi_system_interface_addr *smi_addr; 3968 struct ipmi_recv_msg *recv_msg; 3969 3970 /* 3971 * We expect the OEM SW to perform error checking, 3972 * so we just do some basic sanity checks. 3973 */ 3974 if (msg->rsp_size < 4) { 3975 /* Message not big enough, just ignore it. */ 3976 ipmi_inc_stat(intf, invalid_commands); 3977 return 0; 3978 } 3979 3980 if (msg->rsp[2] != 0) { 3981 /* An error getting the response, just ignore it. */ 3982 return 0; 3983 } 3984 3985 /* 3986 * This is an OEM Message so the OEM needs to know how 3987 * to handle the message. We do no interpretation. 3988 */ 3989 netfn = msg->rsp[0] >> 2; 3990 cmd = msg->rsp[1]; 3991 chan = msg->rsp[3] & 0xf; 3992 3993 rcu_read_lock(); 3994 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3995 if (rcvr) { 3996 user = rcvr->user; 3997 kref_get(&user->refcount); 3998 } else 3999 user = NULL; 4000 rcu_read_unlock(); 4001 4002 if (user == NULL) { 4003 /* We didn't find a user, just give up. */ 4004 ipmi_inc_stat(intf, unhandled_commands); 4005 4006 /* 4007 * Don't do anything with these messages, just allow 4008 * them to be freed. 4009 */ 4010 4011 rv = 0; 4012 } else { 4013 recv_msg = ipmi_alloc_recv_msg(); 4014 if (!recv_msg) { 4015 /* 4016 * We couldn't allocate memory for the 4017 * message, so requeue it for handling 4018 * later. 4019 */ 4020 rv = 1; 4021 kref_put(&user->refcount, free_user); 4022 } else { 4023 /* 4024 * OEM Messages are expected to be delivered via 4025 * the system interface to SMS software.
We might 4026 * need to visit this again depending on OEM 4027 * requirements 4028 */ 4029 smi_addr = ((struct ipmi_system_interface_addr *) 4030 &recv_msg->addr); 4031 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4032 smi_addr->channel = IPMI_BMC_CHANNEL; 4033 smi_addr->lun = msg->rsp[0] & 3; 4034 4035 recv_msg->user = user; 4036 recv_msg->user_msg_data = NULL; 4037 recv_msg->recv_type = IPMI_OEM_RECV_TYPE; 4038 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4039 recv_msg->msg.cmd = msg->rsp[1]; 4040 recv_msg->msg.data = recv_msg->msg_data; 4041 4042 /* 4043 * The message starts at byte 4 which follows the 4044 * the Channel Byte in the "GET MESSAGE" command 4045 */ 4046 recv_msg->msg.data_len = msg->rsp_size - 4; 4047 memcpy(recv_msg->msg_data, &msg->rsp[4], 4048 msg->rsp_size - 4); 4049 if (deliver_response(intf, recv_msg)) 4050 ipmi_inc_stat(intf, unhandled_commands); 4051 else 4052 ipmi_inc_stat(intf, handled_commands); 4053 } 4054 } 4055 4056 return rv; 4057 } 4058 4059 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, 4060 struct ipmi_smi_msg *msg) 4061 { 4062 struct ipmi_system_interface_addr *smi_addr; 4063 4064 recv_msg->msgid = 0; 4065 smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr; 4066 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4067 smi_addr->channel = IPMI_BMC_CHANNEL; 4068 smi_addr->lun = msg->rsp[0] & 3; 4069 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE; 4070 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4071 recv_msg->msg.cmd = msg->rsp[1]; 4072 memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3); 4073 recv_msg->msg.data = recv_msg->msg_data; 4074 recv_msg->msg.data_len = msg->rsp_size - 3; 4075 } 4076 4077 static int handle_read_event_rsp(struct ipmi_smi *intf, 4078 struct ipmi_smi_msg *msg) 4079 { 4080 struct ipmi_recv_msg *recv_msg, *recv_msg2; 4081 struct list_head msgs; 4082 struct ipmi_user *user; 4083 int rv = 0, deliver_count = 0, index; 4084 unsigned long flags; 4085 4086 if (msg->rsp_size < 19) { 4087 /* Message is too small to be an IPMB event. */ 4088 ipmi_inc_stat(intf, invalid_events); 4089 return 0; 4090 } 4091 4092 if (msg->rsp[2] != 0) { 4093 /* An error getting the event, just ignore it. */ 4094 return 0; 4095 } 4096 4097 INIT_LIST_HEAD(&msgs); 4098 4099 spin_lock_irqsave(&intf->events_lock, flags); 4100 4101 ipmi_inc_stat(intf, events); 4102 4103 /* 4104 * Allocate and fill in one message for every user that is 4105 * getting events. 4106 */ 4107 index = srcu_read_lock(&intf->users_srcu); 4108 list_for_each_entry_rcu(user, &intf->users, link) { 4109 if (!user->gets_events) 4110 continue; 4111 4112 recv_msg = ipmi_alloc_recv_msg(); 4113 if (!recv_msg) { 4114 rcu_read_unlock(); 4115 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, 4116 link) { 4117 list_del(&recv_msg->link); 4118 ipmi_free_recv_msg(recv_msg); 4119 } 4120 /* 4121 * We couldn't allocate memory for the 4122 * message, so requeue it for handling 4123 * later. 4124 */ 4125 rv = 1; 4126 goto out; 4127 } 4128 4129 deliver_count++; 4130 4131 copy_event_into_recv_msg(recv_msg, msg); 4132 recv_msg->user = user; 4133 kref_get(&user->refcount); 4134 list_add_tail(&recv_msg->link, &msgs); 4135 } 4136 srcu_read_unlock(&intf->users_srcu, index); 4137 4138 if (deliver_count) { 4139 /* Now deliver all the messages. 
*/ 4140 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { 4141 list_del(&recv_msg->link); 4142 deliver_local_response(intf, recv_msg); 4143 } 4144 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { 4145 /* 4146 * No one to receive the message, put it in queue if there's 4147 * not already too many things in the queue. 4148 */ 4149 recv_msg = ipmi_alloc_recv_msg(); 4150 if (!recv_msg) { 4151 /* 4152 * We couldn't allocate memory for the 4153 * message, so requeue it for handling 4154 * later. 4155 */ 4156 rv = 1; 4157 goto out; 4158 } 4159 4160 copy_event_into_recv_msg(recv_msg, msg); 4161 list_add_tail(&recv_msg->link, &intf->waiting_events); 4162 intf->waiting_events_count++; 4163 } else if (!intf->event_msg_printed) { 4164 /* 4165 * There's too many things in the queue, discard this 4166 * message. 4167 */ 4168 dev_warn(intf->si_dev, 4169 "Event queue full, discarding incoming events\n"); 4170 intf->event_msg_printed = 1; 4171 } 4172 4173 out: 4174 spin_unlock_irqrestore(&intf->events_lock, flags); 4175 4176 return rv; 4177 } 4178 4179 static int handle_bmc_rsp(struct ipmi_smi *intf, 4180 struct ipmi_smi_msg *msg) 4181 { 4182 struct ipmi_recv_msg *recv_msg; 4183 struct ipmi_system_interface_addr *smi_addr; 4184 4185 recv_msg = (struct ipmi_recv_msg *) msg->user_data; 4186 if (recv_msg == NULL) { 4187 dev_warn(intf->si_dev, 4188 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); 4189 return 0; 4190 } 4191 4192 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4193 recv_msg->msgid = msg->msgid; 4194 smi_addr = ((struct ipmi_system_interface_addr *) 4195 &recv_msg->addr); 4196 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4197 smi_addr->channel = IPMI_BMC_CHANNEL; 4198 smi_addr->lun = msg->rsp[0] & 3; 4199 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4200 recv_msg->msg.cmd = msg->rsp[1]; 4201 memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2); 4202 recv_msg->msg.data = recv_msg->msg_data; 4203 recv_msg->msg.data_len = msg->rsp_size - 2; 4204 deliver_local_response(intf, recv_msg); 4205 4206 return 0; 4207 } 4208 4209 /* 4210 * Handle a received message. Return 1 if the message should be requeued, 4211 * 0 if the message should be freed, or -1 if the message should not 4212 * be freed or requeued. 4213 */ 4214 static int handle_one_recv_msg(struct ipmi_smi *intf, 4215 struct ipmi_smi_msg *msg) 4216 { 4217 int requeue; 4218 int chan; 4219 4220 ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size); 4221 4222 if ((msg->data_size >= 2) 4223 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) 4224 && (msg->data[1] == IPMI_SEND_MSG_CMD) 4225 && (msg->user_data == NULL)) { 4226 4227 if (intf->in_shutdown) 4228 goto free_msg; 4229 4230 /* 4231 * This is the local response to a command send, start 4232 * the timer for these. The user_data will not be 4233 * NULL if this is a response send, and we will let 4234 * response sends just go through. 4235 */ 4236 4237 /* 4238 * Check for errors, if we get certain errors (ones 4239 * that mean basically we can try again later), we 4240 * ignore them and start the timer. Otherwise we 4241 * report the error immediately. 
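 *
 * (The retryable ones are the bus-contention style completion codes
 * listed just below; anything else fails the sequence right away via
 * intf_err_seq().)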

static int handle_bmc_rsp(struct ipmi_smi *intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
	if (recv_msg == NULL) {
		dev_warn(intf->si_dev,
			 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error.  Contact your hardware vendor for assistance.\n");
		return 0;
	}

	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	recv_msg->msgid = msg->msgid;
	smi_addr = ((struct ipmi_system_interface_addr *)
		    &recv_msg->addr);
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 2;
	deliver_local_response(intf, recv_msg);

	return 0;
}
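
/*
 * Illustrative helpers (not from the source): the first-byte encoding
 * the handlers above and below rely on.  A message's byte 0 packs the
 * network function into bits 7:2 and the LUN into bits 1:0, and a
 * response netfn is the request netfn with the low bit set.  The
 * helper names are made up.
 */
static inline unsigned char example_netfn(unsigned char byte0)
{
	return byte0 >> 2;	/* e.g. IPMI_NETFN_APP_REQUEST */
}

static inline unsigned char example_lun(unsigned char byte0)
{
	return byte0 & 3;
}

static inline unsigned char example_rsp_netfn(unsigned char req_byte0)
{
	/* Matches the "(msg->data[0] >> 2) | 1" checks below. */
	return (req_byte0 >> 2) | 1;
}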
4340 */ 4341 if (!intf->channels_ready) { 4342 requeue = 0; /* Throw the message away */ 4343 goto out; 4344 } 4345 4346 chans = READ_ONCE(intf->channel_list)->c; 4347 4348 switch (chans[chan].medium) { 4349 case IPMI_CHANNEL_MEDIUM_IPMB: 4350 if (msg->rsp[4] & 0x04) { 4351 /* 4352 * It's a response, so find the 4353 * requesting message and send it up. 4354 */ 4355 requeue = handle_ipmb_get_msg_rsp(intf, msg); 4356 } else { 4357 /* 4358 * It's a command to the SMS from some other 4359 * entity. Handle that. 4360 */ 4361 requeue = handle_ipmb_get_msg_cmd(intf, msg); 4362 } 4363 break; 4364 4365 case IPMI_CHANNEL_MEDIUM_8023LAN: 4366 case IPMI_CHANNEL_MEDIUM_ASYNC: 4367 if (msg->rsp[6] & 0x04) { 4368 /* 4369 * It's a response, so find the 4370 * requesting message and send it up. 4371 */ 4372 requeue = handle_lan_get_msg_rsp(intf, msg); 4373 } else { 4374 /* 4375 * It's a command to the SMS from some other 4376 * entity. Handle that. 4377 */ 4378 requeue = handle_lan_get_msg_cmd(intf, msg); 4379 } 4380 break; 4381 4382 default: 4383 /* Check for OEM Channels. Clients had better 4384 register for these commands. */ 4385 if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN) 4386 && (chans[chan].medium 4387 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) { 4388 requeue = handle_oem_get_msg_cmd(intf, msg); 4389 } else { 4390 /* 4391 * We don't handle the channel type, so just 4392 * free the message. 4393 */ 4394 requeue = 0; 4395 } 4396 } 4397 4398 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4399 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { 4400 /* It's an asynchronous event. */ 4401 requeue = handle_read_event_rsp(intf, msg); 4402 } else { 4403 /* It's a response from the local BMC. */ 4404 requeue = handle_bmc_rsp(intf, msg); 4405 } 4406 4407 out: 4408 return requeue; 4409 } 4410 4411 /* 4412 * If there are messages in the queue or pretimeouts, handle them. 4413 */ 4414 static void handle_new_recv_msgs(struct ipmi_smi *intf) 4415 { 4416 struct ipmi_smi_msg *smi_msg; 4417 unsigned long flags = 0; 4418 int rv; 4419 int run_to_completion = intf->run_to_completion; 4420 4421 /* See if any waiting messages need to be processed. */ 4422 if (!run_to_completion) 4423 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4424 while (!list_empty(&intf->waiting_rcv_msgs)) { 4425 smi_msg = list_entry(intf->waiting_rcv_msgs.next, 4426 struct ipmi_smi_msg, link); 4427 list_del(&smi_msg->link); 4428 if (!run_to_completion) 4429 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4430 flags); 4431 rv = handle_one_recv_msg(intf, smi_msg); 4432 if (!run_to_completion) 4433 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4434 if (rv > 0) { 4435 /* 4436 * To preserve message order, quit if we 4437 * can't handle a message. Add the message 4438 * back at the head, this is safe because this 4439 * tasklet is the only thing that pulls the 4440 * messages. 4441 */ 4442 list_add(&smi_msg->link, &intf->waiting_rcv_msgs); 4443 break; 4444 } else { 4445 if (rv == 0) 4446 /* Message handled */ 4447 ipmi_free_smi_msg(smi_msg); 4448 /* If rv < 0, fatal error, del but don't free. */ 4449 } 4450 } 4451 if (!run_to_completion) 4452 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); 4453 4454 /* 4455 * If the pretimout count is non-zero, decrement one from it and 4456 * deliver pretimeouts to all the users. 
4457 */ 4458 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { 4459 struct ipmi_user *user; 4460 int index; 4461 4462 index = srcu_read_lock(&intf->users_srcu); 4463 list_for_each_entry_rcu(user, &intf->users, link) { 4464 if (user->handler->ipmi_watchdog_pretimeout) 4465 user->handler->ipmi_watchdog_pretimeout( 4466 user->handler_data); 4467 } 4468 srcu_read_unlock(&intf->users_srcu, index); 4469 } 4470 } 4471 4472 static void smi_recv_tasklet(unsigned long val) 4473 { 4474 unsigned long flags = 0; /* keep us warning-free. */ 4475 struct ipmi_smi *intf = (struct ipmi_smi *) val; 4476 int run_to_completion = intf->run_to_completion; 4477 struct ipmi_smi_msg *newmsg = NULL; 4478 4479 /* 4480 * Start the next message if available. 4481 * 4482 * Do this here, not in the actual receiver, because we may deadlock 4483 * because the lower layer is allowed to hold locks while calling 4484 * message delivery. 4485 */ 4486 4487 rcu_read_lock(); 4488 4489 if (!run_to_completion) 4490 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4491 if (intf->curr_msg == NULL && !intf->in_shutdown) { 4492 struct list_head *entry = NULL; 4493 4494 /* Pick the high priority queue first. */ 4495 if (!list_empty(&intf->hp_xmit_msgs)) 4496 entry = intf->hp_xmit_msgs.next; 4497 else if (!list_empty(&intf->xmit_msgs)) 4498 entry = intf->xmit_msgs.next; 4499 4500 if (entry) { 4501 list_del(entry); 4502 newmsg = list_entry(entry, struct ipmi_smi_msg, link); 4503 intf->curr_msg = newmsg; 4504 } 4505 } 4506 4507 if (!run_to_completion) 4508 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4509 if (newmsg) 4510 intf->handlers->sender(intf->send_info, newmsg); 4511 4512 rcu_read_unlock(); 4513 4514 handle_new_recv_msgs(intf); 4515 } 4516 4517 /* Handle a new message from the lower layer. */ 4518 void ipmi_smi_msg_received(struct ipmi_smi *intf, 4519 struct ipmi_smi_msg *msg) 4520 { 4521 unsigned long flags = 0; /* keep us warning-free. */ 4522 int run_to_completion = intf->run_to_completion; 4523 4524 /* 4525 * To preserve message order, we keep a queue and deliver from 4526 * a tasklet. 4527 */ 4528 if (!run_to_completion) 4529 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4530 list_add_tail(&msg->link, &intf->waiting_rcv_msgs); 4531 if (!run_to_completion) 4532 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4533 flags); 4534 4535 if (!run_to_completion) 4536 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4537 /* 4538 * We can get an asynchronous event or receive message in addition 4539 * to commands we send. 4540 */ 4541 if (msg == intf->curr_msg) 4542 intf->curr_msg = NULL; 4543 if (!run_to_completion) 4544 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4545 4546 if (run_to_completion) 4547 smi_recv_tasklet((unsigned long) intf); 4548 else 4549 tasklet_schedule(&intf->recv_tasklet); 4550 } 4551 EXPORT_SYMBOL(ipmi_smi_msg_received); 4552 4553 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf) 4554 { 4555 if (intf->in_shutdown) 4556 return; 4557 4558 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); 4559 tasklet_schedule(&intf->recv_tasklet); 4560 } 4561 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 4562 4563 static struct ipmi_smi_msg * 4564 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, 4565 unsigned char seq, long seqid) 4566 { 4567 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); 4568 if (!smi_msg) 4569 /* 4570 * If we can't allocate the message, then just return, we 4571 * get 4 retries, so this should be ok. 
4572 */ 4573 return NULL; 4574 4575 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); 4576 smi_msg->data_size = recv_msg->msg.data_len; 4577 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); 4578 4579 ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size); 4580 4581 return smi_msg; 4582 } 4583 4584 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, 4585 struct list_head *timeouts, 4586 unsigned long timeout_period, 4587 int slot, unsigned long *flags, 4588 bool *need_timer) 4589 { 4590 struct ipmi_recv_msg *msg; 4591 4592 if (intf->in_shutdown) 4593 return; 4594 4595 if (!ent->inuse) 4596 return; 4597 4598 if (timeout_period < ent->timeout) { 4599 ent->timeout -= timeout_period; 4600 *need_timer = true; 4601 return; 4602 } 4603 4604 if (ent->retries_left == 0) { 4605 /* The message has used all its retries. */ 4606 ent->inuse = 0; 4607 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 4608 msg = ent->recv_msg; 4609 list_add_tail(&msg->link, timeouts); 4610 if (ent->broadcast) 4611 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); 4612 else if (is_lan_addr(&ent->recv_msg->addr)) 4613 ipmi_inc_stat(intf, timed_out_lan_commands); 4614 else 4615 ipmi_inc_stat(intf, timed_out_ipmb_commands); 4616 } else { 4617 struct ipmi_smi_msg *smi_msg; 4618 /* More retries, send again. */ 4619 4620 *need_timer = true; 4621 4622 /* 4623 * Start with the max timer, set to normal timer after 4624 * the message is sent. 4625 */ 4626 ent->timeout = MAX_MSG_TIMEOUT; 4627 ent->retries_left--; 4628 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 4629 ent->seqid); 4630 if (!smi_msg) { 4631 if (is_lan_addr(&ent->recv_msg->addr)) 4632 ipmi_inc_stat(intf, 4633 dropped_rexmit_lan_commands); 4634 else 4635 ipmi_inc_stat(intf, 4636 dropped_rexmit_ipmb_commands); 4637 return; 4638 } 4639 4640 spin_unlock_irqrestore(&intf->seq_lock, *flags); 4641 4642 /* 4643 * Send the new message. We send with a zero 4644 * priority. It timed out, I doubt time is that 4645 * critical now, and high priority messages are really 4646 * only for messages to the local MC, which don't get 4647 * resent. 4648 */ 4649 if (intf->handlers) { 4650 if (is_lan_addr(&ent->recv_msg->addr)) 4651 ipmi_inc_stat(intf, 4652 retransmitted_lan_commands); 4653 else 4654 ipmi_inc_stat(intf, 4655 retransmitted_ipmb_commands); 4656 4657 smi_send(intf, intf->handlers, smi_msg, 0); 4658 } else 4659 ipmi_free_smi_msg(smi_msg); 4660 4661 spin_lock_irqsave(&intf->seq_lock, *flags); 4662 } 4663 } 4664 4665 static bool ipmi_timeout_handler(struct ipmi_smi *intf, 4666 unsigned long timeout_period) 4667 { 4668 struct list_head timeouts; 4669 struct ipmi_recv_msg *msg, *msg2; 4670 unsigned long flags; 4671 int i; 4672 bool need_timer = false; 4673 4674 if (!intf->bmc_registered) { 4675 kref_get(&intf->refcount); 4676 if (!schedule_work(&intf->bmc_reg_work)) { 4677 kref_put(&intf->refcount, intf_free); 4678 need_timer = true; 4679 } 4680 } 4681 4682 /* 4683 * Go through the seq table and find any messages that 4684 * have timed out, putting them in the timeouts 4685 * list. 
4686 */ 4687 INIT_LIST_HEAD(&timeouts); 4688 spin_lock_irqsave(&intf->seq_lock, flags); 4689 if (intf->ipmb_maintenance_mode_timeout) { 4690 if (intf->ipmb_maintenance_mode_timeout <= timeout_period) 4691 intf->ipmb_maintenance_mode_timeout = 0; 4692 else 4693 intf->ipmb_maintenance_mode_timeout -= timeout_period; 4694 } 4695 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) 4696 check_msg_timeout(intf, &intf->seq_table[i], 4697 &timeouts, timeout_period, i, 4698 &flags, &need_timer); 4699 spin_unlock_irqrestore(&intf->seq_lock, flags); 4700 4701 list_for_each_entry_safe(msg, msg2, &timeouts, link) 4702 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE); 4703 4704 /* 4705 * Maintenance mode handling. Check the timeout 4706 * optimistically before we claim the lock. It may 4707 * mean a timeout gets missed occasionally, but that 4708 * only means the timeout gets extended by one period 4709 * in that case. No big deal, and it avoids the lock 4710 * most of the time. 4711 */ 4712 if (intf->auto_maintenance_timeout > 0) { 4713 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 4714 if (intf->auto_maintenance_timeout > 0) { 4715 intf->auto_maintenance_timeout 4716 -= timeout_period; 4717 if (!intf->maintenance_mode 4718 && (intf->auto_maintenance_timeout <= 0)) { 4719 intf->maintenance_mode_enable = false; 4720 maintenance_mode_update(intf); 4721 } 4722 } 4723 spin_unlock_irqrestore(&intf->maintenance_mode_lock, 4724 flags); 4725 } 4726 4727 tasklet_schedule(&intf->recv_tasklet); 4728 4729 return need_timer; 4730 } 4731 4732 static void ipmi_request_event(struct ipmi_smi *intf) 4733 { 4734 /* No event requests when in maintenance mode. */ 4735 if (intf->maintenance_mode_enable) 4736 return; 4737 4738 if (!intf->in_shutdown) 4739 intf->handlers->request_events(intf->send_info); 4740 } 4741 4742 static struct timer_list ipmi_timer; 4743 4744 static atomic_t stop_operation; 4745 4746 static void ipmi_timeout(struct timer_list *unused) 4747 { 4748 struct ipmi_smi *intf; 4749 bool need_timer = false; 4750 int index; 4751 4752 if (atomic_read(&stop_operation)) 4753 return; 4754 4755 index = srcu_read_lock(&ipmi_interfaces_srcu); 4756 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 4757 if (atomic_read(&intf->event_waiters)) { 4758 intf->ticks_to_req_ev--; 4759 if (intf->ticks_to_req_ev == 0) { 4760 ipmi_request_event(intf); 4761 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 4762 } 4763 need_timer = true; 4764 } 4765 4766 need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); 4767 } 4768 srcu_read_unlock(&ipmi_interfaces_srcu, index); 4769 4770 if (need_timer) 4771 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 4772 } 4773 4774 static void need_waiter(struct ipmi_smi *intf) 4775 { 4776 /* Racy, but worst case we start the timer twice. 

static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	kfree(msg);
}

struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
	struct ipmi_smi_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
	if (rv) {
		rv->done = free_smi_msg;
		rv->user_data = NULL;
		atomic_inc(&smi_msg_inuse_count);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);

static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	kfree(msg);
}

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
	struct ipmi_recv_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
	if (rv) {
		rv->user = NULL;
		rv->done = free_recv_msg;
		atomic_inc(&recv_msg_inuse_count);
	}
	return rv;
}

void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user)
		kref_put(&msg->user->refcount, free_user);
	msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
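
/*
 * Illustrative sketch (not part of the driver): the done() callback
 * installed above is what lets messages live anywhere -- kmalloc()ed
 * here, or on the stack as in ipmi_panic_request_and_wait() below --
 * while the free path stays uniform.  The example names are made up.
 */
static void example_noop_done(struct ipmi_smi_msg *msg)
{
	/* Stack-allocated message: nothing to free. */
}

static void example_msg_lifecycle(void)
{
	struct ipmi_smi_msg stack_msg = { .done = example_noop_done };
	struct ipmi_smi_msg *heap_msg = ipmi_alloc_smi_msg();

	if (heap_msg)
		ipmi_free_smi_msg(heap_msg);	/* ends up in free_smi_msg() */
	stack_msg.done(&stack_msg);		/* no-op for stack storage */
}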
4899 */ 4900 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; 4901 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; 4902 } 4903 } 4904 4905 static void send_panic_events(struct ipmi_smi *intf, char *str) 4906 { 4907 struct kernel_ipmi_msg msg; 4908 unsigned char data[16]; 4909 struct ipmi_system_interface_addr *si; 4910 struct ipmi_addr addr; 4911 char *p = str; 4912 struct ipmi_ipmb_addr *ipmb; 4913 int j; 4914 4915 if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE) 4916 return; 4917 4918 si = (struct ipmi_system_interface_addr *) &addr; 4919 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4920 si->channel = IPMI_BMC_CHANNEL; 4921 si->lun = 0; 4922 4923 /* Fill in an event telling that we have failed. */ 4924 msg.netfn = 0x04; /* Sensor or Event. */ 4925 msg.cmd = 2; /* Platform event command. */ 4926 msg.data = data; 4927 msg.data_len = 8; 4928 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */ 4929 data[1] = 0x03; /* This is for IPMI 1.0. */ 4930 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */ 4931 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ 4932 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ 4933 4934 /* 4935 * Put a few breadcrumbs in. Hopefully later we can add more things 4936 * to make the panic events more useful. 4937 */ 4938 if (str) { 4939 data[3] = str[0]; 4940 data[6] = str[1]; 4941 data[7] = str[2]; 4942 } 4943 4944 /* Send the event announcing the panic. */ 4945 ipmi_panic_request_and_wait(intf, &addr, &msg); 4946 4947 /* 4948 * On every interface, dump a bunch of OEM event holding the 4949 * string. 4950 */ 4951 if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str) 4952 return; 4953 4954 /* 4955 * intf_num is used as an marker to tell if the 4956 * interface is valid. Thus we need a read barrier to 4957 * make sure data fetched before checking intf_num 4958 * won't be used. 4959 */ 4960 smp_rmb(); 4961 4962 /* 4963 * First job here is to figure out where to send the 4964 * OEM events. There's no way in IPMI to send OEM 4965 * events using an event send command, so we have to 4966 * find the SEL to put them in and stick them in 4967 * there. 4968 */ 4969 4970 /* Get capabilities from the get device id. */ 4971 intf->local_sel_device = 0; 4972 intf->local_event_generator = 0; 4973 intf->event_receiver = 0; 4974 4975 /* Request the device info from the local MC. */ 4976 msg.netfn = IPMI_NETFN_APP_REQUEST; 4977 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 4978 msg.data = NULL; 4979 msg.data_len = 0; 4980 intf->null_user_handler = device_id_fetcher; 4981 ipmi_panic_request_and_wait(intf, &addr, &msg); 4982 4983 if (intf->local_event_generator) { 4984 /* Request the event receiver from the local MC. */ 4985 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; 4986 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; 4987 msg.data = NULL; 4988 msg.data_len = 0; 4989 intf->null_user_handler = event_receiver_fetcher; 4990 ipmi_panic_request_and_wait(intf, &addr, &msg); 4991 } 4992 intf->null_user_handler = NULL; 4993 4994 /* 4995 * Validate the event receiver. The low bit must not 4996 * be 1 (it must be a valid IPMB address), it cannot 4997 * be zero, and it must not be my address. 4998 */ 4999 if (((intf->event_receiver & 1) == 0) 5000 && (intf->event_receiver != 0) 5001 && (intf->event_receiver != intf->addrinfo[0].address)) { 5002 /* 5003 * The event receiver is valid, send an IPMB 5004 * message. 5005 */ 5006 ipmb = (struct ipmi_ipmb_addr *) &addr; 5007 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; 5008 ipmb->channel = 0; /* FIXME - is this right? 

static void send_panic_events(struct ipmi_smi *intf, char *str)
{
	struct kernel_ipmi_msg msg;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;
	char *p = str;
	struct ipmi_ipmb_addr *ipmb;
	int j;

	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
		return;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/*
	 * Put a few breadcrumbs in.  Hopefully later we can add more things
	 * to make the panic events more useful.
	 */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	/* Send the event announcing the panic. */
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	/*
	 * Next, on this interface, dump a bunch of OEM events holding
	 * the string.
	 */
	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
		return;

	/*
	 * intf_num is used as a marker to tell if the
	 * interface is valid.  Thus we need a read barrier to
	 * make sure data fetched before checking intf_num
	 * won't be used.
	 */
	smp_rmb();

	/*
	 * First job here is to figure out where to send the
	 * OEM events.  There's no way in IPMI to send OEM
	 * events using an event send command, so we have to
	 * find the SEL to put them in and stick them in
	 * there.
	 */

	/* Get capabilities from the get device id. */
	intf->local_sel_device = 0;
	intf->local_event_generator = 0;
	intf->event_receiver = 0;

	/* Request the device info from the local MC. */
	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	intf->null_user_handler = device_id_fetcher;
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	if (intf->local_event_generator) {
		/* Request the event receiver from the local MC. */
		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = event_receiver_fetcher;
		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
	intf->null_user_handler = NULL;

	/*
	 * Validate the event receiver.  The low bit must not
	 * be 1 (it must be a valid IPMB address), it cannot
	 * be zero, and it must not be my address.
	 */
	if (((intf->event_receiver & 1) == 0)
	    && (intf->event_receiver != 0)
	    && (intf->event_receiver != intf->addrinfo[0].address)) {
		/*
		 * The event receiver is valid, send an IPMB
		 * message.
		 */
		ipmb = (struct ipmi_ipmb_addr *) &addr;
		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
		ipmb->channel = 0; /* FIXME - is this right? */
		ipmb->lun = intf->event_receiver_lun;
		ipmb->slave_addr = intf->event_receiver;
	} else if (intf->local_sel_device) {
		/*
		 * The event receiver was not valid (or was
		 * me), but I am an SEL device, just dump it
		 * in my SEL.
		 */
		si = (struct ipmi_system_interface_addr *) &addr;
		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		si->channel = IPMI_BMC_CHANNEL;
		si->lun = 0;
	} else
		return; /* Nowhere to send the event. */

	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
	msg.data = data;
	msg.data_len = 16;

	j = 0;
	while (*p) {
		int size = strlen(p);

		if (size > 11)
			size = 11;
		data[0] = 0;
		data[1] = 0;
		data[2] = 0xf0; /* OEM event without timestamp. */
		data[3] = intf->addrinfo[0].address;
		data[4] = j++; /* sequence # */
		/*
		 * Always give 11 bytes, so strncpy will fill
		 * it with zeroes for me.
		 */
		strncpy(data+5, p, 11);
		p += size;

		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
}
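
/*
 * Worked example (illustrative): each SEL record built above carries
 * at most 11 bytes of the panic string at data[5..15], zero-padded by
 * strncpy().  A panic string of "Out of memory" would therefore go out
 * as two Add SEL Entry requests:
 *
 *	record 0: data[4] = 0, data[5..15] = "Out of memo"
 *	record 1: data[4] = 1, data[5..15] = "ry" + nine zero bytes
 */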

static int has_panicked;

static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	struct ipmi_smi *intf;
	struct ipmi_user *user;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || intf->intf_num == -1)
			/* Interface is not ready. */
			continue;

		if (!intf->handlers->poll)
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted.  In this case, drop items on the list for
		 * safety.
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);

		intf->run_to_completion = 1;
		if (intf->handlers->set_run_to_completion)
			intf->handlers->set_run_to_completion(intf->send_info,
							      1);

		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_panic_handler)
				user->handler->ipmi_panic_handler(
					user->handler_data);
		}

		send_panic_events(intf, ptr);
	}

	return NOTIFY_DONE;
}

/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
	int rv;

	if (drvregistered)
		return 0;

	rv = driver_register(&ipmidriver.driver);
	if (rv)
		pr_err("Could not register IPMI driver\n");
	else
		drvregistered = true;
	return rv;
}

static struct notifier_block panic_block = {
	.notifier_call	= panic_event,
	.next		= NULL,
	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
};

static int ipmi_init_msghandler(void)
{
	int rv;

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	if (rv)
		goto out;
	if (initialized)
		goto out;

	init_srcu_struct(&ipmi_interfaces_srcu);

	timer_setup(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = true;

out:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}

static int __init ipmi_init_msghandler_mod(void)
{
	int rv;

	pr_info("version " IPMI_DRIVER_VERSION "\n");

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}

static void __exit cleanup_ipmi(void)
{
	int count;

	if (initialized) {
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &panic_block);

		/*
		 * This can't be called if any interfaces exist, so no worry
		 * about shutting down the interfaces.
		 */

		/*
		 * Tell the timer to stop, then wait for it to stop.  This
		 * avoids problems with race conditions removing the timer
		 * here.
		 */
		atomic_set(&stop_operation, 1);
		del_timer_sync(&ipmi_timer);

		initialized = false;

		/* Check for buffer leaks. */
		count = atomic_read(&smi_msg_inuse_count);
		if (count != 0)
			pr_warn("SMI message count %d at exit\n", count);
		count = atomic_read(&recv_msg_inuse_count);
		if (count != 0)
			pr_warn("recv message count %d at exit\n", count);

		cleanup_srcu_struct(&ipmi_interfaces_srcu);
	}
	if (drvregistered)
		driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");