// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
#define dev_fmt pr_fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(struct tasklet_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);

static bool initialized;
static bool drvregistered;

/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING,
	IPMI_SEND_PANIC_EVENT_MAX
};

/* Indices in this array should be mapped to enum ipmi_panic_event_op */
static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };

#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	int e;

	strscpy(valcp, val, sizeof(valcp));
	e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
	if (e < 0)
		return e;

	ipmi_send_panic_event = e;
	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	const char *event_str;

	if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
		event_str = "???";
	else
		event_str = ipmi_panic_event_str[ipmi_send_panic_event];

	return sprintf(buffer, "%s\n", event_str);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
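
/*
 * Example (sketch): with this file built as the ipmi_msghandler
 * module, the parameter can be changed at runtime, e.g.
 *	echo string > /sys/module/ipmi_msghandler/parameters/panic_op
 * or set when loading with "modprobe ipmi_msghandler panic_op=string".
 */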

#define MAX_EVENTS_IN_QUEUE	25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at
 * least the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The number of retries for a message before it is failed");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	/*
	 * Set to NULL when the user is destroyed, a pointer to myself
	 * so srcu_dereference can be used on it.
	 */
	struct ipmi_user *self;
	struct srcu_struct release_barrier;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;

	/* Free must run in process context for RCU cleanup. */
	struct work_struct remove_work;
};
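
/*
 * Usage sketch: callers pin a user with acquire_ipmi_user() and must
 * balance it with release_ipmi_user() using the same index, e.g.
 *	user = acquire_ipmi_user(user, &index);
 *	if (!user)
 *		return -ENODEV;
 *	...
 *	release_ipmi_user(user, index);
 */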
static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
	__acquires(user->release_barrier)
{
	struct ipmi_user *ruser;

	*index = srcu_read_lock(&user->release_barrier);
	ruser = srcu_dereference(user->self, &user->release_barrier);
	if (!ruser)
		srcu_read_unlock(&user->release_barrier, *index);
	return ruser;
}

static void release_ipmi_user(struct ipmi_user *user, int index)
{
	srcu_read_unlock(&user->release_barrier, index);
}

struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int  chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do {								\
		seq = (((msgid) >> 26) & 0x3f);				\
		seqid = ((msgid) & 0x3ffffff);				\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
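
/*
 * Example: sequence number 5 with seqid 0x123456 packs into msgid
 * (5 << 26) | 0x123456 = 0x14123456, and GET_SEQ_FROM_MSGID recovers
 * both fields from that value.
 */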

#define IPMI_MAX_CHANNELS	16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head       intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id  id;
	struct ipmi_device_id  fetch_id;
	int                    dyn_id_set;
	unsigned long          dyn_id_expiry;
	struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t                 guid;
	guid_t                 fetch_guid;
	int                    dyn_guid_set;
	struct kref            usecount;
	struct work_struct     remove_work;
	unsigned char          cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out to the LAN interface. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent to the LAN interface. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};


#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.  seq_lock write
	 * protects this.  Read protection is with srcu.
	 */
	struct list_head users;
	struct srcu_struct users_srcu;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register; /* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void *send_info;

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery.  If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
	 * periodic timer interrupt.  The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	spinlock_t xmit_msgs_lock;
	struct list_head xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int waiting_events_count; /* How many events in queue? */
	char delivering_events;
	char event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;

	spinlock_t watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs.  Protected by watch_lock.
	 */
	unsigned int last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * A duplicate of the run_to_completion flag in the smb_info,
	 * smi_info and ipmi_serial_info structures.  Used to decrease
	 * the number of parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);


/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
#define ipmi_interfaces_mutex_held() \
	lockdep_is_held(&ipmi_interfaces_mutex)
static struct srcu_struct ipmi_interfaces_srcu;

/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
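
/*
 * Example: ipmi_inc_stat(intf, sent_local_commands) expands to
 * atomic_inc(&intf->stats[IPMI_STAT_sent_local_commands]).
 */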

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void clean_up_interface_data(struct ipmi_smi *intf)
{
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;
	struct list_head list;

	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);

	clean_up_interface_data(intf);
	kfree(intf);
}

struct watcher_entry {
	int              intf_num;
	struct ipmi_smi  *intf;
	struct list_head link;
};

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	int index, rv;

	/*
	 * Make sure the driver is actually initialized; this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
				lockdep_is_held(&smi_watchers_mutex)) {
		int intf_num = READ_ONCE(intf->intf_num);

		if (intf_num == -1)
			continue;
		watcher->new_smi(intf_num, intf->si_dev);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	mutex_unlock(&smi_watchers_mutex);

	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);

/*
 * Takes smi_watchers_mutex itself, so the caller must not hold it.
 */
static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
	mutex_unlock(&smi_watchers_mutex);
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
			= (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
			= (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
			= (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
			= (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
			= (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk, so simply skip it in that case.
		 */
		ipmi_free_recv_msg(msg);
	} else {
		int index;
		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);

		if (user) {
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
			release_ipmi_user(user, index);
		} else {
			/* User went away, give up. */
			ipmi_free_recv_msg(msg);
			rv = -EINVAL;
		}
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}

static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}
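
/*
 * Note: smi_add_watch() and smi_remove_watch() act as per-flag
 * reference counts.  For example, two outstanding message waits bump
 * response_waiters to 2, and the lower layer is only told to stop
 * watching for messages when the second one is removed.
 */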

/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi      *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long        timeout,
			 int                  retries,
			 int                  broadcast,
			 unsigned char        *seq,
			 long                 *seqid)
{
	int rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}

/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi      *intf,
			 unsigned char        seq,
			 short                channel,
			 unsigned char        cmd,
			 unsigned char        netfn,
			 struct ipmi_addr     *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long msgid)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long msgid,
			unsigned int err)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}
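
/*
 * Example: intf_err_seq(intf, msgid, IPMI_ERR_UNSPECIFIED) claims the
 * matching sequence table entry and hands the queued recv_msg back to
 * the user as a one-byte error response carrying that completion code.
 */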

static void free_user_work(struct work_struct *work)
{
	struct ipmi_user *user = container_of(work, struct ipmi_user,
					      remove_work);

	cleanup_srcu_struct(&user->release_barrier);
	vfree(user);
}

int ipmi_create_user(unsigned int if_num,
		     const struct ipmi_user_hndl *handler,
		     void *handler_data,
		     struct ipmi_user **user)
{
	unsigned long flags;
	struct ipmi_user *new_user;
	int rv, index;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be called from other
	 * modules, they will implicitly use this module, and thus
	 * this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized; this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	new_user = vzalloc(sizeof(*new_user));
	if (!new_user)
		return -ENOMEM;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

 found:
	INIT_WORK(&new_user->remove_work, free_user_work);

	rv = init_srcu_struct(&new_user->release_barrier);
	if (rv)
		goto out_kfree;

	if (!try_module_get(intf->owner)) {
		rv = -ENODEV;
		goto out_kfree;
	}

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	rcu_assign_pointer(new_user->self, new_user);
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	*user = new_user;
	return 0;

out_kfree:
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	vfree(new_user);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
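
/*
 * Minimal usage sketch (error handling elided; my_recv_hndl is a
 * hypothetical callback):
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv_hndl,
 *	};
 *	struct ipmi_user *user;
 *
 *	if (!ipmi_create_user(0, &my_hndl, NULL, &user)) {
 *		...
 *		ipmi_destroy_user(user);
 *	}
 */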

int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv, index;
	struct ipmi_smi *intf;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	/* Not found, return an error */
	return -EINVAL;

found:
	if (!intf->handlers->get_smi_info)
		rv = -ENOTTY;
	else
		rv = intf->handlers->get_smi_info(intf->send_info, data);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

static void free_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

	/* SRCU cleanup must happen in task context. */
	schedule_work(&user->remove_work);
}

static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;
	int i;
	unsigned long flags;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;

	if (!acquire_ipmi_user(user, &i)) {
		/*
		 * The user has already been cleaned up, just make sure
		 * nothing is using it and return.
		 */
		synchronize_srcu(&user->release_barrier);
		return;
	}

	rcu_assign_pointer(user->self, NULL);
	release_ipmi_user(user, i);

	synchronize_srcu(&user->release_barrier);

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receivers' table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	kref_put(&intf->refcount, intf_free);
	module_put(intf->owner);
}

int ipmi_destroy_user(struct ipmi_user *user)
{
	_ipmi_destroy_user(user);

	kref_put(&user->refcount, free_user);

	return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);

int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char LUN)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);
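
/*
 * Example: ipmi_set_my_address(user, 0, 0x20) makes 0x20 the slave
 * address used as the source address for requests sent on channel 0.
 */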

int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode, index;
	unsigned long flags;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}

int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0, index;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_enable = false;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_enable = true;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
 out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
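
/*
 * Example: ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_ON)
 * forces maintenance mode on; IPMI_MAINTENANCE_MODE_AUTO enables it
 * only while maintenance commands (cold/warm reset, firmware netfn)
 * are in flight.
 */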

int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;
	int index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_local_response(intf, msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);
	release_ipmi_user(user, index);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);

static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & chans))
			return 0;
	}
	return 1;
}

int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
out_release:
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);

int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user, index);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
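
/*
 * Example: for the two bytes { 0x20, 0x18 } the running sum is 0x38,
 * so ipmb_checksum() returns 0xc8; adding all three bytes gives 0
 * modulo 256, which is what the checksum must satisfy.
 */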

static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}

static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  long msgid,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
{
	/* Format the LAN header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/* Add on the checksum size. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}
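
/*
 * Sketch of the wrapped layout both formatters produce: bytes 0-2 are
 * the Send Message header (netfn/LUN, cmd, channel) consumed by the
 * BMC, and the remaining bytes are the embedded frame, e.g. for IPMB:
 * rsAddr, netFn/rsLUN, check1, rqAddr, rqSeq/rqLUN, cmd, data...,
 * check2.
 */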

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = intf->run_to_completion;
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
			      struct ipmi_addr *addr,
			      long msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg *smi_msg,
			      struct ipmi_recv_msg *recv_msg,
			      int retries,
			      unsigned int retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout
			= maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && !intf->maintenance_mode_enable) {
			intf->maintenance_mode_enable = true;
			maintenance_mode_update(intf);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->user_data = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}

static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
			   struct ipmi_addr *addr,
			   long msgid,
			   struct kernel_ipmi_msg *msg,
			   struct ipmi_smi_msg *smi_msg,
			   struct ipmi_recv_msg *recv_msg,
			   unsigned char source_address,
			   unsigned char source_lun,
			   int retries,
			   unsigned int retry_time_ms)
{
	struct ipmi_ipmb_addr *ipmb_addr;
	unsigned char ipmb_seq;
	long seqid;
	int broadcast = 0;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
		/*
		 * Broadcasts add a zero at the beginning of the
		 * message, but are otherwise the same as an IPMB
		 * address.
		 */
		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
		broadcast = 1;
		retries = 0; /* Don't retry broadcasts. */
	}

	/*
	 * 9 for the header and 1 for the checksum, plus
	 * possibly one for the broadcast.
	 */
	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
	if (ipmb_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_ipmb_responses);
		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
				msgid, broadcast,
				source_address, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
		unsigned long flags;

		spin_lock_irqsave(&intf->seq_lock, flags);

		if (is_maintenance_mode_cmd(msg))
			intf->ipmb_maintenance_mode_timeout =
				maintenance_mode_timeout_ms;

		if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
			/* Different default in maintenance mode */
			retry_time_ms = default_maintenance_retry_ms;

		/*
		 * Create a sequence number with the default timeout
		 * and retry count.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   broadcast,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_ipmb_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_ipmb_msg(smi_msg, msg, ipmb_addr,
				STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
				ipmb_seq, broadcast,
				source_address, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be correct.
		 */
out_err:
		spin_unlock_irqrestore(&intf->seq_lock, flags);
	}

	return rv;
}
*/ 2114 unsigned long flags; 2115 2116 spin_lock_irqsave(&intf->seq_lock, flags); 2117 2118 /* 2119 * Allocate a sequence number, using the supplied 2120 * retry time and retry count. 2121 */ 2122 rv = intf_next_seq(intf, 2123 recv_msg, 2124 retry_time_ms, 2125 retries, 2126 0, 2127 &ipmb_seq, 2128 &seqid); 2129 if (rv) 2130 /* 2131 * We have used up all the sequence numbers, 2132 * probably, so abort. 2133 */ 2134 goto out_err; 2135 2136 ipmi_inc_stat(intf, sent_lan_commands); 2137 2138 /* 2139 * Store the sequence number in the message, 2140 * so that when the send message response 2141 * comes back we can start the timer. 2142 */ 2143 format_lan_msg(smi_msg, msg, lan_addr, 2144 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2145 ipmb_seq, source_lun); 2146 2147 /* 2148 * Copy the message into the recv message data, so we 2149 * can retransmit it later if necessary. 2150 */ 2151 memcpy(recv_msg->msg_data, smi_msg->data, 2152 smi_msg->data_size); 2153 recv_msg->msg.data = recv_msg->msg_data; 2154 recv_msg->msg.data_len = smi_msg->data_size; 2155 2156 /* 2157 * We don't unlock until here, because we need 2158 * to copy the completed message into the 2159 * recv_msg before we release the lock. 2160 * Otherwise, race conditions may bite us. I 2161 * know that's pretty paranoid, but I prefer 2162 * to be correct. 2163 */ 2164 out_err: 2165 spin_unlock_irqrestore(&intf->seq_lock, flags); 2166 } 2167 2168 return rv; 2169 } 2170 2171 /* 2172 * Separate from ipmi_request so that the user does not have to be 2173 * supplied in certain circumstances (mainly at panic time). If 2174 * messages are supplied, they will be freed, even if an error 2175 * occurs. 2176 */ 2177 static int i_ipmi_request(struct ipmi_user *user, 2178 struct ipmi_smi *intf, 2179 struct ipmi_addr *addr, 2180 long msgid, 2181 struct kernel_ipmi_msg *msg, 2182 void *user_msg_data, 2183 void *supplied_smi, 2184 struct ipmi_recv_msg *supplied_recv, 2185 int priority, 2186 unsigned char source_address, 2187 unsigned char source_lun, 2188 int retries, 2189 unsigned int retry_time_ms) 2190 { 2191 struct ipmi_smi_msg *smi_msg; 2192 struct ipmi_recv_msg *recv_msg; 2193 int rv = 0; 2194 2195 if (supplied_recv) 2196 recv_msg = supplied_recv; 2197 else { 2198 recv_msg = ipmi_alloc_recv_msg(); 2199 if (recv_msg == NULL) { 2200 rv = -ENOMEM; 2201 goto out; 2202 } 2203 } 2204 recv_msg->user_msg_data = user_msg_data; 2205 2206 if (supplied_smi) 2207 smi_msg = (struct ipmi_smi_msg *) supplied_smi; 2208 else { 2209 smi_msg = ipmi_alloc_smi_msg(); 2210 if (smi_msg == NULL) { 2211 if (!supplied_recv) 2212 ipmi_free_recv_msg(recv_msg); 2213 rv = -ENOMEM; 2214 goto out; 2215 } 2216 } 2217 2218 rcu_read_lock(); 2219 if (intf->in_shutdown) { 2220 rv = -ENODEV; 2221 goto out_err; 2222 } 2223 2224 recv_msg->user = user; 2225 if (user) 2226 /* The put happens when the message is freed. */ 2227 kref_get(&user->refcount); 2228 recv_msg->msgid = msgid; 2229 /* 2230 * Store the message to send in the receive message so timeout 2231 * responses can get the proper response data.
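 * Concretely, if the request later times out, the timeout code can
 * synthesize an error response whose netfn and cmd come from this
 * saved copy, so the user can tell which command failed.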
2232 */ 2233 recv_msg->msg = *msg; 2234 2235 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 2236 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, 2237 recv_msg, retries, retry_time_ms); 2238 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 2239 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, 2240 source_address, source_lun, 2241 retries, retry_time_ms); 2242 } else if (is_lan_addr(addr)) { 2243 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, 2244 source_lun, retries, retry_time_ms); 2245 } else { 2246 /* Unknown address type. */ 2247 ipmi_inc_stat(intf, sent_invalid_commands); 2248 rv = -EINVAL; 2249 } 2250 2251 if (rv) { 2252 out_err: 2253 ipmi_free_smi_msg(smi_msg); 2254 ipmi_free_recv_msg(recv_msg); 2255 } else { 2256 pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data); 2257 2258 smi_send(intf, intf->handlers, smi_msg, priority); 2259 } 2260 rcu_read_unlock(); 2261 2262 out: 2263 return rv; 2264 } 2265 2266 static int check_addr(struct ipmi_smi *intf, 2267 struct ipmi_addr *addr, 2268 unsigned char *saddr, 2269 unsigned char *lun) 2270 { 2271 if (addr->channel >= IPMI_MAX_CHANNELS) 2272 return -EINVAL; 2273 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); 2274 *lun = intf->addrinfo[addr->channel].lun; 2275 *saddr = intf->addrinfo[addr->channel].address; 2276 return 0; 2277 } 2278 2279 int ipmi_request_settime(struct ipmi_user *user, 2280 struct ipmi_addr *addr, 2281 long msgid, 2282 struct kernel_ipmi_msg *msg, 2283 void *user_msg_data, 2284 int priority, 2285 int retries, 2286 unsigned int retry_time_ms) 2287 { 2288 unsigned char saddr = 0, lun = 0; 2289 int rv, index; 2290 2291 if (!user) 2292 return -EINVAL; 2293 2294 user = acquire_ipmi_user(user, &index); 2295 if (!user) 2296 return -ENODEV; 2297 2298 rv = check_addr(user->intf, addr, &saddr, &lun); 2299 if (!rv) 2300 rv = i_ipmi_request(user, 2301 user->intf, 2302 addr, 2303 msgid, 2304 msg, 2305 user_msg_data, 2306 NULL, NULL, 2307 priority, 2308 saddr, 2309 lun, 2310 retries, 2311 retry_time_ms); 2312 2313 release_ipmi_user(user, index); 2314 return rv; 2315 } 2316 EXPORT_SYMBOL(ipmi_request_settime); 2317 2318 int ipmi_request_supply_msgs(struct ipmi_user *user, 2319 struct ipmi_addr *addr, 2320 long msgid, 2321 struct kernel_ipmi_msg *msg, 2322 void *user_msg_data, 2323 void *supplied_smi, 2324 struct ipmi_recv_msg *supplied_recv, 2325 int priority) 2326 { 2327 unsigned char saddr = 0, lun = 0; 2328 int rv, index; 2329 2330 if (!user) 2331 return -EINVAL; 2332 2333 user = acquire_ipmi_user(user, &index); 2334 if (!user) 2335 return -ENODEV; 2336 2337 rv = check_addr(user->intf, addr, &saddr, &lun); 2338 if (!rv) 2339 rv = i_ipmi_request(user, 2340 user->intf, 2341 addr, 2342 msgid, 2343 msg, 2344 user_msg_data, 2345 supplied_smi, 2346 supplied_recv, 2347 priority, 2348 saddr, 2349 lun, 2350 -1, 0); 2351 2352 release_ipmi_user(user, index); 2353 return rv; 2354 } 2355 EXPORT_SYMBOL(ipmi_request_supply_msgs); 2356 2357 static void bmc_device_id_handler(struct ipmi_smi *intf, 2358 struct ipmi_recv_msg *msg) 2359 { 2360 int rv; 2361 2362 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2363 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2364 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { 2365 dev_warn(intf->si_dev, 2366 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", 2367 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); 2368 return; 2369 } 2370 2371 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, 2372 
msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); 2373 if (rv) { 2374 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); 2375 /* Record the completion code on error. */ 2376 intf->bmc->cc = msg->msg.data[0]; 2377 intf->bmc->dyn_id_set = 0; 2378 } else { 2379 /* 2380 * Make sure the id data is available before setting 2381 * dyn_id_set. 2382 */ 2383 smp_wmb(); 2384 intf->bmc->dyn_id_set = 1; 2385 } 2386 2387 wake_up(&intf->waitq); 2388 } 2389 2390 static int 2391 send_get_device_id_cmd(struct ipmi_smi *intf) 2392 { 2393 struct ipmi_system_interface_addr si; 2394 struct kernel_ipmi_msg msg; 2395 2396 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2397 si.channel = IPMI_BMC_CHANNEL; 2398 si.lun = 0; 2399 2400 msg.netfn = IPMI_NETFN_APP_REQUEST; 2401 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 2402 msg.data = NULL; 2403 msg.data_len = 0; 2404 2405 return i_ipmi_request(NULL, 2406 intf, 2407 (struct ipmi_addr *) &si, 2408 0, 2409 &msg, 2410 intf, 2411 NULL, 2412 NULL, 2413 0, 2414 intf->addrinfo[0].address, 2415 intf->addrinfo[0].lun, 2416 -1, 0); 2417 } 2418 2419 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) 2420 { 2421 int rv; 2422 unsigned int retry_count = 0; 2423 2424 intf->null_user_handler = bmc_device_id_handler; 2425 2426 retry: 2427 bmc->cc = 0; 2428 bmc->dyn_id_set = 2; 2429 2430 rv = send_get_device_id_cmd(intf); 2431 if (rv) 2432 goto out_reset_handler; 2433 2434 wait_event(intf->waitq, bmc->dyn_id_set != 2); 2435 2436 if (!bmc->dyn_id_set) { 2437 if (bmc->cc != IPMI_CC_NO_ERROR && 2438 ++retry_count <= GET_DEVICE_ID_MAX_RETRY) { 2439 msleep(500); 2440 dev_warn(intf->si_dev, 2441 "BMC returned 0x%2.2x, retry get bmc device id\n", 2442 bmc->cc); 2443 goto retry; 2444 } 2445 2446 rv = -EIO; /* Something went wrong in the fetch. */ 2447 } 2448 2449 /* dyn_id_set makes the id data available. */ 2450 smp_rmb(); 2451 2452 out_reset_handler: 2453 intf->null_user_handler = NULL; 2454 2455 return rv; 2456 } 2457 2458 /* 2459 * Fetch the device id for the bmc/interface. You must pass in either 2460 * bmc or intf; this code will get the other one. If the data has 2461 * been recently fetched, this will just use the cached data. Otherwise 2462 * it will run a new fetch. 2463 * 2464 * Except for the first time this is called (in ipmi_add_smi()), 2465 * this will always return good data. 2466 */ 2467 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2468 struct ipmi_device_id *id, 2469 bool *guid_set, guid_t *guid, int intf_num) 2470 { 2471 int rv = 0; 2472 int prev_dyn_id_set, prev_guid_set; 2473 bool intf_set = intf != NULL; 2474 2475 if (!intf) { 2476 mutex_lock(&bmc->dyn_mutex); 2477 retry_bmc_lock: 2478 if (list_empty(&bmc->intfs)) { 2479 mutex_unlock(&bmc->dyn_mutex); 2480 return -ENOENT; 2481 } 2482 intf = list_first_entry(&bmc->intfs, struct ipmi_smi, 2483 bmc_link); 2484 kref_get(&intf->refcount); 2485 mutex_unlock(&bmc->dyn_mutex); 2486 mutex_lock(&intf->bmc_reg_mutex); 2487 mutex_lock(&bmc->dyn_mutex); 2488 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi, 2489 bmc_link)) { 2490 mutex_unlock(&intf->bmc_reg_mutex); 2491 kref_put(&intf->refcount, intf_free); 2492 goto retry_bmc_lock; 2493 } 2494 } else { 2495 mutex_lock(&intf->bmc_reg_mutex); 2496 bmc = intf->bmc; 2497 mutex_lock(&bmc->dyn_mutex); 2498 kref_get(&intf->refcount); 2499 } 2500 2501 /* If we have a valid and current ID, just return that.
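 * "Current" here means younger than IPMI_DYN_DEV_ID_EXPIRY:
 * dyn_id_expiry is reset to jiffies + IPMI_DYN_DEV_ID_EXPIRY after
 * each successful fetch below, and time_is_after_jiffies() checks
 * that this moment has not yet passed.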
*/ 2502 if (intf->in_bmc_register || 2503 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry))) 2504 goto out_noprocessing; 2505 2506 prev_guid_set = bmc->dyn_guid_set; 2507 __get_guid(intf); 2508 2509 prev_dyn_id_set = bmc->dyn_id_set; 2510 rv = __get_device_id(intf, bmc); 2511 if (rv) 2512 goto out; 2513 2514 /* 2515 * The guid, device id, manufacturer id, and product id should 2516 * not change on a BMC. If it does we have to do some dancing. 2517 */ 2518 if (!intf->bmc_registered 2519 || (!prev_guid_set && bmc->dyn_guid_set) 2520 || (!prev_dyn_id_set && bmc->dyn_id_set) 2521 || (prev_guid_set && bmc->dyn_guid_set 2522 && !guid_equal(&bmc->guid, &bmc->fetch_guid)) 2523 || bmc->id.device_id != bmc->fetch_id.device_id 2524 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id 2525 || bmc->id.product_id != bmc->fetch_id.product_id) { 2526 struct ipmi_device_id id = bmc->fetch_id; 2527 int guid_set = bmc->dyn_guid_set; 2528 guid_t guid; 2529 2530 guid = bmc->fetch_guid; 2531 mutex_unlock(&bmc->dyn_mutex); 2532 2533 __ipmi_bmc_unregister(intf); 2534 /* Fill in the temporary BMC for good measure. */ 2535 intf->bmc->id = id; 2536 intf->bmc->dyn_guid_set = guid_set; 2537 intf->bmc->guid = guid; 2538 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num)) 2539 need_waiter(intf); /* Retry later on an error. */ 2540 else 2541 __scan_channels(intf, &id); 2542 2543 2544 if (!intf_set) { 2545 /* 2546 * We weren't given the interface on the 2547 * command line, so restart the operation on 2548 * the next interface for the BMC. 2549 */ 2550 mutex_unlock(&intf->bmc_reg_mutex); 2551 mutex_lock(&bmc->dyn_mutex); 2552 goto retry_bmc_lock; 2553 } 2554 2555 /* We have a new BMC, set it up. */ 2556 bmc = intf->bmc; 2557 mutex_lock(&bmc->dyn_mutex); 2558 goto out_noprocessing; 2559 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id))) 2560 /* Version info changes, scan the channels again. */ 2561 __scan_channels(intf, &bmc->fetch_id); 2562 2563 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 2564 2565 out: 2566 if (rv && prev_dyn_id_set) { 2567 rv = 0; /* Ignore failures if we have previous data. */ 2568 bmc->dyn_id_set = prev_dyn_id_set; 2569 } 2570 if (!rv) { 2571 bmc->id = bmc->fetch_id; 2572 if (bmc->dyn_guid_set) 2573 bmc->guid = bmc->fetch_guid; 2574 else if (prev_guid_set) 2575 /* 2576 * The guid used to be valid and it failed to fetch, 2577 * just use the cached value. 
2578 */ 2579 bmc->dyn_guid_set = prev_guid_set; 2580 } 2581 out_noprocessing: 2582 if (!rv) { 2583 if (id) 2584 *id = bmc->id; 2585 2586 if (guid_set) 2587 *guid_set = bmc->dyn_guid_set; 2588 2589 if (guid && bmc->dyn_guid_set) 2590 *guid = bmc->guid; 2591 } 2592 2593 mutex_unlock(&bmc->dyn_mutex); 2594 mutex_unlock(&intf->bmc_reg_mutex); 2595 2596 kref_put(&intf->refcount, intf_free); 2597 return rv; 2598 } 2599 2600 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2601 struct ipmi_device_id *id, 2602 bool *guid_set, guid_t *guid) 2603 { 2604 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); 2605 } 2606 2607 static ssize_t device_id_show(struct device *dev, 2608 struct device_attribute *attr, 2609 char *buf) 2610 { 2611 struct bmc_device *bmc = to_bmc_device(dev); 2612 struct ipmi_device_id id; 2613 int rv; 2614 2615 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2616 if (rv) 2617 return rv; 2618 2619 return snprintf(buf, 10, "%u\n", id.device_id); 2620 } 2621 static DEVICE_ATTR_RO(device_id); 2622 2623 static ssize_t provides_device_sdrs_show(struct device *dev, 2624 struct device_attribute *attr, 2625 char *buf) 2626 { 2627 struct bmc_device *bmc = to_bmc_device(dev); 2628 struct ipmi_device_id id; 2629 int rv; 2630 2631 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2632 if (rv) 2633 return rv; 2634 2635 return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7); 2636 } 2637 static DEVICE_ATTR_RO(provides_device_sdrs); 2638 2639 static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2640 char *buf) 2641 { 2642 struct bmc_device *bmc = to_bmc_device(dev); 2643 struct ipmi_device_id id; 2644 int rv; 2645 2646 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2647 if (rv) 2648 return rv; 2649 2650 return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F); 2651 } 2652 static DEVICE_ATTR_RO(revision); 2653 2654 static ssize_t firmware_revision_show(struct device *dev, 2655 struct device_attribute *attr, 2656 char *buf) 2657 { 2658 struct bmc_device *bmc = to_bmc_device(dev); 2659 struct ipmi_device_id id; 2660 int rv; 2661 2662 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2663 if (rv) 2664 return rv; 2665 2666 return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1, 2667 id.firmware_revision_2); 2668 } 2669 static DEVICE_ATTR_RO(firmware_revision); 2670 2671 static ssize_t ipmi_version_show(struct device *dev, 2672 struct device_attribute *attr, 2673 char *buf) 2674 { 2675 struct bmc_device *bmc = to_bmc_device(dev); 2676 struct ipmi_device_id id; 2677 int rv; 2678 2679 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2680 if (rv) 2681 return rv; 2682 2683 return snprintf(buf, 20, "%u.%u\n", 2684 ipmi_version_major(&id), 2685 ipmi_version_minor(&id)); 2686 } 2687 static DEVICE_ATTR_RO(ipmi_version); 2688 2689 static ssize_t add_dev_support_show(struct device *dev, 2690 struct device_attribute *attr, 2691 char *buf) 2692 { 2693 struct bmc_device *bmc = to_bmc_device(dev); 2694 struct ipmi_device_id id; 2695 int rv; 2696 2697 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2698 if (rv) 2699 return rv; 2700 2701 return snprintf(buf, 10, "0x%02x\n", id.additional_device_support); 2702 } 2703 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, 2704 NULL); 2705 2706 static ssize_t manufacturer_id_show(struct device *dev, 2707 struct device_attribute *attr, 2708 char *buf) 2709 { 2710 struct bmc_device *bmc = to_bmc_device(dev); 2711 struct ipmi_device_id id; 2712 int 
rv; 2713 2714 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2715 if (rv) 2716 return rv; 2717 2718 return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id); 2719 } 2720 static DEVICE_ATTR_RO(manufacturer_id); 2721 2722 static ssize_t product_id_show(struct device *dev, 2723 struct device_attribute *attr, 2724 char *buf) 2725 { 2726 struct bmc_device *bmc = to_bmc_device(dev); 2727 struct ipmi_device_id id; 2728 int rv; 2729 2730 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2731 if (rv) 2732 return rv; 2733 2734 return snprintf(buf, 10, "0x%4.4x\n", id.product_id); 2735 } 2736 static DEVICE_ATTR_RO(product_id); 2737 2738 static ssize_t aux_firmware_rev_show(struct device *dev, 2739 struct device_attribute *attr, 2740 char *buf) 2741 { 2742 struct bmc_device *bmc = to_bmc_device(dev); 2743 struct ipmi_device_id id; 2744 int rv; 2745 2746 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2747 if (rv) 2748 return rv; 2749 2750 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2751 id.aux_firmware_revision[3], 2752 id.aux_firmware_revision[2], 2753 id.aux_firmware_revision[1], 2754 id.aux_firmware_revision[0]); 2755 } 2756 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2757 2758 static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2759 char *buf) 2760 { 2761 struct bmc_device *bmc = to_bmc_device(dev); 2762 bool guid_set; 2763 guid_t guid; 2764 int rv; 2765 2766 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid); 2767 if (rv) 2768 return rv; 2769 if (!guid_set) 2770 return -ENOENT; 2771 2772 return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid); 2773 } 2774 static DEVICE_ATTR_RO(guid); 2775 2776 static struct attribute *bmc_dev_attrs[] = { 2777 &dev_attr_device_id.attr, 2778 &dev_attr_provides_device_sdrs.attr, 2779 &dev_attr_revision.attr, 2780 &dev_attr_firmware_revision.attr, 2781 &dev_attr_ipmi_version.attr, 2782 &dev_attr_additional_device_support.attr, 2783 &dev_attr_manufacturer_id.attr, 2784 &dev_attr_product_id.attr, 2785 &dev_attr_aux_firmware_revision.attr, 2786 &dev_attr_guid.attr, 2787 NULL 2788 }; 2789 2790 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, 2791 struct attribute *attr, int idx) 2792 { 2793 struct device *dev = kobj_to_dev(kobj); 2794 struct bmc_device *bmc = to_bmc_device(dev); 2795 umode_t mode = attr->mode; 2796 int rv; 2797 2798 if (attr == &dev_attr_aux_firmware_revision.attr) { 2799 struct ipmi_device_id id; 2800 2801 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2802 return (!rv && id.aux_firmware_revision_set) ? mode : 0; 2803 } 2804 if (attr == &dev_attr_guid.attr) { 2805 bool guid_set; 2806 2807 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); 2808 return (!rv && guid_set) ? 
mode : 0; 2809 } 2810 return mode; 2811 } 2812 2813 static const struct attribute_group bmc_dev_attr_group = { 2814 .attrs = bmc_dev_attrs, 2815 .is_visible = bmc_dev_attr_is_visible, 2816 }; 2817 2818 static const struct attribute_group *bmc_dev_attr_groups[] = { 2819 &bmc_dev_attr_group, 2820 NULL 2821 }; 2822 2823 static const struct device_type bmc_device_type = { 2824 .groups = bmc_dev_attr_groups, 2825 }; 2826 2827 static int __find_bmc_guid(struct device *dev, const void *data) 2828 { 2829 const guid_t *guid = data; 2830 struct bmc_device *bmc; 2831 int rv; 2832 2833 if (dev->type != &bmc_device_type) 2834 return 0; 2835 2836 bmc = to_bmc_device(dev); 2837 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid); 2838 if (rv) 2839 rv = kref_get_unless_zero(&bmc->usecount); 2840 return rv; 2841 } 2842 2843 /* 2844 * Returns with the bmc's usecount incremented, if it is non-NULL. 2845 */ 2846 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, 2847 guid_t *guid) 2848 { 2849 struct device *dev; 2850 struct bmc_device *bmc = NULL; 2851 2852 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 2853 if (dev) { 2854 bmc = to_bmc_device(dev); 2855 put_device(dev); 2856 } 2857 return bmc; 2858 } 2859 2860 struct prod_dev_id { 2861 unsigned int product_id; 2862 unsigned char device_id; 2863 }; 2864 2865 static int __find_bmc_prod_dev_id(struct device *dev, const void *data) 2866 { 2867 const struct prod_dev_id *cid = data; 2868 struct bmc_device *bmc; 2869 int rv; 2870 2871 if (dev->type != &bmc_device_type) 2872 return 0; 2873 2874 bmc = to_bmc_device(dev); 2875 rv = (bmc->id.product_id == cid->product_id 2876 && bmc->id.device_id == cid->device_id); 2877 if (rv) 2878 rv = kref_get_unless_zero(&bmc->usecount); 2879 return rv; 2880 } 2881 2882 /* 2883 * Returns with the bmc's usecount incremented, if it is non-NULL. 2884 */ 2885 static struct bmc_device *ipmi_find_bmc_prod_dev_id( 2886 struct device_driver *drv, 2887 unsigned int product_id, unsigned char device_id) 2888 { 2889 struct prod_dev_id id = { 2890 .product_id = product_id, 2891 .device_id = device_id, 2892 }; 2893 struct device *dev; 2894 struct bmc_device *bmc = NULL; 2895 2896 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 2897 if (dev) { 2898 bmc = to_bmc_device(dev); 2899 put_device(dev); 2900 } 2901 return bmc; 2902 } 2903 2904 static DEFINE_IDA(ipmi_bmc_ida); 2905 2906 static void 2907 release_bmc_device(struct device *dev) 2908 { 2909 kfree(to_bmc_device(dev)); 2910 } 2911 2912 static void cleanup_bmc_work(struct work_struct *work) 2913 { 2914 struct bmc_device *bmc = container_of(work, struct bmc_device, 2915 remove_work); 2916 int id = bmc->pdev.id; /* Unregister overwrites id */ 2917 2918 platform_device_unregister(&bmc->pdev); 2919 ida_simple_remove(&ipmi_bmc_ida, id); 2920 } 2921 2922 static void 2923 cleanup_bmc_device(struct kref *ref) 2924 { 2925 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 2926 2927 /* 2928 * Remove the platform device in a work queue to avoid issues 2929 * with removing the device attributes while reading a device 2930 * attribute. 2931 */ 2932 schedule_work(&bmc->remove_work); 2933 } 2934 2935 /* 2936 * Must be called with intf->bmc_reg_mutex held. 
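 * Lock ordering, as observed from the callers in this file (not a
 * documented rule): bmc_reg_mutex is always taken before
 * bmc->dyn_mutex, and this function takes dyn_mutex itself, so the
 * caller must not already hold it.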
2937 */ 2938 static void __ipmi_bmc_unregister(struct ipmi_smi *intf) 2939 { 2940 struct bmc_device *bmc = intf->bmc; 2941 2942 if (!intf->bmc_registered) 2943 return; 2944 2945 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 2946 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name); 2947 kfree(intf->my_dev_name); 2948 intf->my_dev_name = NULL; 2949 2950 mutex_lock(&bmc->dyn_mutex); 2951 list_del(&intf->bmc_link); 2952 mutex_unlock(&bmc->dyn_mutex); 2953 intf->bmc = &intf->tmp_bmc; 2954 kref_put(&bmc->usecount, cleanup_bmc_device); 2955 intf->bmc_registered = false; 2956 } 2957 2958 static void ipmi_bmc_unregister(struct ipmi_smi *intf) 2959 { 2960 mutex_lock(&intf->bmc_reg_mutex); 2961 __ipmi_bmc_unregister(intf); 2962 mutex_unlock(&intf->bmc_reg_mutex); 2963 } 2964 2965 /* 2966 * Must be called with intf->bmc_reg_mutex held. 2967 */ 2968 static int __ipmi_bmc_register(struct ipmi_smi *intf, 2969 struct ipmi_device_id *id, 2970 bool guid_set, guid_t *guid, int intf_num) 2971 { 2972 int rv; 2973 struct bmc_device *bmc; 2974 struct bmc_device *old_bmc; 2975 2976 /* 2977 * platform_device_register() can cause bmc_reg_mutex to 2978 * be claimed because of the is_visible functions of 2979 * the attributes. Eliminate possible recursion and 2980 * release the lock. 2981 */ 2982 intf->in_bmc_register = true; 2983 mutex_unlock(&intf->bmc_reg_mutex); 2984 2985 /* 2986 * Try to find a bmc_device struct already 2987 * representing the interfaced BMC. 2988 */ 2989 mutex_lock(&ipmidriver_mutex); 2990 if (guid_set) 2991 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid); 2992 else 2993 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver, 2994 id->product_id, 2995 id->device_id); 2996 2997 /* 2998 * If a bmc_device already exists, reuse it; otherwise 2999 * allocate and register a new BMC device. 3000 */ 3001 if (old_bmc) { 3002 bmc = old_bmc; 3003 /* 3004 * Note: old_bmc already has usecount incremented by 3005 * the BMC find functions.
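 * That reference is inherited here; it is dropped again with
 * kref_put(&bmc->usecount, cleanup_bmc_device) in
 * __ipmi_bmc_unregister() or in the error paths below.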
3006 */ 3007 intf->bmc = old_bmc; 3008 mutex_lock(&bmc->dyn_mutex); 3009 list_add_tail(&intf->bmc_link, &bmc->intfs); 3010 mutex_unlock(&bmc->dyn_mutex); 3011 3012 dev_info(intf->si_dev, 3013 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3014 bmc->id.manufacturer_id, 3015 bmc->id.product_id, 3016 bmc->id.device_id); 3017 } else { 3018 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL); 3019 if (!bmc) { 3020 rv = -ENOMEM; 3021 goto out; 3022 } 3023 INIT_LIST_HEAD(&bmc->intfs); 3024 mutex_init(&bmc->dyn_mutex); 3025 INIT_WORK(&bmc->remove_work, cleanup_bmc_work); 3026 3027 bmc->id = *id; 3028 bmc->dyn_id_set = 1; 3029 bmc->dyn_guid_set = guid_set; 3030 bmc->guid = *guid; 3031 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 3032 3033 bmc->pdev.name = "ipmi_bmc"; 3034 3035 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL); 3036 if (rv < 0) { 3037 kfree(bmc); 3038 goto out; 3039 } 3040 3041 bmc->pdev.dev.driver = &ipmidriver.driver; 3042 bmc->pdev.id = rv; 3043 bmc->pdev.dev.release = release_bmc_device; 3044 bmc->pdev.dev.type = &bmc_device_type; 3045 kref_init(&bmc->usecount); 3046 3047 intf->bmc = bmc; 3048 mutex_lock(&bmc->dyn_mutex); 3049 list_add_tail(&intf->bmc_link, &bmc->intfs); 3050 mutex_unlock(&bmc->dyn_mutex); 3051 3052 rv = platform_device_register(&bmc->pdev); 3053 if (rv) { 3054 dev_err(intf->si_dev, 3055 "Unable to register bmc device: %d\n", 3056 rv); 3057 goto out_list_del; 3058 } 3059 3060 dev_info(intf->si_dev, 3061 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3062 bmc->id.manufacturer_id, 3063 bmc->id.product_id, 3064 bmc->id.device_id); 3065 } 3066 3067 /* 3068 * create symlink from system interface device to bmc device 3069 * and back. 3070 */ 3071 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); 3072 if (rv) { 3073 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); 3074 goto out_put_bmc; 3075 } 3076 3077 if (intf_num == -1) 3078 intf_num = intf->intf_num; 3079 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); 3080 if (!intf->my_dev_name) { 3081 rv = -ENOMEM; 3082 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", 3083 rv); 3084 goto out_unlink1; 3085 } 3086 3087 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, 3088 intf->my_dev_name); 3089 if (rv) { 3090 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", 3091 rv); 3092 goto out_free_my_dev_name; 3093 } 3094 3095 intf->bmc_registered = true; 3096 3097 out: 3098 mutex_unlock(&ipmidriver_mutex); 3099 mutex_lock(&intf->bmc_reg_mutex); 3100 intf->in_bmc_register = false; 3101 return rv; 3102 3103 3104 out_free_my_dev_name: 3105 kfree(intf->my_dev_name); 3106 intf->my_dev_name = NULL; 3107 3108 out_unlink1: 3109 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3110 3111 out_put_bmc: 3112 mutex_lock(&bmc->dyn_mutex); 3113 list_del(&intf->bmc_link); 3114 mutex_unlock(&bmc->dyn_mutex); 3115 intf->bmc = &intf->tmp_bmc; 3116 kref_put(&bmc->usecount, cleanup_bmc_device); 3117 goto out; 3118 3119 out_list_del: 3120 mutex_lock(&bmc->dyn_mutex); 3121 list_del(&intf->bmc_link); 3122 mutex_unlock(&bmc->dyn_mutex); 3123 intf->bmc = &intf->tmp_bmc; 3124 put_device(&bmc->pdev.dev); 3125 goto out; 3126 } 3127 3128 static int 3129 send_guid_cmd(struct ipmi_smi *intf, int chan) 3130 { 3131 struct kernel_ipmi_msg msg; 3132 struct ipmi_system_interface_addr si; 3133 3134 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3135 si.channel = IPMI_BMC_CHANNEL; 3136 si.lun = 0; 3137 3138 msg.netfn = 
IPMI_NETFN_APP_REQUEST; 3139 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 3140 msg.data = NULL; 3141 msg.data_len = 0; 3142 return i_ipmi_request(NULL, 3143 intf, 3144 (struct ipmi_addr *) &si, 3145 0, 3146 &msg, 3147 intf, 3148 NULL, 3149 NULL, 3150 0, 3151 intf->addrinfo[0].address, 3152 intf->addrinfo[0].lun, 3153 -1, 0); 3154 } 3155 3156 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3157 { 3158 struct bmc_device *bmc = intf->bmc; 3159 3160 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3161 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 3162 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 3163 /* Not for me */ 3164 return; 3165 3166 if (msg->msg.data[0] != 0) { 3167 /* Error from getting the GUID, the BMC doesn't have one. */ 3168 bmc->dyn_guid_set = 0; 3169 goto out; 3170 } 3171 3172 if (msg->msg.data_len < UUID_SIZE + 1) { 3173 bmc->dyn_guid_set = 0; 3174 dev_warn(intf->si_dev, 3175 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", 3176 msg->msg.data_len, UUID_SIZE + 1); 3177 goto out; 3178 } 3179 3180 import_guid(&bmc->fetch_guid, msg->msg.data + 1); 3181 /* 3182 * Make sure the guid data is available before setting 3183 * dyn_guid_set. 3184 */ 3185 smp_wmb(); 3186 bmc->dyn_guid_set = 1; 3187 out: 3188 wake_up(&intf->waitq); 3189 } 3190 3191 static void __get_guid(struct ipmi_smi *intf) 3192 { 3193 int rv; 3194 struct bmc_device *bmc = intf->bmc; 3195 3196 bmc->dyn_guid_set = 2; 3197 intf->null_user_handler = guid_handler; 3198 rv = send_guid_cmd(intf, 0); 3199 if (rv) 3200 /* Send failed, no GUID available. */ 3201 bmc->dyn_guid_set = 0; 3202 else 3203 wait_event(intf->waitq, bmc->dyn_guid_set != 2); 3204 3205 /* dyn_guid_set makes the guid data available. */ 3206 smp_rmb(); 3207 3208 intf->null_user_handler = NULL; 3209 } 3210 3211 static int 3212 send_channel_info_cmd(struct ipmi_smi *intf, int chan) 3213 { 3214 struct kernel_ipmi_msg msg; 3215 unsigned char data[1]; 3216 struct ipmi_system_interface_addr si; 3217 3218 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3219 si.channel = IPMI_BMC_CHANNEL; 3220 si.lun = 0; 3221 3222 msg.netfn = IPMI_NETFN_APP_REQUEST; 3223 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 3224 msg.data = data; 3225 msg.data_len = 1; 3226 data[0] = chan; 3227 return i_ipmi_request(NULL, 3228 intf, 3229 (struct ipmi_addr *) &si, 3230 0, 3231 &msg, 3232 intf, 3233 NULL, 3234 NULL, 3235 0, 3236 intf->addrinfo[0].address, 3237 intf->addrinfo[0].lun, 3238 -1, 0); 3239 } 3240 3241 static void 3242 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3243 { 3244 int rv = 0; 3245 int ch; 3246 unsigned int set = intf->curr_working_cset; 3247 struct ipmi_channel *chans; 3248 3249 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3250 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3251 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { 3252 /* It's the one we want */ 3253 if (msg->msg.data[0] != 0) { 3254 /* Got an error from the channel, just go on. */ 3255 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 3256 /* 3257 * If the MC does not support this 3258 * command, that is legal. We just 3259 * assume it has one IPMB at channel 3260 * zero. 
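 * (An "invalid command" completion code is not treated as a failure:
 * an MC is allowed not to implement Get Channel Info, so we fall back
 * to the same single-IPMB-at-channel-zero assumption that
 * __scan_channels() below uses for pre-1.5 interfaces.)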
3261 */ 3262 intf->wchannels[set].c[0].medium 3263 = IPMI_CHANNEL_MEDIUM_IPMB; 3264 intf->wchannels[set].c[0].protocol 3265 = IPMI_CHANNEL_PROTOCOL_IPMB; 3266 3267 intf->channel_list = intf->wchannels + set; 3268 intf->channels_ready = true; 3269 wake_up(&intf->waitq); 3270 goto out; 3271 } 3272 goto next_channel; 3273 } 3274 if (msg->msg.data_len < 4) { 3275 /* Message not big enough, just go on. */ 3276 goto next_channel; 3277 } 3278 ch = intf->curr_channel; 3279 chans = intf->wchannels[set].c; 3280 chans[ch].medium = msg->msg.data[2] & 0x7f; 3281 chans[ch].protocol = msg->msg.data[3] & 0x1f; 3282 3283 next_channel: 3284 intf->curr_channel++; 3285 if (intf->curr_channel >= IPMI_MAX_CHANNELS) { 3286 intf->channel_list = intf->wchannels + set; 3287 intf->channels_ready = true; 3288 wake_up(&intf->waitq); 3289 } else { 3290 intf->channel_list = intf->wchannels + set; 3291 intf->channels_ready = true; 3292 rv = send_channel_info_cmd(intf, intf->curr_channel); 3293 } 3294 3295 if (rv) { 3296 /* Got an error somehow, just give up. */ 3297 dev_warn(intf->si_dev, 3298 "Error sending channel information for channel %d: %d\n", 3299 intf->curr_channel, rv); 3300 3301 intf->channel_list = intf->wchannels + set; 3302 intf->channels_ready = true; 3303 wake_up(&intf->waitq); 3304 } 3305 } 3306 out: 3307 return; 3308 } 3309 3310 /* 3311 * Must be holding intf->bmc_reg_mutex to call this. 3312 */ 3313 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) 3314 { 3315 int rv; 3316 3317 if (ipmi_version_major(id) > 1 3318 || (ipmi_version_major(id) == 1 3319 && ipmi_version_minor(id) >= 5)) { 3320 unsigned int set; 3321 3322 /* 3323 * Start scanning the channels to see what is 3324 * available. 3325 */ 3326 set = !intf->curr_working_cset; 3327 intf->curr_working_cset = set; 3328 memset(&intf->wchannels[set], 0, 3329 sizeof(struct ipmi_channel_set)); 3330 3331 intf->null_user_handler = channel_handler; 3332 intf->curr_channel = 0; 3333 rv = send_channel_info_cmd(intf, 0); 3334 if (rv) { 3335 dev_warn(intf->si_dev, 3336 "Error sending channel information for channel 0, %d\n", 3337 rv); 3338 intf->null_user_handler = NULL; 3339 return -EIO; 3340 } 3341 3342 /* Wait for the channel info to be read. */ 3343 wait_event(intf->waitq, intf->channels_ready); 3344 intf->null_user_handler = NULL; 3345 } else { 3346 unsigned int set = intf->curr_working_cset; 3347 3348 /* Assume a single IPMB channel at zero. 
*/ 3349 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 3350 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 3351 intf->channel_list = intf->wchannels + set; 3352 intf->channels_ready = true; 3353 } 3354 3355 return 0; 3356 } 3357 3358 static void ipmi_poll(struct ipmi_smi *intf) 3359 { 3360 if (intf->handlers->poll) 3361 intf->handlers->poll(intf->send_info); 3362 /* In case something came in */ 3363 handle_new_recv_msgs(intf); 3364 } 3365 3366 void ipmi_poll_interface(struct ipmi_user *user) 3367 { 3368 ipmi_poll(user->intf); 3369 } 3370 EXPORT_SYMBOL(ipmi_poll_interface); 3371 3372 static void redo_bmc_reg(struct work_struct *work) 3373 { 3374 struct ipmi_smi *intf = container_of(work, struct ipmi_smi, 3375 bmc_reg_work); 3376 3377 if (!intf->in_shutdown) 3378 bmc_get_device_id(intf, NULL, NULL, NULL, NULL); 3379 3380 kref_put(&intf->refcount, intf_free); 3381 } 3382 3383 int ipmi_add_smi(struct module *owner, 3384 const struct ipmi_smi_handlers *handlers, 3385 void *send_info, 3386 struct device *si_dev, 3387 unsigned char slave_addr) 3388 { 3389 int i, j; 3390 int rv; 3391 struct ipmi_smi *intf, *tintf; 3392 struct list_head *link; 3393 struct ipmi_device_id id; 3394 3395 /* 3396 * Make sure the driver is actually initialized, this handles 3397 * problems with initialization order. 3398 */ 3399 rv = ipmi_init_msghandler(); 3400 if (rv) 3401 return rv; 3402 3403 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 3404 if (!intf) 3405 return -ENOMEM; 3406 3407 rv = init_srcu_struct(&intf->users_srcu); 3408 if (rv) { 3409 kfree(intf); 3410 return rv; 3411 } 3412 3413 intf->owner = owner; 3414 intf->bmc = &intf->tmp_bmc; 3415 INIT_LIST_HEAD(&intf->bmc->intfs); 3416 mutex_init(&intf->bmc->dyn_mutex); 3417 INIT_LIST_HEAD(&intf->bmc_link); 3418 mutex_init(&intf->bmc_reg_mutex); 3419 intf->intf_num = -1; /* Mark it invalid for now. */ 3420 kref_init(&intf->refcount); 3421 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg); 3422 intf->si_dev = si_dev; 3423 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 3424 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR; 3425 intf->addrinfo[j].lun = 2; 3426 } 3427 if (slave_addr != 0) 3428 intf->addrinfo[0].address = slave_addr; 3429 INIT_LIST_HEAD(&intf->users); 3430 intf->handlers = handlers; 3431 intf->send_info = send_info; 3432 spin_lock_init(&intf->seq_lock); 3433 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 3434 intf->seq_table[j].inuse = 0; 3435 intf->seq_table[j].seqid = 0; 3436 } 3437 intf->curr_seq = 0; 3438 spin_lock_init(&intf->waiting_rcv_msgs_lock); 3439 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 3440 tasklet_setup(&intf->recv_tasklet, 3441 smi_recv_tasklet); 3442 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 3443 spin_lock_init(&intf->xmit_msgs_lock); 3444 INIT_LIST_HEAD(&intf->xmit_msgs); 3445 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 3446 spin_lock_init(&intf->events_lock); 3447 spin_lock_init(&intf->watch_lock); 3448 atomic_set(&intf->event_waiters, 0); 3449 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3450 INIT_LIST_HEAD(&intf->waiting_events); 3451 intf->waiting_events_count = 0; 3452 mutex_init(&intf->cmd_rcvrs_mutex); 3453 spin_lock_init(&intf->maintenance_mode_lock); 3454 INIT_LIST_HEAD(&intf->cmd_rcvrs); 3455 init_waitqueue_head(&intf->waitq); 3456 for (i = 0; i < IPMI_NUM_STATS; i++) 3457 atomic_set(&intf->stats[i], 0); 3458 3459 mutex_lock(&ipmi_interfaces_mutex); 3460 /* Look for a hole in the numbers. 
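 * Worked example: if interfaces 0, 1 and 3 already exist, the loop
 * below stops at i == 2 with link pointing at interface 3, and
 * list_add_tail_rcu() inserts the new entry in front of it, so the
 * new interface becomes number 2.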
*/ 3461 i = 0; 3462 link = &ipmi_interfaces; 3463 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link, 3464 ipmi_interfaces_mutex_held()) { 3465 if (tintf->intf_num != i) { 3466 link = &tintf->link; 3467 break; 3468 } 3469 i++; 3470 } 3471 /* Add the new interface in numeric order. */ 3472 if (i == 0) 3473 list_add_rcu(&intf->link, &ipmi_interfaces); 3474 else 3475 list_add_tail_rcu(&intf->link, link); 3476 3477 rv = handlers->start_processing(send_info, intf); 3478 if (rv) 3479 goto out_err; 3480 3481 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3482 if (rv) { 3483 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3484 goto out_err_started; 3485 } 3486 3487 mutex_lock(&intf->bmc_reg_mutex); 3488 rv = __scan_channels(intf, &id); 3489 mutex_unlock(&intf->bmc_reg_mutex); 3490 if (rv) 3491 goto out_err_bmc_reg; 3492 3493 /* 3494 * Keep memory order straight for RCU readers. Make 3495 * sure everything else is committed to memory before 3496 * setting intf_num to mark the interface valid. 3497 */ 3498 smp_wmb(); 3499 intf->intf_num = i; 3500 mutex_unlock(&ipmi_interfaces_mutex); 3501 3502 /* After this point the interface is legal to use. */ 3503 call_smi_watchers(i, intf->si_dev); 3504 3505 return 0; 3506 3507 out_err_bmc_reg: 3508 ipmi_bmc_unregister(intf); 3509 out_err_started: 3510 if (intf->handlers->shutdown) 3511 intf->handlers->shutdown(intf->send_info); 3512 out_err: 3513 list_del_rcu(&intf->link); 3514 mutex_unlock(&ipmi_interfaces_mutex); 3515 synchronize_srcu(&ipmi_interfaces_srcu); 3516 cleanup_srcu_struct(&intf->users_srcu); 3517 kref_put(&intf->refcount, intf_free); 3518 3519 return rv; 3520 } 3521 EXPORT_SYMBOL(ipmi_add_smi); 3522 3523 static void deliver_smi_err_response(struct ipmi_smi *intf, 3524 struct ipmi_smi_msg *msg, 3525 unsigned char err) 3526 { 3527 msg->rsp[0] = msg->data[0] | 4; 3528 msg->rsp[1] = msg->data[1]; 3529 msg->rsp[2] = err; 3530 msg->rsp_size = 3; 3531 /* It's an error, so it will never requeue, no need to check return. */ 3532 handle_one_recv_msg(intf, msg); 3533 } 3534 3535 static void cleanup_smi_msgs(struct ipmi_smi *intf) 3536 { 3537 int i; 3538 struct seq_table *ent; 3539 struct ipmi_smi_msg *msg; 3540 struct list_head *entry; 3541 struct list_head tmplist; 3542 3543 /* Clear out our transmit queues and hold the messages. */ 3544 INIT_LIST_HEAD(&tmplist); 3545 list_splice_tail(&intf->hp_xmit_msgs, &tmplist); 3546 list_splice_tail(&intf->xmit_msgs, &tmplist); 3547 3548 /* Current message first, to preserve order */ 3549 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { 3550 /* Wait for the message to clear out. */ 3551 schedule_timeout(1); 3552 } 3553 3554 /* No need for locks, the interface is down. */ 3555 3556 /* 3557 * Return errors for all pending messages in queue and in the 3558 * tables waiting for remote responses. 
3559 */ 3560 while (!list_empty(&tmplist)) { 3561 entry = tmplist.next; 3562 list_del(entry); 3563 msg = list_entry(entry, struct ipmi_smi_msg, link); 3564 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); 3565 } 3566 3567 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 3568 ent = &intf->seq_table[i]; 3569 if (!ent->inuse) 3570 continue; 3571 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); 3572 } 3573 } 3574 3575 void ipmi_unregister_smi(struct ipmi_smi *intf) 3576 { 3577 struct ipmi_smi_watcher *w; 3578 int intf_num = intf->intf_num, index; 3579 3580 mutex_lock(&ipmi_interfaces_mutex); 3581 intf->intf_num = -1; 3582 intf->in_shutdown = true; 3583 list_del_rcu(&intf->link); 3584 mutex_unlock(&ipmi_interfaces_mutex); 3585 synchronize_srcu(&ipmi_interfaces_srcu); 3586 3587 /* At this point no users can be added to the interface. */ 3588 3589 /* 3590 * Call all the watcher interfaces to tell them that 3591 * an interface is going away. 3592 */ 3593 mutex_lock(&smi_watchers_mutex); 3594 list_for_each_entry(w, &smi_watchers, link) 3595 w->smi_gone(intf_num); 3596 mutex_unlock(&smi_watchers_mutex); 3597 3598 index = srcu_read_lock(&intf->users_srcu); 3599 while (!list_empty(&intf->users)) { 3600 struct ipmi_user *user = 3601 container_of(list_next_rcu(&intf->users), 3602 struct ipmi_user, link); 3603 3604 _ipmi_destroy_user(user); 3605 } 3606 srcu_read_unlock(&intf->users_srcu, index); 3607 3608 if (intf->handlers->shutdown) 3609 intf->handlers->shutdown(intf->send_info); 3610 3611 cleanup_smi_msgs(intf); 3612 3613 ipmi_bmc_unregister(intf); 3614 3615 cleanup_srcu_struct(&intf->users_srcu); 3616 kref_put(&intf->refcount, intf_free); 3617 } 3618 EXPORT_SYMBOL(ipmi_unregister_smi); 3619 3620 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, 3621 struct ipmi_smi_msg *msg) 3622 { 3623 struct ipmi_ipmb_addr ipmb_addr; 3624 struct ipmi_recv_msg *recv_msg; 3625 3626 /* 3627 * This is 11, not 10, because the response must contain a 3628 * completion code. 3629 */ 3630 if (msg->rsp_size < 11) { 3631 /* Message not big enough, just ignore it. */ 3632 ipmi_inc_stat(intf, invalid_ipmb_responses); 3633 return 0; 3634 } 3635 3636 if (msg->rsp[2] != 0) { 3637 /* An error getting the response, just ignore it. */ 3638 return 0; 3639 } 3640 3641 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 3642 ipmb_addr.slave_addr = msg->rsp[6]; 3643 ipmb_addr.channel = msg->rsp[3] & 0x0f; 3644 ipmb_addr.lun = msg->rsp[7] & 3; 3645 3646 /* 3647 * It's a response from a remote entity. Look up the sequence 3648 * number and handle the response. 3649 */ 3650 if (intf_find_seq(intf, 3651 msg->rsp[7] >> 2, 3652 msg->rsp[3] & 0x0f, 3653 msg->rsp[8], 3654 (msg->rsp[4] >> 2) & (~1), 3655 (struct ipmi_addr *) &ipmb_addr, 3656 &recv_msg)) { 3657 /* 3658 * We were unable to find the sequence number, 3659 * so just nuke the message. 3660 */ 3661 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3662 return 0; 3663 } 3664 3665 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); 3666 /* 3667 * The other fields matched, so no need to set them, except 3668 * for netfn, which needs to be the response that was 3669 * returned, not the request value. 
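 * (A response netfn is the request netfn with the low bit set, so
 * e.g. a STORAGE request sent with netfn 0x0a comes back as 0x0b;
 * the rsp[4] >> 2 below extracts that returned value.)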
3670 */ 3671 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3672 recv_msg->msg.data = recv_msg->msg_data; 3673 recv_msg->msg.data_len = msg->rsp_size - 10; 3674 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3675 if (deliver_response(intf, recv_msg)) 3676 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3677 else 3678 ipmi_inc_stat(intf, handled_ipmb_responses); 3679 3680 return 0; 3681 } 3682 3683 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, 3684 struct ipmi_smi_msg *msg) 3685 { 3686 struct cmd_rcvr *rcvr; 3687 int rv = 0; 3688 unsigned char netfn; 3689 unsigned char cmd; 3690 unsigned char chan; 3691 struct ipmi_user *user = NULL; 3692 struct ipmi_ipmb_addr *ipmb_addr; 3693 struct ipmi_recv_msg *recv_msg; 3694 3695 if (msg->rsp_size < 10) { 3696 /* Message not big enough, just ignore it. */ 3697 ipmi_inc_stat(intf, invalid_commands); 3698 return 0; 3699 } 3700 3701 if (msg->rsp[2] != 0) { 3702 /* An error getting the response, just ignore it. */ 3703 return 0; 3704 } 3705 3706 netfn = msg->rsp[4] >> 2; 3707 cmd = msg->rsp[8]; 3708 chan = msg->rsp[3] & 0xf; 3709 3710 rcu_read_lock(); 3711 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3712 if (rcvr) { 3713 user = rcvr->user; 3714 kref_get(&user->refcount); 3715 } else 3716 user = NULL; 3717 rcu_read_unlock(); 3718 3719 if (user == NULL) { 3720 /* We didn't find a user, deliver an error response. */ 3721 ipmi_inc_stat(intf, unhandled_commands); 3722 3723 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3724 msg->data[1] = IPMI_SEND_MSG_CMD; 3725 msg->data[2] = msg->rsp[3]; 3726 msg->data[3] = msg->rsp[6]; 3727 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3728 msg->data[5] = ipmb_checksum(&msg->data[3], 2); 3729 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; 3730 /* rqseq/lun */ 3731 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3732 msg->data[8] = msg->rsp[8]; /* cmd */ 3733 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3734 msg->data[10] = ipmb_checksum(&msg->data[6], 4); 3735 msg->data_size = 11; 3736 3737 pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data); 3738 3739 rcu_read_lock(); 3740 if (!intf->in_shutdown) { 3741 smi_send(intf, intf->handlers, msg, 0); 3742 /* 3743 * We used the message, so return the value 3744 * that causes it to not be freed or 3745 * queued. 3746 */ 3747 rv = -1; 3748 } 3749 rcu_read_unlock(); 3750 } else { 3751 recv_msg = ipmi_alloc_recv_msg(); 3752 if (!recv_msg) { 3753 /* 3754 * We couldn't allocate memory for the 3755 * message, so requeue it for handling 3756 * later. 3757 */ 3758 rv = 1; 3759 kref_put(&user->refcount, free_user); 3760 } else { 3761 /* Extract the source address from the data. */ 3762 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 3763 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 3764 ipmb_addr->slave_addr = msg->rsp[6]; 3765 ipmb_addr->lun = msg->rsp[7] & 3; 3766 ipmb_addr->channel = msg->rsp[3] & 0xf; 3767 3768 /* 3769 * Extract the rest of the message information 3770 * from the IPMB header. 3771 */ 3772 recv_msg->user = user; 3773 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3774 recv_msg->msgid = msg->rsp[7] >> 2; 3775 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3776 recv_msg->msg.cmd = msg->rsp[8]; 3777 recv_msg->msg.data = recv_msg->msg_data; 3778 3779 /* 3780 * We chop off 10, not 9 bytes because the checksum 3781 * at the end also needs to be removed. 
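 * Layout, as decoded above: rsp[3] holds the channel, rsp[4] the
 * netfn/LUN byte, rsp[6] the source slave address, rsp[7] the
 * rqSeq/LUN byte, rsp[8] the command, and the payload runs from
 * rsp[9] up to the trailing checksum byte, hence rsp_size - 10.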
3782 */ 3783 recv_msg->msg.data_len = msg->rsp_size - 10; 3784 memcpy(recv_msg->msg_data, &msg->rsp[9], 3785 msg->rsp_size - 10); 3786 if (deliver_response(intf, recv_msg)) 3787 ipmi_inc_stat(intf, unhandled_commands); 3788 else 3789 ipmi_inc_stat(intf, handled_commands); 3790 } 3791 } 3792 3793 return rv; 3794 } 3795 3796 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, 3797 struct ipmi_smi_msg *msg) 3798 { 3799 struct ipmi_lan_addr lan_addr; 3800 struct ipmi_recv_msg *recv_msg; 3801 3802 3803 /* 3804 * This is 13, not 12, because the response must contain a 3805 * completion code. 3806 */ 3807 if (msg->rsp_size < 13) { 3808 /* Message not big enough, just ignore it. */ 3809 ipmi_inc_stat(intf, invalid_lan_responses); 3810 return 0; 3811 } 3812 3813 if (msg->rsp[2] != 0) { 3814 /* An error getting the response, just ignore it. */ 3815 return 0; 3816 } 3817 3818 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 3819 lan_addr.session_handle = msg->rsp[4]; 3820 lan_addr.remote_SWID = msg->rsp[8]; 3821 lan_addr.local_SWID = msg->rsp[5]; 3822 lan_addr.channel = msg->rsp[3] & 0x0f; 3823 lan_addr.privilege = msg->rsp[3] >> 4; 3824 lan_addr.lun = msg->rsp[9] & 3; 3825 3826 /* 3827 * It's a response from a remote entity. Look up the sequence 3828 * number and handle the response. 3829 */ 3830 if (intf_find_seq(intf, 3831 msg->rsp[9] >> 2, 3832 msg->rsp[3] & 0x0f, 3833 msg->rsp[10], 3834 (msg->rsp[6] >> 2) & (~1), 3835 (struct ipmi_addr *) &lan_addr, 3836 &recv_msg)) { 3837 /* 3838 * We were unable to find the sequence number, 3839 * so just nuke the message. 3840 */ 3841 ipmi_inc_stat(intf, unhandled_lan_responses); 3842 return 0; 3843 } 3844 3845 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11); 3846 /* 3847 * The other fields matched, so no need to set them, except 3848 * for netfn, which needs to be the response that was 3849 * returned, not the request value. 3850 */ 3851 recv_msg->msg.netfn = msg->rsp[6] >> 2; 3852 recv_msg->msg.data = recv_msg->msg_data; 3853 recv_msg->msg.data_len = msg->rsp_size - 12; 3854 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3855 if (deliver_response(intf, recv_msg)) 3856 ipmi_inc_stat(intf, unhandled_lan_responses); 3857 else 3858 ipmi_inc_stat(intf, handled_lan_responses); 3859 3860 return 0; 3861 } 3862 3863 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, 3864 struct ipmi_smi_msg *msg) 3865 { 3866 struct cmd_rcvr *rcvr; 3867 int rv = 0; 3868 unsigned char netfn; 3869 unsigned char cmd; 3870 unsigned char chan; 3871 struct ipmi_user *user = NULL; 3872 struct ipmi_lan_addr *lan_addr; 3873 struct ipmi_recv_msg *recv_msg; 3874 3875 if (msg->rsp_size < 12) { 3876 /* Message not big enough, just ignore it. */ 3877 ipmi_inc_stat(intf, invalid_commands); 3878 return 0; 3879 } 3880 3881 if (msg->rsp[2] != 0) { 3882 /* An error getting the response, just ignore it. */ 3883 return 0; 3884 } 3885 3886 netfn = msg->rsp[6] >> 2; 3887 cmd = msg->rsp[10]; 3888 chan = msg->rsp[3] & 0xf; 3889 3890 rcu_read_lock(); 3891 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3892 if (rcvr) { 3893 user = rcvr->user; 3894 kref_get(&user->refcount); 3895 } else 3896 user = NULL; 3897 rcu_read_unlock(); 3898 3899 if (user == NULL) { 3900 /* We didn't find a user, just give up. */ 3901 ipmi_inc_stat(intf, unhandled_commands); 3902 3903 /* 3904 * Don't do anything with these messages, just allow 3905 * them to be freed. 
3906 */ 3907 rv = 0; 3908 } else { 3909 recv_msg = ipmi_alloc_recv_msg(); 3910 if (!recv_msg) { 3911 /* 3912 * We couldn't allocate memory for the 3913 * message, so requeue it for handling later. 3914 */ 3915 rv = 1; 3916 kref_put(&user->refcount, free_user); 3917 } else { 3918 /* Extract the source address from the data. */ 3919 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; 3920 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; 3921 lan_addr->session_handle = msg->rsp[4]; 3922 lan_addr->remote_SWID = msg->rsp[8]; 3923 lan_addr->local_SWID = msg->rsp[5]; 3924 lan_addr->lun = msg->rsp[9] & 3; 3925 lan_addr->channel = msg->rsp[3] & 0xf; 3926 lan_addr->privilege = msg->rsp[3] >> 4; 3927 3928 /* 3929 * Extract the rest of the message information 3930 * from the IPMB header. 3931 */ 3932 recv_msg->user = user; 3933 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3934 recv_msg->msgid = msg->rsp[9] >> 2; 3935 recv_msg->msg.netfn = msg->rsp[6] >> 2; 3936 recv_msg->msg.cmd = msg->rsp[10]; 3937 recv_msg->msg.data = recv_msg->msg_data; 3938 3939 /* 3940 * We chop off 12, not 11 bytes because the checksum 3941 * at the end also needs to be removed. 3942 */ 3943 recv_msg->msg.data_len = msg->rsp_size - 12; 3944 memcpy(recv_msg->msg_data, &msg->rsp[11], 3945 msg->rsp_size - 12); 3946 if (deliver_response(intf, recv_msg)) 3947 ipmi_inc_stat(intf, unhandled_commands); 3948 else 3949 ipmi_inc_stat(intf, handled_commands); 3950 } 3951 } 3952 3953 return rv; 3954 } 3955 3956 /* 3957 * This routine will handle "Get Message" command responses for 3958 * channels that use an OEM Medium. The message format belongs to 3959 * the OEM. See IPMI 2.0 specification, Chapter 6 and 3960 * Chapter 22, sections 22.6 and 22.24 for more details. 3961 */ 3962 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, 3963 struct ipmi_smi_msg *msg) 3964 { 3965 struct cmd_rcvr *rcvr; 3966 int rv = 0; 3967 unsigned char netfn; 3968 unsigned char cmd; 3969 unsigned char chan; 3970 struct ipmi_user *user = NULL; 3971 struct ipmi_system_interface_addr *smi_addr; 3972 struct ipmi_recv_msg *recv_msg; 3973 3974 /* 3975 * We expect the OEM SW to perform error checking, 3976 * so we just do some basic sanity checks. 3977 */ 3978 if (msg->rsp_size < 4) { 3979 /* Message not big enough, just ignore it. */ 3980 ipmi_inc_stat(intf, invalid_commands); 3981 return 0; 3982 } 3983 3984 if (msg->rsp[2] != 0) { 3985 /* An error getting the response, just ignore it. */ 3986 return 0; 3987 } 3988 3989 /* 3990 * This is an OEM Message so the OEM needs to know how 3991 * to handle the message. We do no interpretation. 3992 */ 3993 netfn = msg->rsp[0] >> 2; 3994 cmd = msg->rsp[1]; 3995 chan = msg->rsp[3] & 0xf; 3996 3997 rcu_read_lock(); 3998 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3999 if (rcvr) { 4000 user = rcvr->user; 4001 kref_get(&user->refcount); 4002 } else 4003 user = NULL; 4004 rcu_read_unlock(); 4005 4006 if (user == NULL) { 4007 /* We didn't find a user, just give up. */ 4008 ipmi_inc_stat(intf, unhandled_commands); 4009 4010 /* 4011 * Don't do anything with these messages, just allow 4012 * them to be freed. 4013 */ 4014 4015 rv = 0; 4016 } else { 4017 recv_msg = ipmi_alloc_recv_msg(); 4018 if (!recv_msg) { 4019 /* 4020 * We couldn't allocate memory for the 4021 * message, so requeue it for handling 4022 * later. 4023 */ 4024 rv = 1; 4025 kref_put(&user->refcount, free_user); 4026 } else { 4027 /* 4028 * OEM Messages are expected to be delivered via 4029 * the system interface to SMS software.
We might 4030 * need to visit this again depending on OEM 4031 * requirements. 4032 */ 4033 smi_addr = ((struct ipmi_system_interface_addr *) 4034 &recv_msg->addr); 4035 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4036 smi_addr->channel = IPMI_BMC_CHANNEL; 4037 smi_addr->lun = msg->rsp[0] & 3; 4038 4039 recv_msg->user = user; 4040 recv_msg->user_msg_data = NULL; 4041 recv_msg->recv_type = IPMI_OEM_RECV_TYPE; 4042 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4043 recv_msg->msg.cmd = msg->rsp[1]; 4044 recv_msg->msg.data = recv_msg->msg_data; 4045 4046 /* 4047 * The message starts at byte 4, which follows the 4048 * Channel Byte in the "GET MESSAGE" response. 4049 */ 4050 recv_msg->msg.data_len = msg->rsp_size - 4; 4051 memcpy(recv_msg->msg_data, &msg->rsp[4], 4052 msg->rsp_size - 4); 4053 if (deliver_response(intf, recv_msg)) 4054 ipmi_inc_stat(intf, unhandled_commands); 4055 else 4056 ipmi_inc_stat(intf, handled_commands); 4057 } 4058 } 4059 4060 return rv; 4061 } 4062 4063 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, 4064 struct ipmi_smi_msg *msg) 4065 { 4066 struct ipmi_system_interface_addr *smi_addr; 4067 4068 recv_msg->msgid = 0; 4069 smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr; 4070 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4071 smi_addr->channel = IPMI_BMC_CHANNEL; 4072 smi_addr->lun = msg->rsp[0] & 3; 4073 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE; 4074 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4075 recv_msg->msg.cmd = msg->rsp[1]; 4076 memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3); 4077 recv_msg->msg.data = recv_msg->msg_data; 4078 recv_msg->msg.data_len = msg->rsp_size - 3; 4079 } 4080 4081 static int handle_read_event_rsp(struct ipmi_smi *intf, 4082 struct ipmi_smi_msg *msg) 4083 { 4084 struct ipmi_recv_msg *recv_msg, *recv_msg2; 4085 struct list_head msgs; 4086 struct ipmi_user *user; 4087 int rv = 0, deliver_count = 0, index; 4088 unsigned long flags; 4089 4090 if (msg->rsp_size < 19) { 4091 /* Message is too small to be an IPMB event. */ 4092 ipmi_inc_stat(intf, invalid_events); 4093 return 0; 4094 } 4095 4096 if (msg->rsp[2] != 0) { 4097 /* An error getting the event, just ignore it. */ 4098 return 0; 4099 } 4100 4101 INIT_LIST_HEAD(&msgs); 4102 4103 spin_lock_irqsave(&intf->events_lock, flags); 4104 4105 ipmi_inc_stat(intf, events); 4106 4107 /* 4108 * Allocate and fill in one message for every user that is 4109 * getting events. 4110 */ 4111 index = srcu_read_lock(&intf->users_srcu); 4112 list_for_each_entry_rcu(user, &intf->users, link) { 4113 if (!user->gets_events) 4114 continue; 4115 4116 recv_msg = ipmi_alloc_recv_msg(); 4117 if (!recv_msg) { 4118 srcu_read_unlock(&intf->users_srcu, index); 4119 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, 4120 link) { 4121 list_del(&recv_msg->link); 4122 ipmi_free_recv_msg(recv_msg); 4123 } 4124 /* 4125 * We couldn't allocate memory for the 4126 * message, so requeue it for handling 4127 * later. 4128 */ 4129 rv = 1; 4130 goto out; 4131 } 4132 4133 deliver_count++; 4134 4135 copy_event_into_recv_msg(recv_msg, msg); 4136 recv_msg->user = user; 4137 kref_get(&user->refcount); 4138 list_add_tail(&recv_msg->link, &msgs); 4139 } 4140 srcu_read_unlock(&intf->users_srcu, index); 4141 4142 if (deliver_count) { 4143 /* Now deliver all the messages.
*/ 4144 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { 4145 list_del(&recv_msg->link); 4146 deliver_local_response(intf, recv_msg); 4147 } 4148 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { 4149 /* 4150 * No one to receive the message; put it in the queue if 4151 * there are not already too many things in the queue. 4152 */ 4153 recv_msg = ipmi_alloc_recv_msg(); 4154 if (!recv_msg) { 4155 /* 4156 * We couldn't allocate memory for the 4157 * message, so requeue it for handling 4158 * later. 4159 */ 4160 rv = 1; 4161 goto out; 4162 } 4163 4164 copy_event_into_recv_msg(recv_msg, msg); 4165 list_add_tail(&recv_msg->link, &intf->waiting_events); 4166 intf->waiting_events_count++; 4167 } else if (!intf->event_msg_printed) { 4168 /* 4169 * There are too many things in the queue; discard this 4170 * message. 4171 */ 4172 dev_warn(intf->si_dev, 4173 "Event queue full, discarding incoming events\n"); 4174 intf->event_msg_printed = 1; 4175 } 4176 4177 out: 4178 spin_unlock_irqrestore(&intf->events_lock, flags); 4179 4180 return rv; 4181 } 4182 4183 static int handle_bmc_rsp(struct ipmi_smi *intf, 4184 struct ipmi_smi_msg *msg) 4185 { 4186 struct ipmi_recv_msg *recv_msg; 4187 struct ipmi_system_interface_addr *smi_addr; 4188 4189 recv_msg = (struct ipmi_recv_msg *) msg->user_data; 4190 if (recv_msg == NULL) { 4191 dev_warn(intf->si_dev, 4192 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); 4193 return 0; 4194 } 4195 4196 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4197 recv_msg->msgid = msg->msgid; 4198 smi_addr = ((struct ipmi_system_interface_addr *) 4199 &recv_msg->addr); 4200 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4201 smi_addr->channel = IPMI_BMC_CHANNEL; 4202 smi_addr->lun = msg->rsp[0] & 3; 4203 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4204 recv_msg->msg.cmd = msg->rsp[1]; 4205 memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2); 4206 recv_msg->msg.data = recv_msg->msg_data; 4207 recv_msg->msg.data_len = msg->rsp_size - 2; 4208 deliver_local_response(intf, recv_msg); 4209 4210 return 0; 4211 } 4212 4213 /* 4214 * Handle a received message. Return 1 if the message should be requeued, 4215 * 0 if the message should be freed, or -1 if the message should not 4216 * be freed or requeued. 4217 */ 4218 static int handle_one_recv_msg(struct ipmi_smi *intf, 4219 struct ipmi_smi_msg *msg) 4220 { 4221 int requeue; 4222 int chan; 4223 4224 pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp); 4225 4226 if ((msg->data_size >= 2) 4227 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) 4228 && (msg->data[1] == IPMI_SEND_MSG_CMD) 4229 && (msg->user_data == NULL)) { 4230 4231 if (intf->in_shutdown) 4232 goto free_msg; 4233 4234 /* 4235 * This is the local response to a command send; start 4236 * the timer for these. The user_data will not be 4237 * NULL if this is a response send, and we will let 4238 * response sends just go through. 4239 */ 4240 4241 /* 4242 * Check for errors; if we get certain errors (ones 4243 * that mean basically we can try again later), we 4244 * ignore them and start the timer. Otherwise we 4245 * report the error immediately.
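 * The retryable set (IPMI_NODE_BUSY_ERR, IPMI_LOST_ARBITRATION_ERR,
 * IPMI_BUS_ERR and IPMI_NAK_ON_WRITE_ERR) covers transient bus
 * conditions; starting the timer anyway lets the normal retransmit
 * machinery resend the message instead of failing it on a hiccup.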
/*
 * Handle a received message.  Return 1 if the message should be requeued,
 * 0 if the message should be freed, or -1 if the message should not
 * be freed or requeued.
 */
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue;
	int chan;

	pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);

	if ((msg->data_size >= 2)
	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data == NULL)) {

		if (intf->in_shutdown)
			goto free_msg;

		/*
		 * This is the local response to a command send; start
		 * the timer for these.  The user_data will not be
		 * NULL if this is a response send, and we will let
		 * response sends just go through.
		 */

		/*
		 * Check for errors.  If we get certain errors (ones
		 * that mean basically we can try again later), we
		 * ignore them and start the timer.  Otherwise we
		 * report the error immediately.
		 */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
		    && (msg->rsp[2] != IPMI_BUS_ERR)
		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
			int ch = msg->rsp[3] & 0xf;
			struct ipmi_channel *chans;

			/* Got an error sending the message, handle it. */

			chans = READ_ONCE(intf->channel_list)->c;
			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
				ipmi_inc_stat(intf, sent_lan_command_errs);
			else
				ipmi_inc_stat(intf, sent_ipmb_command_errs);
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);
free_msg:
		requeue = 0;
		goto out;

	} else if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		dev_warn(intf->si_dev,
			 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
			 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
		   || (msg->rsp[1] != msg->data[1])) {
		/*
		 * The NetFN and Command in the response are not even
		 * marginally correct.
		 */
		dev_warn(intf->si_dev,
			 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
			 (msg->data[0] >> 2) | 1, msg->data[1],
			 msg->rsp[0] >> 2, msg->rsp[1]);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	}

	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data != NULL)) {
		/*
		 * It's a response to a response we sent.  For this we
		 * deliver a send message response to the user.
		 */
		struct ipmi_recv_msg *recv_msg = msg->user_data;

		requeue = 0;
		if (msg->rsp_size < 2)
			/* Message is too small to be correct. */
			goto out;

		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;

		if (!recv_msg)
			goto out;

		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = 1;
		recv_msg->msg_data[0] = msg->rsp[2];
		deliver_local_response(intf, recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
		struct ipmi_channel *chans;

		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
			requeue = 0;
			goto out;
		}

		/*
		 * We need to make sure the channels have been initialized.
		 * The channel_handler routine will set the "curr_channel"
		 * equal to or greater than IPMI_MAX_CHANNELS when all the
		 * channels for this interface have been initialized.
		 */
		if (!intf->channels_ready) {
			requeue = 0; /* Throw the message away */
			goto out;
		}

		chans = READ_ONCE(intf->channel_list)->c;

		switch (chans[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);
			}
			break;

		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_lan_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_lan_get_msg_cmd(intf, msg);
			}
			break;

		default:
			/*
			 * Check for OEM channels.  Clients had better
			 * register for these commands.
			 */
			if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
			    && (chans[chan].medium
				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
				requeue = handle_oem_get_msg_cmd(intf, msg);
			} else {
				/*
				 * We don't handle the channel type, so just
				 * free the message.
				 */
				requeue = 0;
			}
		}

	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
		/* It's an asynchronous event. */
		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
	}

out:
	return requeue;
}
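
/*
 * Worked example of the synthesized error response above (request bytes
 * invented): if a request went out as
 * data = { IPMI_NETFN_APP_REQUEST << 2, IPMI_GET_DEVICE_ID_CMD } and the
 * BMC answered with a mismatched netfn/cmd, the message is rewritten to
 *
 *	rsp[0] = (IPMI_NETFN_APP_REQUEST | 1) << 2   (response netfn)
 *	rsp[1] = IPMI_GET_DEVICE_ID_CMD
 *	rsp[2] = IPMI_ERR_UNSPECIFIED                (0xff completion code)
 *
 * so downstream handlers always see a well-formed three-byte response.
 */
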
4344 */ 4345 if (!intf->channels_ready) { 4346 requeue = 0; /* Throw the message away */ 4347 goto out; 4348 } 4349 4350 chans = READ_ONCE(intf->channel_list)->c; 4351 4352 switch (chans[chan].medium) { 4353 case IPMI_CHANNEL_MEDIUM_IPMB: 4354 if (msg->rsp[4] & 0x04) { 4355 /* 4356 * It's a response, so find the 4357 * requesting message and send it up. 4358 */ 4359 requeue = handle_ipmb_get_msg_rsp(intf, msg); 4360 } else { 4361 /* 4362 * It's a command to the SMS from some other 4363 * entity. Handle that. 4364 */ 4365 requeue = handle_ipmb_get_msg_cmd(intf, msg); 4366 } 4367 break; 4368 4369 case IPMI_CHANNEL_MEDIUM_8023LAN: 4370 case IPMI_CHANNEL_MEDIUM_ASYNC: 4371 if (msg->rsp[6] & 0x04) { 4372 /* 4373 * It's a response, so find the 4374 * requesting message and send it up. 4375 */ 4376 requeue = handle_lan_get_msg_rsp(intf, msg); 4377 } else { 4378 /* 4379 * It's a command to the SMS from some other 4380 * entity. Handle that. 4381 */ 4382 requeue = handle_lan_get_msg_cmd(intf, msg); 4383 } 4384 break; 4385 4386 default: 4387 /* Check for OEM Channels. Clients had better 4388 register for these commands. */ 4389 if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN) 4390 && (chans[chan].medium 4391 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) { 4392 requeue = handle_oem_get_msg_cmd(intf, msg); 4393 } else { 4394 /* 4395 * We don't handle the channel type, so just 4396 * free the message. 4397 */ 4398 requeue = 0; 4399 } 4400 } 4401 4402 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4403 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { 4404 /* It's an asynchronous event. */ 4405 requeue = handle_read_event_rsp(intf, msg); 4406 } else { 4407 /* It's a response from the local BMC. */ 4408 requeue = handle_bmc_rsp(intf, msg); 4409 } 4410 4411 out: 4412 return requeue; 4413 } 4414 4415 /* 4416 * If there are messages in the queue or pretimeouts, handle them. 4417 */ 4418 static void handle_new_recv_msgs(struct ipmi_smi *intf) 4419 { 4420 struct ipmi_smi_msg *smi_msg; 4421 unsigned long flags = 0; 4422 int rv; 4423 int run_to_completion = intf->run_to_completion; 4424 4425 /* See if any waiting messages need to be processed. */ 4426 if (!run_to_completion) 4427 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4428 while (!list_empty(&intf->waiting_rcv_msgs)) { 4429 smi_msg = list_entry(intf->waiting_rcv_msgs.next, 4430 struct ipmi_smi_msg, link); 4431 list_del(&smi_msg->link); 4432 if (!run_to_completion) 4433 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4434 flags); 4435 rv = handle_one_recv_msg(intf, smi_msg); 4436 if (!run_to_completion) 4437 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4438 if (rv > 0) { 4439 /* 4440 * To preserve message order, quit if we 4441 * can't handle a message. Add the message 4442 * back at the head, this is safe because this 4443 * tasklet is the only thing that pulls the 4444 * messages. 4445 */ 4446 list_add(&smi_msg->link, &intf->waiting_rcv_msgs); 4447 break; 4448 } else { 4449 if (rv == 0) 4450 /* Message handled */ 4451 ipmi_free_smi_msg(smi_msg); 4452 /* If rv < 0, fatal error, del but don't free. */ 4453 } 4454 } 4455 if (!run_to_completion) 4456 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); 4457 4458 /* 4459 * If the pretimout count is non-zero, decrement one from it and 4460 * deliver pretimeouts to all the users. 
4461 */ 4462 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { 4463 struct ipmi_user *user; 4464 int index; 4465 4466 index = srcu_read_lock(&intf->users_srcu); 4467 list_for_each_entry_rcu(user, &intf->users, link) { 4468 if (user->handler->ipmi_watchdog_pretimeout) 4469 user->handler->ipmi_watchdog_pretimeout( 4470 user->handler_data); 4471 } 4472 srcu_read_unlock(&intf->users_srcu, index); 4473 } 4474 } 4475 4476 static void smi_recv_tasklet(struct tasklet_struct *t) 4477 { 4478 unsigned long flags = 0; /* keep us warning-free. */ 4479 struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet); 4480 int run_to_completion = intf->run_to_completion; 4481 struct ipmi_smi_msg *newmsg = NULL; 4482 4483 /* 4484 * Start the next message if available. 4485 * 4486 * Do this here, not in the actual receiver, because we may deadlock 4487 * because the lower layer is allowed to hold locks while calling 4488 * message delivery. 4489 */ 4490 4491 rcu_read_lock(); 4492 4493 if (!run_to_completion) 4494 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4495 if (intf->curr_msg == NULL && !intf->in_shutdown) { 4496 struct list_head *entry = NULL; 4497 4498 /* Pick the high priority queue first. */ 4499 if (!list_empty(&intf->hp_xmit_msgs)) 4500 entry = intf->hp_xmit_msgs.next; 4501 else if (!list_empty(&intf->xmit_msgs)) 4502 entry = intf->xmit_msgs.next; 4503 4504 if (entry) { 4505 list_del(entry); 4506 newmsg = list_entry(entry, struct ipmi_smi_msg, link); 4507 intf->curr_msg = newmsg; 4508 } 4509 } 4510 4511 if (!run_to_completion) 4512 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4513 if (newmsg) 4514 intf->handlers->sender(intf->send_info, newmsg); 4515 4516 rcu_read_unlock(); 4517 4518 handle_new_recv_msgs(intf); 4519 } 4520 4521 /* Handle a new message from the lower layer. */ 4522 void ipmi_smi_msg_received(struct ipmi_smi *intf, 4523 struct ipmi_smi_msg *msg) 4524 { 4525 unsigned long flags = 0; /* keep us warning-free. */ 4526 int run_to_completion = intf->run_to_completion; 4527 4528 /* 4529 * To preserve message order, we keep a queue and deliver from 4530 * a tasklet. 4531 */ 4532 if (!run_to_completion) 4533 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4534 list_add_tail(&msg->link, &intf->waiting_rcv_msgs); 4535 if (!run_to_completion) 4536 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4537 flags); 4538 4539 if (!run_to_completion) 4540 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4541 /* 4542 * We can get an asynchronous event or receive message in addition 4543 * to commands we send. 4544 */ 4545 if (msg == intf->curr_msg) 4546 intf->curr_msg = NULL; 4547 if (!run_to_completion) 4548 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4549 4550 if (run_to_completion) 4551 smi_recv_tasklet(&intf->recv_tasklet); 4552 else 4553 tasklet_schedule(&intf->recv_tasklet); 4554 } 4555 EXPORT_SYMBOL(ipmi_smi_msg_received); 4556 4557 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf) 4558 { 4559 if (intf->in_shutdown) 4560 return; 4561 4562 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); 4563 tasklet_schedule(&intf->recv_tasklet); 4564 } 4565 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 4566 4567 static struct ipmi_smi_msg * 4568 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, 4569 unsigned char seq, long seqid) 4570 { 4571 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); 4572 if (!smi_msg) 4573 /* 4574 * If we can't allocate the message, then just return, we 4575 * get 4 retries, so this should be ok. 
4576 */ 4577 return NULL; 4578 4579 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); 4580 smi_msg->data_size = recv_msg->msg.data_len; 4581 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); 4582 4583 pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data); 4584 4585 return smi_msg; 4586 } 4587 4588 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, 4589 struct list_head *timeouts, 4590 unsigned long timeout_period, 4591 int slot, unsigned long *flags, 4592 bool *need_timer) 4593 { 4594 struct ipmi_recv_msg *msg; 4595 4596 if (intf->in_shutdown) 4597 return; 4598 4599 if (!ent->inuse) 4600 return; 4601 4602 if (timeout_period < ent->timeout) { 4603 ent->timeout -= timeout_period; 4604 *need_timer = true; 4605 return; 4606 } 4607 4608 if (ent->retries_left == 0) { 4609 /* The message has used all its retries. */ 4610 ent->inuse = 0; 4611 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 4612 msg = ent->recv_msg; 4613 list_add_tail(&msg->link, timeouts); 4614 if (ent->broadcast) 4615 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); 4616 else if (is_lan_addr(&ent->recv_msg->addr)) 4617 ipmi_inc_stat(intf, timed_out_lan_commands); 4618 else 4619 ipmi_inc_stat(intf, timed_out_ipmb_commands); 4620 } else { 4621 struct ipmi_smi_msg *smi_msg; 4622 /* More retries, send again. */ 4623 4624 *need_timer = true; 4625 4626 /* 4627 * Start with the max timer, set to normal timer after 4628 * the message is sent. 4629 */ 4630 ent->timeout = MAX_MSG_TIMEOUT; 4631 ent->retries_left--; 4632 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 4633 ent->seqid); 4634 if (!smi_msg) { 4635 if (is_lan_addr(&ent->recv_msg->addr)) 4636 ipmi_inc_stat(intf, 4637 dropped_rexmit_lan_commands); 4638 else 4639 ipmi_inc_stat(intf, 4640 dropped_rexmit_ipmb_commands); 4641 return; 4642 } 4643 4644 spin_unlock_irqrestore(&intf->seq_lock, *flags); 4645 4646 /* 4647 * Send the new message. We send with a zero 4648 * priority. It timed out, I doubt time is that 4649 * critical now, and high priority messages are really 4650 * only for messages to the local MC, which don't get 4651 * resent. 4652 */ 4653 if (intf->handlers) { 4654 if (is_lan_addr(&ent->recv_msg->addr)) 4655 ipmi_inc_stat(intf, 4656 retransmitted_lan_commands); 4657 else 4658 ipmi_inc_stat(intf, 4659 retransmitted_ipmb_commands); 4660 4661 smi_send(intf, intf->handlers, smi_msg, 0); 4662 } else 4663 ipmi_free_smi_msg(smi_msg); 4664 4665 spin_lock_irqsave(&intf->seq_lock, *flags); 4666 } 4667 } 4668 4669 static bool ipmi_timeout_handler(struct ipmi_smi *intf, 4670 unsigned long timeout_period) 4671 { 4672 struct list_head timeouts; 4673 struct ipmi_recv_msg *msg, *msg2; 4674 unsigned long flags; 4675 int i; 4676 bool need_timer = false; 4677 4678 if (!intf->bmc_registered) { 4679 kref_get(&intf->refcount); 4680 if (!schedule_work(&intf->bmc_reg_work)) { 4681 kref_put(&intf->refcount, intf_free); 4682 need_timer = true; 4683 } 4684 } 4685 4686 /* 4687 * Go through the seq table and find any messages that 4688 * have timed out, putting them in the timeouts 4689 * list. 
4690 */ 4691 INIT_LIST_HEAD(&timeouts); 4692 spin_lock_irqsave(&intf->seq_lock, flags); 4693 if (intf->ipmb_maintenance_mode_timeout) { 4694 if (intf->ipmb_maintenance_mode_timeout <= timeout_period) 4695 intf->ipmb_maintenance_mode_timeout = 0; 4696 else 4697 intf->ipmb_maintenance_mode_timeout -= timeout_period; 4698 } 4699 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) 4700 check_msg_timeout(intf, &intf->seq_table[i], 4701 &timeouts, timeout_period, i, 4702 &flags, &need_timer); 4703 spin_unlock_irqrestore(&intf->seq_lock, flags); 4704 4705 list_for_each_entry_safe(msg, msg2, &timeouts, link) 4706 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE); 4707 4708 /* 4709 * Maintenance mode handling. Check the timeout 4710 * optimistically before we claim the lock. It may 4711 * mean a timeout gets missed occasionally, but that 4712 * only means the timeout gets extended by one period 4713 * in that case. No big deal, and it avoids the lock 4714 * most of the time. 4715 */ 4716 if (intf->auto_maintenance_timeout > 0) { 4717 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 4718 if (intf->auto_maintenance_timeout > 0) { 4719 intf->auto_maintenance_timeout 4720 -= timeout_period; 4721 if (!intf->maintenance_mode 4722 && (intf->auto_maintenance_timeout <= 0)) { 4723 intf->maintenance_mode_enable = false; 4724 maintenance_mode_update(intf); 4725 } 4726 } 4727 spin_unlock_irqrestore(&intf->maintenance_mode_lock, 4728 flags); 4729 } 4730 4731 tasklet_schedule(&intf->recv_tasklet); 4732 4733 return need_timer; 4734 } 4735 4736 static void ipmi_request_event(struct ipmi_smi *intf) 4737 { 4738 /* No event requests when in maintenance mode. */ 4739 if (intf->maintenance_mode_enable) 4740 return; 4741 4742 if (!intf->in_shutdown) 4743 intf->handlers->request_events(intf->send_info); 4744 } 4745 4746 static struct timer_list ipmi_timer; 4747 4748 static atomic_t stop_operation; 4749 4750 static void ipmi_timeout(struct timer_list *unused) 4751 { 4752 struct ipmi_smi *intf; 4753 bool need_timer = false; 4754 int index; 4755 4756 if (atomic_read(&stop_operation)) 4757 return; 4758 4759 index = srcu_read_lock(&ipmi_interfaces_srcu); 4760 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 4761 if (atomic_read(&intf->event_waiters)) { 4762 intf->ticks_to_req_ev--; 4763 if (intf->ticks_to_req_ev == 0) { 4764 ipmi_request_event(intf); 4765 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 4766 } 4767 need_timer = true; 4768 } 4769 4770 need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); 4771 } 4772 srcu_read_unlock(&ipmi_interfaces_srcu, index); 4773 4774 if (need_timer) 4775 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 4776 } 4777 4778 static void need_waiter(struct ipmi_smi *intf) 4779 { 4780 /* Racy, but worst case we start the timer twice. 
static void need_waiter(struct ipmi_smi *intf)
{
	/* Racy, but worst case we start the timer twice. */
	if (!timer_pending(&ipmi_timer))
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	kfree(msg);
}

struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
	struct ipmi_smi_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
	if (rv) {
		rv->done = free_smi_msg;
		rv->user_data = NULL;
		atomic_inc(&smi_msg_inuse_count);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);

static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	kfree(msg);
}

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
	struct ipmi_recv_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
	if (rv) {
		rv->user = NULL;
		rv->done = free_recv_msg;
		atomic_inc(&recv_msg_inuse_count);
	}
	return rv;
}

void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user)
		kref_put(&msg->user->refcount, free_user);
	msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);

static atomic_t panic_done_count = ATOMIC_INIT(0);

static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}

/*
 * Inside a panic, send a message and wait for a response.
 */
static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
					struct ipmi_addr *addr,
					struct kernel_ipmi_msg *msg)
{
	struct ipmi_smi_msg smi_msg;
	struct ipmi_recv_msg recv_msg;
	int rv;

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;
	atomic_add(2, &panic_done_count);
	rv = i_ipmi_request(NULL,
			    intf,
			    addr,
			    0,
			    msg,
			    intf,
			    &smi_msg,
			    &recv_msg,
			    0,
			    intf->addrinfo[0].address,
			    intf->addrinfo[0].lun,
			    0, 1); /* Don't retry, and don't wait. */
	if (rv)
		atomic_sub(2, &panic_done_count);
	else if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);

	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}

static void event_receiver_fetcher(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}
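
/*
 * Sketch of how the fetchers get invoked: the panic-time requests in
 * send_panic_events() below run with no real user, so responses are
 * routed through intf->null_user_handler.  The calling pattern, taken
 * from the code below, is:
 *
 *	intf->null_user_handler = device_id_fetcher;
 *	ipmi_panic_request_and_wait(intf, &addr, &msg);
 *	...
 *	intf->null_user_handler = NULL;
 *
 * with each fetcher stashing what it parsed in the ipmi_smi structure.
 */
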
4903 */ 4904 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; 4905 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; 4906 } 4907 } 4908 4909 static void send_panic_events(struct ipmi_smi *intf, char *str) 4910 { 4911 struct kernel_ipmi_msg msg; 4912 unsigned char data[16]; 4913 struct ipmi_system_interface_addr *si; 4914 struct ipmi_addr addr; 4915 char *p = str; 4916 struct ipmi_ipmb_addr *ipmb; 4917 int j; 4918 4919 if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE) 4920 return; 4921 4922 si = (struct ipmi_system_interface_addr *) &addr; 4923 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4924 si->channel = IPMI_BMC_CHANNEL; 4925 si->lun = 0; 4926 4927 /* Fill in an event telling that we have failed. */ 4928 msg.netfn = 0x04; /* Sensor or Event. */ 4929 msg.cmd = 2; /* Platform event command. */ 4930 msg.data = data; 4931 msg.data_len = 8; 4932 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */ 4933 data[1] = 0x03; /* This is for IPMI 1.0. */ 4934 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */ 4935 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ 4936 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ 4937 4938 /* 4939 * Put a few breadcrumbs in. Hopefully later we can add more things 4940 * to make the panic events more useful. 4941 */ 4942 if (str) { 4943 data[3] = str[0]; 4944 data[6] = str[1]; 4945 data[7] = str[2]; 4946 } 4947 4948 /* Send the event announcing the panic. */ 4949 ipmi_panic_request_and_wait(intf, &addr, &msg); 4950 4951 /* 4952 * On every interface, dump a bunch of OEM event holding the 4953 * string. 4954 */ 4955 if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str) 4956 return; 4957 4958 /* 4959 * intf_num is used as an marker to tell if the 4960 * interface is valid. Thus we need a read barrier to 4961 * make sure data fetched before checking intf_num 4962 * won't be used. 4963 */ 4964 smp_rmb(); 4965 4966 /* 4967 * First job here is to figure out where to send the 4968 * OEM events. There's no way in IPMI to send OEM 4969 * events using an event send command, so we have to 4970 * find the SEL to put them in and stick them in 4971 * there. 4972 */ 4973 4974 /* Get capabilities from the get device id. */ 4975 intf->local_sel_device = 0; 4976 intf->local_event_generator = 0; 4977 intf->event_receiver = 0; 4978 4979 /* Request the device info from the local MC. */ 4980 msg.netfn = IPMI_NETFN_APP_REQUEST; 4981 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 4982 msg.data = NULL; 4983 msg.data_len = 0; 4984 intf->null_user_handler = device_id_fetcher; 4985 ipmi_panic_request_and_wait(intf, &addr, &msg); 4986 4987 if (intf->local_event_generator) { 4988 /* Request the event receiver from the local MC. */ 4989 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; 4990 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; 4991 msg.data = NULL; 4992 msg.data_len = 0; 4993 intf->null_user_handler = event_receiver_fetcher; 4994 ipmi_panic_request_and_wait(intf, &addr, &msg); 4995 } 4996 intf->null_user_handler = NULL; 4997 4998 /* 4999 * Validate the event receiver. The low bit must not 5000 * be 1 (it must be a valid IPMB address), it cannot 5001 * be zero, and it must not be my address. 5002 */ 5003 if (((intf->event_receiver & 1) == 0) 5004 && (intf->event_receiver != 0) 5005 && (intf->event_receiver != intf->addrinfo[0].address)) { 5006 /* 5007 * The event receiver is valid, send an IPMB 5008 * message. 5009 */ 5010 ipmb = (struct ipmi_ipmb_addr *) &addr; 5011 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; 5012 ipmb->channel = 0; /* FIXME - is this right? 
static int has_panicked;

static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	struct ipmi_smi *intf;
	struct ipmi_user *user;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || intf->intf_num == -1)
			/* Interface is not ready. */
			continue;

		if (!intf->handlers->poll)
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted.  In this case, drop items on the list for
		 * safety.
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);

		intf->run_to_completion = 1;
		if (intf->handlers->set_run_to_completion)
			intf->handlers->set_run_to_completion(intf->send_info,
							      1);

		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_panic_handler)
				user->handler->ipmi_panic_handler(
					user->handler_data);
		}

		send_panic_events(intf, ptr);
	}

	return NOTIFY_DONE;
}
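
/*
 * Panic-path call-flow sketch: once run_to_completion is set, nothing
 * here sleeps or relies on the scheduler:
 *
 *	panic_event()
 *	    -> intf->run_to_completion = 1
 *	    -> send_panic_events()
 *	        -> ipmi_panic_request_and_wait()
 *	            -> i_ipmi_request(), then ipmi_poll() in a loop
 *	               until panic_done_count drains to zero
 *
 * and ipmi_smi_msg_received() calls smi_recv_tasklet() directly
 * instead of scheduling it.
 */
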
/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
	int rv;

	if (drvregistered)
		return 0;

	rv = driver_register(&ipmidriver.driver);
	if (rv)
		pr_err("Could not register IPMI driver\n");
	else
		drvregistered = true;
	return rv;
}

static struct notifier_block panic_block = {
	.notifier_call = panic_event,
	.next = NULL,
	.priority = 200 /* priority: INT_MAX >= x >= 0 */
};

static int ipmi_init_msghandler(void)
{
	int rv;

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	if (rv)
		goto out;
	if (initialized)
		goto out;

	init_srcu_struct(&ipmi_interfaces_srcu);

	timer_setup(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = true;

out:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}

static int __init ipmi_init_msghandler_mod(void)
{
	int rv;

	pr_info("version " IPMI_DRIVER_VERSION "\n");

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}

static void __exit cleanup_ipmi(void)
{
	int count;

	if (initialized) {
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &panic_block);

		/*
		 * This can't be called if any interfaces exist, so no worry
		 * about shutting down the interfaces.
		 */

		/*
		 * Tell the timer to stop, then wait for it to stop.  This
		 * avoids problems with race conditions removing the timer
		 * here.
		 */
		atomic_set(&stop_operation, 1);
		del_timer_sync(&ipmi_timer);

		initialized = false;

		/* Check for buffer leaks. */
		count = atomic_read(&smi_msg_inuse_count);
		if (count != 0)
			pr_warn("SMI message count %d at exit\n", count);
		count = atomic_read(&recv_msg_inuse_count);
		if (count != 0)
			pr_warn("recv message count %d at exit\n", count);

		cleanup_srcu_struct(&ipmi_interfaces_srcu);
	}
	if (drvregistered)
		driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");