// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "IPMI message handler: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(struct tasklet_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);

static bool initialized;
static bool drvregistered;

/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING,
	IPMI_SEND_PANIC_EVENT_MAX
};

/* Indices in this array should be mapped to enum ipmi_panic_event_op */
static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };

#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	int e;

	strscpy(valcp, val, sizeof(valcp));
	e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
	if (e < 0)
		return e;

	ipmi_send_panic_event = e;
	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	const char *event_str;

	if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
		event_str = "???";
	else
		event_str = ipmi_panic_event_str[ipmi_send_panic_event];

	return sprintf(buffer, "%s\n", event_str);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
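/*
 * An illustrative sketch of using the parameter above (assuming this
 * file is built as the usual ipmi_msghandler module): with the 0600
 * permissions, panic_op is settable at runtime through sysfs:
 *
 *   # cat /sys/module/ipmi_msghandler/parameters/panic_op
 *   none
 *   # echo string > /sys/module/ipmi_msghandler/parameters/panic_op
 *
 * match_string() in panic_op_write_handler() returns the index of the
 * matched entry in ipmi_panic_event_str, which maps one-to-one onto
 * enum ipmi_panic_event_op.
 */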

#define MAX_EVENTS_IN_QUEUE 25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at
 * least the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The number of retries that are done before a message send fails");

/* The default maximum number of users that may register. */
static unsigned int max_users = 30;
module_param(max_users, uint, 0644);
MODULE_PARM_DESC(max_users,
		 "The most users that may use the IPMI stack at one time.");

/* The default maximum number of messages a user may have outstanding. */
static unsigned int max_msgs_per_user = 100;
module_param(max_msgs_per_user, uint, 0644);
MODULE_PARM_DESC(max_msgs_per_user,
		 "The most messages a user may have outstanding.");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	/*
	 * Set to NULL when the user is destroyed, a pointer to myself
	 * so srcu_dereference can be used on it.
	 */
	struct ipmi_user *self;
	struct srcu_struct release_barrier;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;

	atomic_t nr_msgs;

	/* Free must run in process context for RCU cleanup. */
	struct work_struct remove_work;
};

static struct workqueue_struct *remove_work_wq;

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
	__acquires(user->release_barrier)
{
	struct ipmi_user *ruser;

	*index = srcu_read_lock(&user->release_barrier);
	ruser = srcu_dereference(user->self, &user->release_barrier);
	if (!ruser)
		srcu_read_unlock(&user->release_barrier, *index);
	return ruser;
}

static void release_ipmi_user(struct ipmi_user *user, int index)
{
	srcu_read_unlock(&user->release_barrier, index);
}

struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do {								\
		seq = (((msgid) >> 26) & 0x3f);				\
		seqid = ((msgid) & 0x3ffffff);				\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
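/*
 * A worked example of the packing above: the 6-bit sequence table
 * index lives in bits 26-31 and the 26-bit sequence id in bits 0-25.
 * For instance (values chosen arbitrarily):
 *
 *   msgid = STORE_SEQ_IN_MSGID(5, 0x123);  // (5 << 26) | 0x123 = 0x14000123
 *   GET_SEQ_FROM_MSGID(msgid, seq, seqid); // seq = 5, seqid = 0x123
 *
 * and NEXT_SEQID() just increments the id modulo 2^26.
 */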

#define IPMI_MAX_CHANNELS 16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id id;
	struct ipmi_device_id fetch_id;
	int dyn_id_set;
	unsigned long dyn_id_expiry;
	struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t guid;
	guid_t fetch_guid;
	int dyn_guid_set;
	struct kref usecount;
	struct work_struct remove_work;
	unsigned char cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out on the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent on the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};
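/*
 * A short sketch of how these indexes are meant to be used: the
 * ipmi_inc_stat()/ipmi_get_stat() helpers defined further down paste
 * the short name onto the IPMI_STAT_ prefix, so e.g.
 *
 *   ipmi_inc_stat(intf, sent_ipmb_commands);
 *
 * expands to
 *
 *   atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands]);
 */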

#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.  seq_lock write
	 * protects this.  Read protection is with srcu.
	 */
	struct list_head users;
	struct srcu_struct users_srcu;
	atomic_t nr_users;
	struct device_attribute nr_users_devattr;
	struct device_attribute nr_msgs_devattr;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register; /* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void *send_info;

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery.  If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
	 * periodic timer interrupt.  The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	spinlock_t xmit_msgs_lock;
	struct list_head xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int waiting_events_count; /* How many events in queue? */
	char delivering_events;
	char event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;

	spinlock_t watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs.  Protected by watch_lock.
	 */
	unsigned int last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * A duplicate of the run_to_completion flag in the smb_info,
	 * smi_info, and ipmi_serial_info structures.  Used to decrease
	 * the number of parameters passed by the "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);


/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
#define ipmi_interfaces_mutex_held() \
	lockdep_is_held(&ipmi_interfaces_mutex)
static struct srcu_struct ipmi_interfaces_srcu;
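/*
 * A sketch of the locking model for the interface list, as used by the
 * lookup loops below: updaters hold ipmi_interfaces_mutex, while
 * readers only need an SRCU read-side critical section:
 *
 *   int index = srcu_read_lock(&ipmi_interfaces_srcu);
 *   list_for_each_entry_rcu(intf, &ipmi_interfaces, link)
 *           ...;
 *   srcu_read_unlock(&ipmi_interfaces_srcu, index);
 */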

/*
 * List of watchers that want to know when SMIs are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static int is_ipmb_direct_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void clean_up_interface_data(struct ipmi_smi *intf)
{
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;
	struct list_head list;

	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);

	clean_up_interface_data(intf);
	kfree(intf);
}

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	int index, rv;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
			lockdep_is_held(&smi_watchers_mutex)) {
		int intf_num = READ_ONCE(intf->intf_num);

		if (intf_num == -1)
			continue;
		watcher->new_smi(intf_num, intf->si_dev);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	mutex_unlock(&smi_watchers_mutex);

	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
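/*
 * A minimal caller sketch (hypothetical client module): register a
 * watcher to learn about interfaces as they come and go.  As the loop
 * in ipmi_smi_watcher_register() shows, new_smi() is also replayed for
 * interfaces that already exist at registration time.
 *
 *   static void my_new_smi(int if_num, struct device *dev) { ... }
 *   static void my_smi_gone(int if_num) { ... }
 *
 *   static struct ipmi_smi_watcher my_watcher = {
 *           .owner = THIS_MODULE,
 *           .new_smi = my_new_smi,
 *           .smi_gone = my_smi_gone,
 *   };
 *
 *   rv = ipmi_smi_watcher_register(&my_watcher);
 */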

/*
 * Tell all the registered watchers about a new interface.  Takes
 * smi_watchers_mutex itself, so it must not be called with that
 * mutex held.
 */
static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
	mutex_unlock(&smi_watchers_mutex);
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
		    = (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
		    = (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
		    = (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
		    = (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_ipmb_direct_addr(addr1)) {
		struct ipmi_ipmb_direct_addr *daddr1
			= (struct ipmi_ipmb_direct_addr *) addr1;
		struct ipmi_ipmb_direct_addr *daddr2
			= (struct ipmi_ipmb_direct_addr *) addr2;

		return daddr1->slave_addr == daddr2->slave_addr &&
			daddr1->rq_lun == daddr2->rq_lun &&
			daddr1->rs_lun == daddr2->rs_lun;
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
		    = (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_ipmb_direct_addr(addr)) {
		struct ipmi_ipmb_direct_addr *daddr = (void *) addr;

		if (addr->channel != 0)
			return -EINVAL;
		if (len < sizeof(struct ipmi_ipmb_direct_addr))
			return -EINVAL;

		if (daddr->slave_addr & 0x01)
			return -EINVAL;
		if (daddr->rq_lun >= 4)
			return -EINVAL;
		if (daddr->rs_lun >= 4)
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
		return sizeof(struct ipmi_ipmb_direct_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk, so simply skip it in that case.
		 */
		ipmi_free_recv_msg(msg);
		atomic_dec(&msg->user->nr_msgs);
	} else {
		int index;
		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);

		if (user) {
			atomic_dec(&user->nr_msgs);
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
			release_ipmi_user(user, index);
		} else {
			/* User went away, give up. */
			ipmi_free_recv_msg(msg);
			rv = -EINVAL;
		}
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}

static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
			 int retries,
			 int broadcast,
			 unsigned char *seq,
			 long *seqid)
{
	int rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}
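/*
 * A sketch of the typical caller pattern (mirroring i_ipmi_req_ipmb()
 * below): allocate a sequence slot under seq_lock, then stamp the
 * outgoing message with a msgid that encodes both values:
 *
 *   spin_lock_irqsave(&intf->seq_lock, flags);
 *   rv = intf_next_seq(intf, recv_msg, retry_time_ms, retries,
 *                      0, &ipmb_seq, &seqid);
 *   if (!rv)
 *           format_ipmb_msg(smi_msg, msg, ipmb_addr,
 *                           STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
 *                           ipmb_seq, 0, source_address, source_lun);
 *   spin_unlock_irqrestore(&intf->seq_lock, flags);
 */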

/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi *intf,
			 unsigned char seq,
			 short channel,
			 unsigned char cmd,
			 unsigned char netfn,
			 struct ipmi_addr *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long msgid)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long msgid,
			unsigned int err)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}

static void free_user_work(struct work_struct *work)
{
	struct ipmi_user *user = container_of(work, struct ipmi_user,
					      remove_work);

	cleanup_srcu_struct(&user->release_barrier);
	vfree(user);
}

int ipmi_create_user(unsigned int if_num,
		     const struct ipmi_user_hndl *handler,
		     void *handler_data,
		     struct ipmi_user **user)
{
	unsigned long flags;
	struct ipmi_user *new_user;
	int rv, index;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	new_user = vzalloc(sizeof(*new_user));
	if (!new_user)
		return -ENOMEM;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

found:
	if (atomic_add_return(1, &intf->nr_users) > max_users) {
		rv = -EBUSY;
		goto out_kfree;
	}

	INIT_WORK(&new_user->remove_work, free_user_work);

	rv = init_srcu_struct(&new_user->release_barrier);
	if (rv)
		goto out_kfree;

	if (!try_module_get(intf->owner)) {
		rv = -ENODEV;
		goto out_kfree;
	}

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	atomic_set(&new_user->nr_msgs, 0);
	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	rcu_assign_pointer(new_user->self, new_user);
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	*user = new_user;
	return 0;

out_kfree:
	atomic_dec(&intf->nr_users);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	vfree(new_user);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
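/*
 * A minimal in-kernel client sketch (hypothetical names; the receive
 * handler must eventually free each message with ipmi_free_recv_msg()):
 *
 *   static void my_recv(struct ipmi_recv_msg *msg, void *data)
 *   {
 *           ...
 *           ipmi_free_recv_msg(msg);
 *   }
 *
 *   static const struct ipmi_user_hndl my_hndl = {
 *           .ipmi_recv_hndl = my_recv,
 *   };
 *   struct ipmi_user *user;
 *
 *   rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *   ...
 *   ipmi_destroy_user(user);
 */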

int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv, index;
	struct ipmi_smi *intf;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	/* Not found, return an error */
	return -EINVAL;

found:
	if (!intf->handlers->get_smi_info)
		rv = -ENOTTY;
	else
		rv = intf->handlers->get_smi_info(intf->send_info, data);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

static void free_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

	/* SRCU cleanup must happen in task context. */
	queue_work(remove_work_wq, &user->remove_work);
}

static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;
	int i;
	unsigned long flags;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;

	if (!acquire_ipmi_user(user, &i)) {
		/*
		 * The user has already been cleaned up, just make sure
		 * nothing is using it and return.
		 */
		synchronize_srcu(&user->release_barrier);
		return;
	}

	rcu_assign_pointer(user->self, NULL);
	release_ipmi_user(user, i);

	synchronize_srcu(&user->release_barrier);

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);
	atomic_dec(&intf->nr_users);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_srcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	kref_put(&intf->refcount, intf_free);
	module_put(intf->owner);
}

int ipmi_destroy_user(struct ipmi_user *user)
{
	_ipmi_destroy_user(user);

	kref_put(&user->refcount, free_user);

	return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);

int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char LUN)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);

int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode, index;
	unsigned long flags;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}

int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0, index;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_enable = false;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_enable = true;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);

int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;
	int index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_local_response(intf, msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

out:
	spin_unlock_irqrestore(&intf->events_lock, flags);
	release_ipmi_user(user, index);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);
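/*
 * A usage sketch (hypothetical client): a user that wants BMC events
 * opts in after registering, and any events already queued on the
 * interface are flushed to its receive handler, as the loop above
 * shows:
 *
 *   rv = ipmi_set_gets_events(user, true);   // begin event delivery
 *   ...
 *   ipmi_set_gets_events(user, false);       // stop event delivery
 */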

static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & chans))
			return 0;
	}
	return 1;
}

int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
out_release:
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);
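/*
 * An illustrative registration (hypothetical values): claim OEM netfn
 * 0x2e, command 0x01 on channels 0 and 1.  chans is a bitmask of
 * channel numbers, and a given (netfn, cmd, channel) triple can only
 * have one receiver at a time, hence the exclusivity check above.
 *
 *   rv = ipmi_register_for_cmd(user, 0x2e, 0x01, BIT(0) | BIT(1));
 *   ...
 *   ipmi_unregister_for_cmd(user, 0x2e, 0x01, BIT(0) | BIT(1));
 */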

int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user, index);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
EXPORT_SYMBOL(ipmb_checksum);
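/*
 * A worked example of the 2's-complement checksum above: for the two
 * header bytes { 0x20, 0x18 } the byte sum is 0x38, so ipmb_checksum()
 * returns 0xc8, and (0x20 + 0x18 + 0xc8) & 0xff == 0, as IPMB
 * verification requires.
 */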

static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}

static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  long msgid,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
{
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/* Add on the checksum size. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = intf->run_to_completion;
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
			      struct ipmi_addr *addr,
			      long msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg *smi_msg,
			      struct ipmi_recv_msg *recv_msg,
			      int retries,
			      unsigned int retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout
			= maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && !intf->maintenance_mode_enable) {
			intf->maintenance_mode_enable = true;
			maintenance_mode_update(intf);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->user_data = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}

static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
			   struct ipmi_addr *addr,
			   long msgid,
			   struct kernel_ipmi_msg *msg,
			   struct ipmi_smi_msg *smi_msg,
			   struct ipmi_recv_msg *recv_msg,
			   unsigned char source_address,
			   unsigned char source_lun,
			   int retries,
			   unsigned int retry_time_ms)
{
	struct ipmi_ipmb_addr *ipmb_addr;
	unsigned char ipmb_seq;
	long seqid;
	int broadcast = 0;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
		/*
		 * Broadcasts add a zero at the beginning of the
		 * message, but otherwise is the same as an IPMB
		 * address.
		 */
		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
		broadcast = 1;
		retries = 0; /* Don't retry broadcasts. */
	}

	/*
	 * 9 for the header and 1 for the checksum, plus
	 * possibly one for the broadcast.
	 */
	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
	if (ipmb_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_ipmb_responses);
		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
				msgid, broadcast,
				source_address, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
*/ 2049 unsigned long flags; 2050 2051 spin_lock_irqsave(&intf->seq_lock, flags); 2052 2053 if (is_maintenance_mode_cmd(msg)) 2054 intf->ipmb_maintenance_mode_timeout = 2055 maintenance_mode_timeout_ms; 2056 2057 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0) 2058 /* Different default in maintenance mode */ 2059 retry_time_ms = default_maintenance_retry_ms; 2060 2061 /* 2062 * Create a sequence number with a 1 second 2063 * timeout and 4 retries. 2064 */ 2065 rv = intf_next_seq(intf, 2066 recv_msg, 2067 retry_time_ms, 2068 retries, 2069 broadcast, 2070 &ipmb_seq, 2071 &seqid); 2072 if (rv) 2073 /* 2074 * We have used up all the sequence numbers, 2075 * probably, so abort. 2076 */ 2077 goto out_err; 2078 2079 ipmi_inc_stat(intf, sent_ipmb_commands); 2080 2081 /* 2082 * Store the sequence number in the message, 2083 * so that when the send message response 2084 * comes back we can start the timer. 2085 */ 2086 format_ipmb_msg(smi_msg, msg, ipmb_addr, 2087 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2088 ipmb_seq, broadcast, 2089 source_address, source_lun); 2090 2091 /* 2092 * Copy the message into the recv message data, so we 2093 * can retransmit it later if necessary. 2094 */ 2095 memcpy(recv_msg->msg_data, smi_msg->data, 2096 smi_msg->data_size); 2097 recv_msg->msg.data = recv_msg->msg_data; 2098 recv_msg->msg.data_len = smi_msg->data_size; 2099 2100 /* 2101 * We don't unlock until here, because we need 2102 * to copy the completed message into the 2103 * recv_msg before we release the lock. 2104 * Otherwise, race conditions may bite us. I 2105 * know that's pretty paranoid, but I prefer 2106 * to be correct. 2107 */ 2108 out_err: 2109 spin_unlock_irqrestore(&intf->seq_lock, flags); 2110 } 2111 2112 return rv; 2113 } 2114 2115 static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf, 2116 struct ipmi_addr *addr, 2117 long msgid, 2118 struct kernel_ipmi_msg *msg, 2119 struct ipmi_smi_msg *smi_msg, 2120 struct ipmi_recv_msg *recv_msg, 2121 unsigned char source_lun) 2122 { 2123 struct ipmi_ipmb_direct_addr *daddr; 2124 bool is_cmd = !(recv_msg->msg.netfn & 0x1); 2125 2126 if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT)) 2127 return -EAFNOSUPPORT; 2128 2129 /* Responses must have a completion code. 
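	 *
	 * By IPMI convention the first data byte of a response is the
	 * completion code (0x00 meaning "completed normally"), which is
	 * why a zero-length response body is rejected just below.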
*/ 2130 if (!is_cmd && msg->data_len < 1) { 2131 ipmi_inc_stat(intf, sent_invalid_commands); 2132 return -EINVAL; 2133 } 2134 2135 if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) { 2136 ipmi_inc_stat(intf, sent_invalid_commands); 2137 return -EMSGSIZE; 2138 } 2139 2140 daddr = (struct ipmi_ipmb_direct_addr *) addr; 2141 if (daddr->rq_lun > 3 || daddr->rs_lun > 3) { 2142 ipmi_inc_stat(intf, sent_invalid_commands); 2143 return -EINVAL; 2144 } 2145 2146 smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT; 2147 smi_msg->msgid = msgid; 2148 2149 if (is_cmd) { 2150 smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun; 2151 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun; 2152 } else { 2153 smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun; 2154 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun; 2155 } 2156 smi_msg->data[1] = daddr->slave_addr; 2157 smi_msg->data[3] = msg->cmd; 2158 2159 memcpy(smi_msg->data + 4, msg->data, msg->data_len); 2160 smi_msg->data_size = msg->data_len + 4; 2161 2162 smi_msg->user_data = recv_msg; 2163 2164 return 0; 2165 } 2166 2167 static int i_ipmi_req_lan(struct ipmi_smi *intf, 2168 struct ipmi_addr *addr, 2169 long msgid, 2170 struct kernel_ipmi_msg *msg, 2171 struct ipmi_smi_msg *smi_msg, 2172 struct ipmi_recv_msg *recv_msg, 2173 unsigned char source_lun, 2174 int retries, 2175 unsigned int retry_time_ms) 2176 { 2177 struct ipmi_lan_addr *lan_addr; 2178 unsigned char ipmb_seq; 2179 long seqid; 2180 struct ipmi_channel *chans; 2181 int rv = 0; 2182 2183 if (addr->channel >= IPMI_MAX_CHANNELS) { 2184 ipmi_inc_stat(intf, sent_invalid_commands); 2185 return -EINVAL; 2186 } 2187 2188 chans = READ_ONCE(intf->channel_list)->c; 2189 2190 if ((chans[addr->channel].medium 2191 != IPMI_CHANNEL_MEDIUM_8023LAN) 2192 && (chans[addr->channel].medium 2193 != IPMI_CHANNEL_MEDIUM_ASYNC)) { 2194 ipmi_inc_stat(intf, sent_invalid_commands); 2195 return -EINVAL; 2196 } 2197 2198 /* 11 for the header and 1 for the checksum. */ 2199 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { 2200 ipmi_inc_stat(intf, sent_invalid_commands); 2201 return -EMSGSIZE; 2202 } 2203 2204 lan_addr = (struct ipmi_lan_addr *) addr; 2205 if (lan_addr->lun > 3) { 2206 ipmi_inc_stat(intf, sent_invalid_commands); 2207 return -EINVAL; 2208 } 2209 2210 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); 2211 2212 if (recv_msg->msg.netfn & 0x1) { 2213 /* 2214 * It's a response, so use the user's sequence 2215 * from msgid. 2216 */ 2217 ipmi_inc_stat(intf, sent_lan_responses); 2218 format_lan_msg(smi_msg, msg, lan_addr, msgid, 2219 msgid, source_lun); 2220 2221 /* 2222 * Save the receive message so we can use it 2223 * to deliver the response. 2224 */ 2225 smi_msg->user_data = recv_msg; 2226 } else { 2227 /* It's a command, so get a sequence for it. */ 2228 unsigned long flags; 2229 2230 spin_lock_irqsave(&intf->seq_lock, flags); 2231 2232 /* 2233 * Create a sequence number with a 1 second 2234 * timeout and 4 retries. 2235 */ 2236 rv = intf_next_seq(intf, 2237 recv_msg, 2238 retry_time_ms, 2239 retries, 2240 0, 2241 &ipmb_seq, 2242 &seqid); 2243 if (rv) 2244 /* 2245 * We have used up all the sequence numbers, 2246 * probably, so abort. 2247 */ 2248 goto out_err; 2249 2250 ipmi_inc_stat(intf, sent_lan_commands); 2251 2252 /* 2253 * Store the sequence number in the message, 2254 * so that when the send message response 2255 * comes back we can start the timer. 
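		 *
		 * (The BMC first acks the wrapping Send Message command;
		 * when that ack arrives, the receive path unpacks this
		 * msgid again, see intf_start_seq_timer() elsewhere in
		 * this file, and only then arms the retry timer for the
		 * LAN command itself.)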
2256 */ 2257 format_lan_msg(smi_msg, msg, lan_addr, 2258 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2259 ipmb_seq, source_lun); 2260 2261 /* 2262 * Copy the message into the recv message data, so we 2263 * can retransmit it later if necessary. 2264 */ 2265 memcpy(recv_msg->msg_data, smi_msg->data, 2266 smi_msg->data_size); 2267 recv_msg->msg.data = recv_msg->msg_data; 2268 recv_msg->msg.data_len = smi_msg->data_size; 2269 2270 /* 2271 * We don't unlock until here, because we need 2272 * to copy the completed message into the 2273 * recv_msg before we release the lock. 2274 * Otherwise, race conditions may bite us. I 2275 * know that's pretty paranoid, but I prefer 2276 * to be correct. 2277 */ 2278 out_err: 2279 spin_unlock_irqrestore(&intf->seq_lock, flags); 2280 } 2281 2282 return rv; 2283 } 2284 2285 /* 2286 * Separate from ipmi_request so that the user does not have to be 2287 * supplied in certain circumstances (mainly at panic time). If 2288 * messages are supplied, they will be freed, even if an error 2289 * occurs. 2290 */ 2291 static int i_ipmi_request(struct ipmi_user *user, 2292 struct ipmi_smi *intf, 2293 struct ipmi_addr *addr, 2294 long msgid, 2295 struct kernel_ipmi_msg *msg, 2296 void *user_msg_data, 2297 void *supplied_smi, 2298 struct ipmi_recv_msg *supplied_recv, 2299 int priority, 2300 unsigned char source_address, 2301 unsigned char source_lun, 2302 int retries, 2303 unsigned int retry_time_ms) 2304 { 2305 struct ipmi_smi_msg *smi_msg; 2306 struct ipmi_recv_msg *recv_msg; 2307 int rv = 0; 2308 2309 if (user) { 2310 if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { 2311 /* Decrement will happen at the end of the routine. */ 2312 rv = -EBUSY; 2313 goto out; 2314 } 2315 } 2316 2317 if (supplied_recv) 2318 recv_msg = supplied_recv; 2319 else { 2320 recv_msg = ipmi_alloc_recv_msg(); 2321 if (recv_msg == NULL) { 2322 rv = -ENOMEM; 2323 goto out; 2324 } 2325 } 2326 recv_msg->user_msg_data = user_msg_data; 2327 2328 if (supplied_smi) 2329 smi_msg = supplied_smi; 2330 else { 2331 smi_msg = ipmi_alloc_smi_msg(); 2332 if (smi_msg == NULL) { 2333 if (!supplied_recv) 2334 ipmi_free_recv_msg(recv_msg); 2335 rv = -ENOMEM; 2336 goto out; 2337 } 2338 } 2339 2340 rcu_read_lock(); 2341 if (intf->in_shutdown) { 2342 rv = -ENODEV; 2343 goto out_err; 2344 } 2345 2346 recv_msg->user = user; 2347 if (user) 2348 /* The put happens when the message is freed. */ 2349 kref_get(&user->refcount); 2350 recv_msg->msgid = msgid; 2351 /* 2352 * Store the message to send in the receive message so timeout 2353 * responses can get the proper response data. 2354 */ 2355 recv_msg->msg = *msg; 2356 2357 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 2358 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, 2359 recv_msg, retries, retry_time_ms); 2360 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 2361 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, 2362 source_address, source_lun, 2363 retries, retry_time_ms); 2364 } else if (is_ipmb_direct_addr(addr)) { 2365 rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg, 2366 recv_msg, source_lun); 2367 } else if (is_lan_addr(addr)) { 2368 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, 2369 source_lun, retries, retry_time_ms); 2370 } else { 2371 /* Unknown address type. 
*/ 2372 ipmi_inc_stat(intf, sent_invalid_commands); 2373 rv = -EINVAL; 2374 } 2375 2376 if (rv) { 2377 out_err: 2378 ipmi_free_smi_msg(smi_msg); 2379 ipmi_free_recv_msg(recv_msg); 2380 } else { 2381 dev_dbg(intf->si_dev, "Send: %*ph\n", 2382 smi_msg->data_size, smi_msg->data); 2383 2384 smi_send(intf, intf->handlers, smi_msg, priority); 2385 } 2386 rcu_read_unlock(); 2387 2388 out: 2389 if (rv && user) 2390 atomic_dec(&user->nr_msgs); 2391 return rv; 2392 } 2393 2394 static int check_addr(struct ipmi_smi *intf, 2395 struct ipmi_addr *addr, 2396 unsigned char *saddr, 2397 unsigned char *lun) 2398 { 2399 if (addr->channel >= IPMI_MAX_CHANNELS) 2400 return -EINVAL; 2401 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); 2402 *lun = intf->addrinfo[addr->channel].lun; 2403 *saddr = intf->addrinfo[addr->channel].address; 2404 return 0; 2405 } 2406 2407 int ipmi_request_settime(struct ipmi_user *user, 2408 struct ipmi_addr *addr, 2409 long msgid, 2410 struct kernel_ipmi_msg *msg, 2411 void *user_msg_data, 2412 int priority, 2413 int retries, 2414 unsigned int retry_time_ms) 2415 { 2416 unsigned char saddr = 0, lun = 0; 2417 int rv, index; 2418 2419 if (!user) 2420 return -EINVAL; 2421 2422 user = acquire_ipmi_user(user, &index); 2423 if (!user) 2424 return -ENODEV; 2425 2426 rv = check_addr(user->intf, addr, &saddr, &lun); 2427 if (!rv) 2428 rv = i_ipmi_request(user, 2429 user->intf, 2430 addr, 2431 msgid, 2432 msg, 2433 user_msg_data, 2434 NULL, NULL, 2435 priority, 2436 saddr, 2437 lun, 2438 retries, 2439 retry_time_ms); 2440 2441 release_ipmi_user(user, index); 2442 return rv; 2443 } 2444 EXPORT_SYMBOL(ipmi_request_settime); 2445 2446 int ipmi_request_supply_msgs(struct ipmi_user *user, 2447 struct ipmi_addr *addr, 2448 long msgid, 2449 struct kernel_ipmi_msg *msg, 2450 void *user_msg_data, 2451 void *supplied_smi, 2452 struct ipmi_recv_msg *supplied_recv, 2453 int priority) 2454 { 2455 unsigned char saddr = 0, lun = 0; 2456 int rv, index; 2457 2458 if (!user) 2459 return -EINVAL; 2460 2461 user = acquire_ipmi_user(user, &index); 2462 if (!user) 2463 return -ENODEV; 2464 2465 rv = check_addr(user->intf, addr, &saddr, &lun); 2466 if (!rv) 2467 rv = i_ipmi_request(user, 2468 user->intf, 2469 addr, 2470 msgid, 2471 msg, 2472 user_msg_data, 2473 supplied_smi, 2474 supplied_recv, 2475 priority, 2476 saddr, 2477 lun, 2478 -1, 0); 2479 2480 release_ipmi_user(user, index); 2481 return rv; 2482 } 2483 EXPORT_SYMBOL(ipmi_request_supply_msgs); 2484 2485 static void bmc_device_id_handler(struct ipmi_smi *intf, 2486 struct ipmi_recv_msg *msg) 2487 { 2488 int rv; 2489 2490 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2491 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2492 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { 2493 dev_warn(intf->si_dev, 2494 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", 2495 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); 2496 return; 2497 } 2498 2499 if (msg->msg.data[0]) { 2500 dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n", 2501 msg->msg.data[0]); 2502 intf->bmc->dyn_id_set = 0; 2503 goto out; 2504 } 2505 2506 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, 2507 msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); 2508 if (rv) { 2509 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); 2510 /* record completion code when error */ 2511 intf->bmc->cc = msg->msg.data[0]; 2512 intf->bmc->dyn_id_set = 0; 2513 } else { 2514 /* 2515 * Make sure the id data is available before setting 2516 * 
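dyn_id_set; readers pair with this barrier via the smp_rmb() in
		 * __get_device_id() below, roughly:
		 *
		 *	wait_event(intf->waitq, bmc->dyn_id_set != 2);
		 *	smp_rmb();	(now bmc->fetch_id may be read)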
		 */
		smp_wmb();
		intf->bmc->dyn_id_set = 1;
	}
 out:
	wake_up(&intf->waitq);
}

static int
send_get_device_id_cmd(struct ipmi_smi *intf)
{
	struct ipmi_system_interface_addr si;
	struct kernel_ipmi_msg msg;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;

	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}

static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
{
	int rv;
	unsigned int retry_count = 0;

	intf->null_user_handler = bmc_device_id_handler;

retry:
	bmc->cc = 0;
	bmc->dyn_id_set = 2;

	rv = send_get_device_id_cmd(intf);
	if (rv)
		goto out_reset_handler;

	wait_event(intf->waitq, bmc->dyn_id_set != 2);

	if (!bmc->dyn_id_set) {
		if (bmc->cc != IPMI_CC_NO_ERROR &&
		    ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
			msleep(500);
			dev_warn(intf->si_dev,
				 "BMC returned 0x%2.2x, retry get bmc device id\n",
				 bmc->cc);
			goto retry;
		}

		rv = -EIO; /* Something went wrong in the fetch. */
	}

	/* dyn_id_set makes the id data available. */
	smp_rmb();

out_reset_handler:
	intf->null_user_handler = NULL;

	return rv;
}

/*
 * Fetch the device id for the bmc/interface. You must pass in either
 * bmc or intf, this code will get the other one. If the data has
 * been recently fetched, this will just use the cached data. Otherwise
 * it will run a new fetch.
 *
 * Except for the first time this is called (in ipmi_add_smi()),
 * this will always return good data.
 */
static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			       struct ipmi_device_id *id,
			       bool *guid_set, guid_t *guid, int intf_num)
{
	int rv = 0;
	int prev_dyn_id_set, prev_guid_set;
	bool intf_set = intf != NULL;

	if (!intf) {
		mutex_lock(&bmc->dyn_mutex);
retry_bmc_lock:
		if (list_empty(&bmc->intfs)) {
			mutex_unlock(&bmc->dyn_mutex);
			return -ENOENT;
		}
		intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
					bmc_link);
		kref_get(&intf->refcount);
		mutex_unlock(&bmc->dyn_mutex);
		mutex_lock(&intf->bmc_reg_mutex);
		mutex_lock(&bmc->dyn_mutex);
		if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
					     bmc_link)) {
			mutex_unlock(&intf->bmc_reg_mutex);
			kref_put(&intf->refcount, intf_free);
			goto retry_bmc_lock;
		}
	} else {
		mutex_lock(&intf->bmc_reg_mutex);
		bmc = intf->bmc;
		mutex_lock(&bmc->dyn_mutex);
		kref_get(&intf->refcount);
	}

	/* If we have a valid and current ID, just return that. */
	if (intf->in_bmc_register ||
	    (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
		goto out_noprocessing;

	prev_guid_set = bmc->dyn_guid_set;
	__get_guid(intf);

	prev_dyn_id_set = bmc->dyn_id_set;
	rv = __get_device_id(intf, bmc);
	if (rv)
		goto out;

	/*
	 * The guid, device id, manufacturer id, and product id should
	 * not change on a BMC.
If it does we have to do some dancing. 2652 */ 2653 if (!intf->bmc_registered 2654 || (!prev_guid_set && bmc->dyn_guid_set) 2655 || (!prev_dyn_id_set && bmc->dyn_id_set) 2656 || (prev_guid_set && bmc->dyn_guid_set 2657 && !guid_equal(&bmc->guid, &bmc->fetch_guid)) 2658 || bmc->id.device_id != bmc->fetch_id.device_id 2659 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id 2660 || bmc->id.product_id != bmc->fetch_id.product_id) { 2661 struct ipmi_device_id id = bmc->fetch_id; 2662 int guid_set = bmc->dyn_guid_set; 2663 guid_t guid; 2664 2665 guid = bmc->fetch_guid; 2666 mutex_unlock(&bmc->dyn_mutex); 2667 2668 __ipmi_bmc_unregister(intf); 2669 /* Fill in the temporary BMC for good measure. */ 2670 intf->bmc->id = id; 2671 intf->bmc->dyn_guid_set = guid_set; 2672 intf->bmc->guid = guid; 2673 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num)) 2674 need_waiter(intf); /* Retry later on an error. */ 2675 else 2676 __scan_channels(intf, &id); 2677 2678 2679 if (!intf_set) { 2680 /* 2681 * We weren't given the interface on the 2682 * command line, so restart the operation on 2683 * the next interface for the BMC. 2684 */ 2685 mutex_unlock(&intf->bmc_reg_mutex); 2686 mutex_lock(&bmc->dyn_mutex); 2687 goto retry_bmc_lock; 2688 } 2689 2690 /* We have a new BMC, set it up. */ 2691 bmc = intf->bmc; 2692 mutex_lock(&bmc->dyn_mutex); 2693 goto out_noprocessing; 2694 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id))) 2695 /* Version info changes, scan the channels again. */ 2696 __scan_channels(intf, &bmc->fetch_id); 2697 2698 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 2699 2700 out: 2701 if (rv && prev_dyn_id_set) { 2702 rv = 0; /* Ignore failures if we have previous data. */ 2703 bmc->dyn_id_set = prev_dyn_id_set; 2704 } 2705 if (!rv) { 2706 bmc->id = bmc->fetch_id; 2707 if (bmc->dyn_guid_set) 2708 bmc->guid = bmc->fetch_guid; 2709 else if (prev_guid_set) 2710 /* 2711 * The guid used to be valid and it failed to fetch, 2712 * just use the cached value. 
2713 */ 2714 bmc->dyn_guid_set = prev_guid_set; 2715 } 2716 out_noprocessing: 2717 if (!rv) { 2718 if (id) 2719 *id = bmc->id; 2720 2721 if (guid_set) 2722 *guid_set = bmc->dyn_guid_set; 2723 2724 if (guid && bmc->dyn_guid_set) 2725 *guid = bmc->guid; 2726 } 2727 2728 mutex_unlock(&bmc->dyn_mutex); 2729 mutex_unlock(&intf->bmc_reg_mutex); 2730 2731 kref_put(&intf->refcount, intf_free); 2732 return rv; 2733 } 2734 2735 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2736 struct ipmi_device_id *id, 2737 bool *guid_set, guid_t *guid) 2738 { 2739 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); 2740 } 2741 2742 static ssize_t device_id_show(struct device *dev, 2743 struct device_attribute *attr, 2744 char *buf) 2745 { 2746 struct bmc_device *bmc = to_bmc_device(dev); 2747 struct ipmi_device_id id; 2748 int rv; 2749 2750 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2751 if (rv) 2752 return rv; 2753 2754 return sysfs_emit(buf, "%u\n", id.device_id); 2755 } 2756 static DEVICE_ATTR_RO(device_id); 2757 2758 static ssize_t provides_device_sdrs_show(struct device *dev, 2759 struct device_attribute *attr, 2760 char *buf) 2761 { 2762 struct bmc_device *bmc = to_bmc_device(dev); 2763 struct ipmi_device_id id; 2764 int rv; 2765 2766 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2767 if (rv) 2768 return rv; 2769 2770 return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7); 2771 } 2772 static DEVICE_ATTR_RO(provides_device_sdrs); 2773 2774 static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2775 char *buf) 2776 { 2777 struct bmc_device *bmc = to_bmc_device(dev); 2778 struct ipmi_device_id id; 2779 int rv; 2780 2781 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2782 if (rv) 2783 return rv; 2784 2785 return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F); 2786 } 2787 static DEVICE_ATTR_RO(revision); 2788 2789 static ssize_t firmware_revision_show(struct device *dev, 2790 struct device_attribute *attr, 2791 char *buf) 2792 { 2793 struct bmc_device *bmc = to_bmc_device(dev); 2794 struct ipmi_device_id id; 2795 int rv; 2796 2797 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2798 if (rv) 2799 return rv; 2800 2801 return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1, 2802 id.firmware_revision_2); 2803 } 2804 static DEVICE_ATTR_RO(firmware_revision); 2805 2806 static ssize_t ipmi_version_show(struct device *dev, 2807 struct device_attribute *attr, 2808 char *buf) 2809 { 2810 struct bmc_device *bmc = to_bmc_device(dev); 2811 struct ipmi_device_id id; 2812 int rv; 2813 2814 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2815 if (rv) 2816 return rv; 2817 2818 return sysfs_emit(buf, "%u.%u\n", 2819 ipmi_version_major(&id), 2820 ipmi_version_minor(&id)); 2821 } 2822 static DEVICE_ATTR_RO(ipmi_version); 2823 2824 static ssize_t add_dev_support_show(struct device *dev, 2825 struct device_attribute *attr, 2826 char *buf) 2827 { 2828 struct bmc_device *bmc = to_bmc_device(dev); 2829 struct ipmi_device_id id; 2830 int rv; 2831 2832 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2833 if (rv) 2834 return rv; 2835 2836 return sysfs_emit(buf, "0x%02x\n", id.additional_device_support); 2837 } 2838 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, 2839 NULL); 2840 2841 static ssize_t manufacturer_id_show(struct device *dev, 2842 struct device_attribute *attr, 2843 char *buf) 2844 { 2845 struct bmc_device *bmc = to_bmc_device(dev); 2846 struct ipmi_device_id id; 2847 int rv; 2848 2849 
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2850 if (rv) 2851 return rv; 2852 2853 return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id); 2854 } 2855 static DEVICE_ATTR_RO(manufacturer_id); 2856 2857 static ssize_t product_id_show(struct device *dev, 2858 struct device_attribute *attr, 2859 char *buf) 2860 { 2861 struct bmc_device *bmc = to_bmc_device(dev); 2862 struct ipmi_device_id id; 2863 int rv; 2864 2865 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2866 if (rv) 2867 return rv; 2868 2869 return sysfs_emit(buf, "0x%4.4x\n", id.product_id); 2870 } 2871 static DEVICE_ATTR_RO(product_id); 2872 2873 static ssize_t aux_firmware_rev_show(struct device *dev, 2874 struct device_attribute *attr, 2875 char *buf) 2876 { 2877 struct bmc_device *bmc = to_bmc_device(dev); 2878 struct ipmi_device_id id; 2879 int rv; 2880 2881 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2882 if (rv) 2883 return rv; 2884 2885 return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2886 id.aux_firmware_revision[3], 2887 id.aux_firmware_revision[2], 2888 id.aux_firmware_revision[1], 2889 id.aux_firmware_revision[0]); 2890 } 2891 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2892 2893 static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2894 char *buf) 2895 { 2896 struct bmc_device *bmc = to_bmc_device(dev); 2897 bool guid_set; 2898 guid_t guid; 2899 int rv; 2900 2901 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid); 2902 if (rv) 2903 return rv; 2904 if (!guid_set) 2905 return -ENOENT; 2906 2907 return sysfs_emit(buf, "%pUl\n", &guid); 2908 } 2909 static DEVICE_ATTR_RO(guid); 2910 2911 static struct attribute *bmc_dev_attrs[] = { 2912 &dev_attr_device_id.attr, 2913 &dev_attr_provides_device_sdrs.attr, 2914 &dev_attr_revision.attr, 2915 &dev_attr_firmware_revision.attr, 2916 &dev_attr_ipmi_version.attr, 2917 &dev_attr_additional_device_support.attr, 2918 &dev_attr_manufacturer_id.attr, 2919 &dev_attr_product_id.attr, 2920 &dev_attr_aux_firmware_revision.attr, 2921 &dev_attr_guid.attr, 2922 NULL 2923 }; 2924 2925 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, 2926 struct attribute *attr, int idx) 2927 { 2928 struct device *dev = kobj_to_dev(kobj); 2929 struct bmc_device *bmc = to_bmc_device(dev); 2930 umode_t mode = attr->mode; 2931 int rv; 2932 2933 if (attr == &dev_attr_aux_firmware_revision.attr) { 2934 struct ipmi_device_id id; 2935 2936 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2937 return (!rv && id.aux_firmware_revision_set) ? mode : 0; 2938 } 2939 if (attr == &dev_attr_guid.attr) { 2940 bool guid_set; 2941 2942 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); 2943 return (!rv && guid_set) ? 
mode : 0; 2944 } 2945 return mode; 2946 } 2947 2948 static const struct attribute_group bmc_dev_attr_group = { 2949 .attrs = bmc_dev_attrs, 2950 .is_visible = bmc_dev_attr_is_visible, 2951 }; 2952 2953 static const struct attribute_group *bmc_dev_attr_groups[] = { 2954 &bmc_dev_attr_group, 2955 NULL 2956 }; 2957 2958 static const struct device_type bmc_device_type = { 2959 .groups = bmc_dev_attr_groups, 2960 }; 2961 2962 static int __find_bmc_guid(struct device *dev, const void *data) 2963 { 2964 const guid_t *guid = data; 2965 struct bmc_device *bmc; 2966 int rv; 2967 2968 if (dev->type != &bmc_device_type) 2969 return 0; 2970 2971 bmc = to_bmc_device(dev); 2972 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid); 2973 if (rv) 2974 rv = kref_get_unless_zero(&bmc->usecount); 2975 return rv; 2976 } 2977 2978 /* 2979 * Returns with the bmc's usecount incremented, if it is non-NULL. 2980 */ 2981 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, 2982 guid_t *guid) 2983 { 2984 struct device *dev; 2985 struct bmc_device *bmc = NULL; 2986 2987 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 2988 if (dev) { 2989 bmc = to_bmc_device(dev); 2990 put_device(dev); 2991 } 2992 return bmc; 2993 } 2994 2995 struct prod_dev_id { 2996 unsigned int product_id; 2997 unsigned char device_id; 2998 }; 2999 3000 static int __find_bmc_prod_dev_id(struct device *dev, const void *data) 3001 { 3002 const struct prod_dev_id *cid = data; 3003 struct bmc_device *bmc; 3004 int rv; 3005 3006 if (dev->type != &bmc_device_type) 3007 return 0; 3008 3009 bmc = to_bmc_device(dev); 3010 rv = (bmc->id.product_id == cid->product_id 3011 && bmc->id.device_id == cid->device_id); 3012 if (rv) 3013 rv = kref_get_unless_zero(&bmc->usecount); 3014 return rv; 3015 } 3016 3017 /* 3018 * Returns with the bmc's usecount incremented, if it is non-NULL. 3019 */ 3020 static struct bmc_device *ipmi_find_bmc_prod_dev_id( 3021 struct device_driver *drv, 3022 unsigned int product_id, unsigned char device_id) 3023 { 3024 struct prod_dev_id id = { 3025 .product_id = product_id, 3026 .device_id = device_id, 3027 }; 3028 struct device *dev; 3029 struct bmc_device *bmc = NULL; 3030 3031 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 3032 if (dev) { 3033 bmc = to_bmc_device(dev); 3034 put_device(dev); 3035 } 3036 return bmc; 3037 } 3038 3039 static DEFINE_IDA(ipmi_bmc_ida); 3040 3041 static void 3042 release_bmc_device(struct device *dev) 3043 { 3044 kfree(to_bmc_device(dev)); 3045 } 3046 3047 static void cleanup_bmc_work(struct work_struct *work) 3048 { 3049 struct bmc_device *bmc = container_of(work, struct bmc_device, 3050 remove_work); 3051 int id = bmc->pdev.id; /* Unregister overwrites id */ 3052 3053 platform_device_unregister(&bmc->pdev); 3054 ida_simple_remove(&ipmi_bmc_ida, id); 3055 } 3056 3057 static void 3058 cleanup_bmc_device(struct kref *ref) 3059 { 3060 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 3061 3062 /* 3063 * Remove the platform device in a work queue to avoid issues 3064 * with removing the device attributes while reading a device 3065 * attribute. 3066 */ 3067 queue_work(remove_work_wq, &bmc->remove_work); 3068 } 3069 3070 /* 3071 * Must be called with intf->bmc_reg_mutex held. 
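 *
 * Lock ordering, as practiced here and in __bmc_get_device_id():
 *
 *	mutex_lock(&intf->bmc_reg_mutex);
 *	mutex_lock(&bmc->dyn_mutex);
 *
 * bmc->dyn_mutex is always the inner lock.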
 */
static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
{
	struct bmc_device *bmc = intf->bmc;

	if (!intf->bmc_registered)
		return;

	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
	sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
	kfree(intf->my_dev_name);
	intf->my_dev_name = NULL;

	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	kref_put(&bmc->usecount, cleanup_bmc_device);
	intf->bmc_registered = false;
}

static void ipmi_bmc_unregister(struct ipmi_smi *intf)
{
	mutex_lock(&intf->bmc_reg_mutex);
	__ipmi_bmc_unregister(intf);
	mutex_unlock(&intf->bmc_reg_mutex);
}

/*
 * Must be called with intf->bmc_reg_mutex held.
 */
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num)
{
	int rv;
	struct bmc_device *bmc;
	struct bmc_device *old_bmc;

	/*
	 * platform_device_register() can cause bmc_reg_mutex to
	 * be claimed because of the is_visible functions of
	 * the attributes. Eliminate possible recursion and
	 * release the lock.
	 */
	intf->in_bmc_register = true;
	mutex_unlock(&intf->bmc_reg_mutex);

	/*
	 * Try to find if there is a bmc_device struct
	 * representing the interfaced BMC already.
	 */
	mutex_lock(&ipmidriver_mutex);
	if (guid_set)
		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
	else
		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
						    id->product_id,
						    id->device_id);

	/*
	 * If there is already a bmc_device, free the new one;
	 * otherwise register the new BMC device.
	 */
	if (old_bmc) {
		bmc = old_bmc;
		/*
		 * Note: old_bmc already has usecount incremented by
		 * the BMC find functions.
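		 * (kref_get_unless_zero() in __find_bmc_guid() or
		 * __find_bmc_prod_dev_id() took that reference; the
		 * matching kref_put() with cleanup_bmc_device happens in
		 * __ipmi_bmc_unregister() or on the error paths below.)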
3141 */ 3142 intf->bmc = old_bmc; 3143 mutex_lock(&bmc->dyn_mutex); 3144 list_add_tail(&intf->bmc_link, &bmc->intfs); 3145 mutex_unlock(&bmc->dyn_mutex); 3146 3147 dev_info(intf->si_dev, 3148 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3149 bmc->id.manufacturer_id, 3150 bmc->id.product_id, 3151 bmc->id.device_id); 3152 } else { 3153 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL); 3154 if (!bmc) { 3155 rv = -ENOMEM; 3156 goto out; 3157 } 3158 INIT_LIST_HEAD(&bmc->intfs); 3159 mutex_init(&bmc->dyn_mutex); 3160 INIT_WORK(&bmc->remove_work, cleanup_bmc_work); 3161 3162 bmc->id = *id; 3163 bmc->dyn_id_set = 1; 3164 bmc->dyn_guid_set = guid_set; 3165 bmc->guid = *guid; 3166 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 3167 3168 bmc->pdev.name = "ipmi_bmc"; 3169 3170 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL); 3171 if (rv < 0) { 3172 kfree(bmc); 3173 goto out; 3174 } 3175 3176 bmc->pdev.dev.driver = &ipmidriver.driver; 3177 bmc->pdev.id = rv; 3178 bmc->pdev.dev.release = release_bmc_device; 3179 bmc->pdev.dev.type = &bmc_device_type; 3180 kref_init(&bmc->usecount); 3181 3182 intf->bmc = bmc; 3183 mutex_lock(&bmc->dyn_mutex); 3184 list_add_tail(&intf->bmc_link, &bmc->intfs); 3185 mutex_unlock(&bmc->dyn_mutex); 3186 3187 rv = platform_device_register(&bmc->pdev); 3188 if (rv) { 3189 dev_err(intf->si_dev, 3190 "Unable to register bmc device: %d\n", 3191 rv); 3192 goto out_list_del; 3193 } 3194 3195 dev_info(intf->si_dev, 3196 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3197 bmc->id.manufacturer_id, 3198 bmc->id.product_id, 3199 bmc->id.device_id); 3200 } 3201 3202 /* 3203 * create symlink from system interface device to bmc device 3204 * and back. 3205 */ 3206 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); 3207 if (rv) { 3208 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); 3209 goto out_put_bmc; 3210 } 3211 3212 if (intf_num == -1) 3213 intf_num = intf->intf_num; 3214 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); 3215 if (!intf->my_dev_name) { 3216 rv = -ENOMEM; 3217 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", 3218 rv); 3219 goto out_unlink1; 3220 } 3221 3222 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, 3223 intf->my_dev_name); 3224 if (rv) { 3225 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", 3226 rv); 3227 goto out_free_my_dev_name; 3228 } 3229 3230 intf->bmc_registered = true; 3231 3232 out: 3233 mutex_unlock(&ipmidriver_mutex); 3234 mutex_lock(&intf->bmc_reg_mutex); 3235 intf->in_bmc_register = false; 3236 return rv; 3237 3238 3239 out_free_my_dev_name: 3240 kfree(intf->my_dev_name); 3241 intf->my_dev_name = NULL; 3242 3243 out_unlink1: 3244 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3245 3246 out_put_bmc: 3247 mutex_lock(&bmc->dyn_mutex); 3248 list_del(&intf->bmc_link); 3249 mutex_unlock(&bmc->dyn_mutex); 3250 intf->bmc = &intf->tmp_bmc; 3251 kref_put(&bmc->usecount, cleanup_bmc_device); 3252 goto out; 3253 3254 out_list_del: 3255 mutex_lock(&bmc->dyn_mutex); 3256 list_del(&intf->bmc_link); 3257 mutex_unlock(&bmc->dyn_mutex); 3258 intf->bmc = &intf->tmp_bmc; 3259 put_device(&bmc->pdev.dev); 3260 goto out; 3261 } 3262 3263 static int 3264 send_guid_cmd(struct ipmi_smi *intf, int chan) 3265 { 3266 struct kernel_ipmi_msg msg; 3267 struct ipmi_system_interface_addr si; 3268 3269 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3270 si.channel = IPMI_BMC_CHANNEL; 3271 si.lun = 0; 3272 3273 msg.netfn = 
IPMI_NETFN_APP_REQUEST; 3274 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 3275 msg.data = NULL; 3276 msg.data_len = 0; 3277 return i_ipmi_request(NULL, 3278 intf, 3279 (struct ipmi_addr *) &si, 3280 0, 3281 &msg, 3282 intf, 3283 NULL, 3284 NULL, 3285 0, 3286 intf->addrinfo[0].address, 3287 intf->addrinfo[0].lun, 3288 -1, 0); 3289 } 3290 3291 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3292 { 3293 struct bmc_device *bmc = intf->bmc; 3294 3295 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3296 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 3297 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 3298 /* Not for me */ 3299 return; 3300 3301 if (msg->msg.data[0] != 0) { 3302 /* Error from getting the GUID, the BMC doesn't have one. */ 3303 bmc->dyn_guid_set = 0; 3304 goto out; 3305 } 3306 3307 if (msg->msg.data_len < UUID_SIZE + 1) { 3308 bmc->dyn_guid_set = 0; 3309 dev_warn(intf->si_dev, 3310 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", 3311 msg->msg.data_len, UUID_SIZE + 1); 3312 goto out; 3313 } 3314 3315 import_guid(&bmc->fetch_guid, msg->msg.data + 1); 3316 /* 3317 * Make sure the guid data is available before setting 3318 * dyn_guid_set. 3319 */ 3320 smp_wmb(); 3321 bmc->dyn_guid_set = 1; 3322 out: 3323 wake_up(&intf->waitq); 3324 } 3325 3326 static void __get_guid(struct ipmi_smi *intf) 3327 { 3328 int rv; 3329 struct bmc_device *bmc = intf->bmc; 3330 3331 bmc->dyn_guid_set = 2; 3332 intf->null_user_handler = guid_handler; 3333 rv = send_guid_cmd(intf, 0); 3334 if (rv) 3335 /* Send failed, no GUID available. */ 3336 bmc->dyn_guid_set = 0; 3337 else 3338 wait_event(intf->waitq, bmc->dyn_guid_set != 2); 3339 3340 /* dyn_guid_set makes the guid data available. */ 3341 smp_rmb(); 3342 3343 intf->null_user_handler = NULL; 3344 } 3345 3346 static int 3347 send_channel_info_cmd(struct ipmi_smi *intf, int chan) 3348 { 3349 struct kernel_ipmi_msg msg; 3350 unsigned char data[1]; 3351 struct ipmi_system_interface_addr si; 3352 3353 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3354 si.channel = IPMI_BMC_CHANNEL; 3355 si.lun = 0; 3356 3357 msg.netfn = IPMI_NETFN_APP_REQUEST; 3358 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 3359 msg.data = data; 3360 msg.data_len = 1; 3361 data[0] = chan; 3362 return i_ipmi_request(NULL, 3363 intf, 3364 (struct ipmi_addr *) &si, 3365 0, 3366 &msg, 3367 intf, 3368 NULL, 3369 NULL, 3370 0, 3371 intf->addrinfo[0].address, 3372 intf->addrinfo[0].lun, 3373 -1, 0); 3374 } 3375 3376 static void 3377 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3378 { 3379 int rv = 0; 3380 int ch; 3381 unsigned int set = intf->curr_working_cset; 3382 struct ipmi_channel *chans; 3383 3384 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3385 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3386 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { 3387 /* It's the one we want */ 3388 if (msg->msg.data[0] != 0) { 3389 /* Got an error from the channel, just go on. */ 3390 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 3391 /* 3392 * If the MC does not support this 3393 * command, that is legal. We just 3394 * assume it has one IPMB at channel 3395 * zero. 
3396 */ 3397 intf->wchannels[set].c[0].medium 3398 = IPMI_CHANNEL_MEDIUM_IPMB; 3399 intf->wchannels[set].c[0].protocol 3400 = IPMI_CHANNEL_PROTOCOL_IPMB; 3401 3402 intf->channel_list = intf->wchannels + set; 3403 intf->channels_ready = true; 3404 wake_up(&intf->waitq); 3405 goto out; 3406 } 3407 goto next_channel; 3408 } 3409 if (msg->msg.data_len < 4) { 3410 /* Message not big enough, just go on. */ 3411 goto next_channel; 3412 } 3413 ch = intf->curr_channel; 3414 chans = intf->wchannels[set].c; 3415 chans[ch].medium = msg->msg.data[2] & 0x7f; 3416 chans[ch].protocol = msg->msg.data[3] & 0x1f; 3417 3418 next_channel: 3419 intf->curr_channel++; 3420 if (intf->curr_channel >= IPMI_MAX_CHANNELS) { 3421 intf->channel_list = intf->wchannels + set; 3422 intf->channels_ready = true; 3423 wake_up(&intf->waitq); 3424 } else { 3425 intf->channel_list = intf->wchannels + set; 3426 intf->channels_ready = true; 3427 rv = send_channel_info_cmd(intf, intf->curr_channel); 3428 } 3429 3430 if (rv) { 3431 /* Got an error somehow, just give up. */ 3432 dev_warn(intf->si_dev, 3433 "Error sending channel information for channel %d: %d\n", 3434 intf->curr_channel, rv); 3435 3436 intf->channel_list = intf->wchannels + set; 3437 intf->channels_ready = true; 3438 wake_up(&intf->waitq); 3439 } 3440 } 3441 out: 3442 return; 3443 } 3444 3445 /* 3446 * Must be holding intf->bmc_reg_mutex to call this. 3447 */ 3448 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) 3449 { 3450 int rv; 3451 3452 if (ipmi_version_major(id) > 1 3453 || (ipmi_version_major(id) == 1 3454 && ipmi_version_minor(id) >= 5)) { 3455 unsigned int set; 3456 3457 /* 3458 * Start scanning the channels to see what is 3459 * available. 3460 */ 3461 set = !intf->curr_working_cset; 3462 intf->curr_working_cset = set; 3463 memset(&intf->wchannels[set], 0, 3464 sizeof(struct ipmi_channel_set)); 3465 3466 intf->null_user_handler = channel_handler; 3467 intf->curr_channel = 0; 3468 rv = send_channel_info_cmd(intf, 0); 3469 if (rv) { 3470 dev_warn(intf->si_dev, 3471 "Error sending channel information for channel 0, %d\n", 3472 rv); 3473 intf->null_user_handler = NULL; 3474 return -EIO; 3475 } 3476 3477 /* Wait for the channel info to be read. */ 3478 wait_event(intf->waitq, intf->channels_ready); 3479 intf->null_user_handler = NULL; 3480 } else { 3481 unsigned int set = intf->curr_working_cset; 3482 3483 /* Assume a single IPMB channel at zero. 
*/ 3484 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 3485 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 3486 intf->channel_list = intf->wchannels + set; 3487 intf->channels_ready = true; 3488 } 3489 3490 return 0; 3491 } 3492 3493 static void ipmi_poll(struct ipmi_smi *intf) 3494 { 3495 if (intf->handlers->poll) 3496 intf->handlers->poll(intf->send_info); 3497 /* In case something came in */ 3498 handle_new_recv_msgs(intf); 3499 } 3500 3501 void ipmi_poll_interface(struct ipmi_user *user) 3502 { 3503 ipmi_poll(user->intf); 3504 } 3505 EXPORT_SYMBOL(ipmi_poll_interface); 3506 3507 static ssize_t nr_users_show(struct device *dev, 3508 struct device_attribute *attr, 3509 char *buf) 3510 { 3511 struct ipmi_smi *intf = container_of(attr, 3512 struct ipmi_smi, nr_users_devattr); 3513 3514 return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users)); 3515 } 3516 static DEVICE_ATTR_RO(nr_users); 3517 3518 static ssize_t nr_msgs_show(struct device *dev, 3519 struct device_attribute *attr, 3520 char *buf) 3521 { 3522 struct ipmi_smi *intf = container_of(attr, 3523 struct ipmi_smi, nr_msgs_devattr); 3524 struct ipmi_user *user; 3525 int index; 3526 unsigned int count = 0; 3527 3528 index = srcu_read_lock(&intf->users_srcu); 3529 list_for_each_entry_rcu(user, &intf->users, link) 3530 count += atomic_read(&user->nr_msgs); 3531 srcu_read_unlock(&intf->users_srcu, index); 3532 3533 return sysfs_emit(buf, "%u\n", count); 3534 } 3535 static DEVICE_ATTR_RO(nr_msgs); 3536 3537 static void redo_bmc_reg(struct work_struct *work) 3538 { 3539 struct ipmi_smi *intf = container_of(work, struct ipmi_smi, 3540 bmc_reg_work); 3541 3542 if (!intf->in_shutdown) 3543 bmc_get_device_id(intf, NULL, NULL, NULL, NULL); 3544 3545 kref_put(&intf->refcount, intf_free); 3546 } 3547 3548 int ipmi_add_smi(struct module *owner, 3549 const struct ipmi_smi_handlers *handlers, 3550 void *send_info, 3551 struct device *si_dev, 3552 unsigned char slave_addr) 3553 { 3554 int i, j; 3555 int rv; 3556 struct ipmi_smi *intf, *tintf; 3557 struct list_head *link; 3558 struct ipmi_device_id id; 3559 3560 /* 3561 * Make sure the driver is actually initialized, this handles 3562 * problems with initialization order. 3563 */ 3564 rv = ipmi_init_msghandler(); 3565 if (rv) 3566 return rv; 3567 3568 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 3569 if (!intf) 3570 return -ENOMEM; 3571 3572 rv = init_srcu_struct(&intf->users_srcu); 3573 if (rv) { 3574 kfree(intf); 3575 return rv; 3576 } 3577 3578 intf->owner = owner; 3579 intf->bmc = &intf->tmp_bmc; 3580 INIT_LIST_HEAD(&intf->bmc->intfs); 3581 mutex_init(&intf->bmc->dyn_mutex); 3582 INIT_LIST_HEAD(&intf->bmc_link); 3583 mutex_init(&intf->bmc_reg_mutex); 3584 intf->intf_num = -1; /* Mark it invalid for now. 
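	 * It stays -1 until everything else is set up; the smp_wmb()
	 * before the final assignment of intf_num below is what
	 * publishes the interface to RCU readers.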
*/ 3585 kref_init(&intf->refcount); 3586 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg); 3587 intf->si_dev = si_dev; 3588 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 3589 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR; 3590 intf->addrinfo[j].lun = 2; 3591 } 3592 if (slave_addr != 0) 3593 intf->addrinfo[0].address = slave_addr; 3594 INIT_LIST_HEAD(&intf->users); 3595 atomic_set(&intf->nr_users, 0); 3596 intf->handlers = handlers; 3597 intf->send_info = send_info; 3598 spin_lock_init(&intf->seq_lock); 3599 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 3600 intf->seq_table[j].inuse = 0; 3601 intf->seq_table[j].seqid = 0; 3602 } 3603 intf->curr_seq = 0; 3604 spin_lock_init(&intf->waiting_rcv_msgs_lock); 3605 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 3606 tasklet_setup(&intf->recv_tasklet, 3607 smi_recv_tasklet); 3608 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 3609 spin_lock_init(&intf->xmit_msgs_lock); 3610 INIT_LIST_HEAD(&intf->xmit_msgs); 3611 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 3612 spin_lock_init(&intf->events_lock); 3613 spin_lock_init(&intf->watch_lock); 3614 atomic_set(&intf->event_waiters, 0); 3615 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3616 INIT_LIST_HEAD(&intf->waiting_events); 3617 intf->waiting_events_count = 0; 3618 mutex_init(&intf->cmd_rcvrs_mutex); 3619 spin_lock_init(&intf->maintenance_mode_lock); 3620 INIT_LIST_HEAD(&intf->cmd_rcvrs); 3621 init_waitqueue_head(&intf->waitq); 3622 for (i = 0; i < IPMI_NUM_STATS; i++) 3623 atomic_set(&intf->stats[i], 0); 3624 3625 mutex_lock(&ipmi_interfaces_mutex); 3626 /* Look for a hole in the numbers. */ 3627 i = 0; 3628 link = &ipmi_interfaces; 3629 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link, 3630 ipmi_interfaces_mutex_held()) { 3631 if (tintf->intf_num != i) { 3632 link = &tintf->link; 3633 break; 3634 } 3635 i++; 3636 } 3637 /* Add the new interface in numeric order. */ 3638 if (i == 0) 3639 list_add_rcu(&intf->link, &ipmi_interfaces); 3640 else 3641 list_add_tail_rcu(&intf->link, link); 3642 3643 rv = handlers->start_processing(send_info, intf); 3644 if (rv) 3645 goto out_err; 3646 3647 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3648 if (rv) { 3649 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3650 goto out_err_started; 3651 } 3652 3653 mutex_lock(&intf->bmc_reg_mutex); 3654 rv = __scan_channels(intf, &id); 3655 mutex_unlock(&intf->bmc_reg_mutex); 3656 if (rv) 3657 goto out_err_bmc_reg; 3658 3659 intf->nr_users_devattr = dev_attr_nr_users; 3660 sysfs_attr_init(&intf->nr_users_devattr.attr); 3661 rv = device_create_file(intf->si_dev, &intf->nr_users_devattr); 3662 if (rv) 3663 goto out_err_bmc_reg; 3664 3665 intf->nr_msgs_devattr = dev_attr_nr_msgs; 3666 sysfs_attr_init(&intf->nr_msgs_devattr.attr); 3667 rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr); 3668 if (rv) { 3669 device_remove_file(intf->si_dev, &intf->nr_users_devattr); 3670 goto out_err_bmc_reg; 3671 } 3672 3673 /* 3674 * Keep memory order straight for RCU readers. Make 3675 * sure everything else is committed to memory before 3676 * setting intf_num to mark the interface valid. 3677 */ 3678 smp_wmb(); 3679 intf->intf_num = i; 3680 mutex_unlock(&ipmi_interfaces_mutex); 3681 3682 /* After this point the interface is legal to use. 
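	 *
	 * (For orientation, a lower-level driver typically reaches this
	 * point via something like the following, where my_handlers and
	 * my_info are hypothetical:
	 *
	 *	rv = ipmi_add_smi(THIS_MODULE, &my_handlers, my_info,
	 *			  &pdev->dev, 0x20);
	 *
	 * 0x20 being the usual IPMI_BMC_SLAVE_ADDR.)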
*/ 3683 call_smi_watchers(i, intf->si_dev); 3684 3685 return 0; 3686 3687 out_err_bmc_reg: 3688 ipmi_bmc_unregister(intf); 3689 out_err_started: 3690 if (intf->handlers->shutdown) 3691 intf->handlers->shutdown(intf->send_info); 3692 out_err: 3693 list_del_rcu(&intf->link); 3694 mutex_unlock(&ipmi_interfaces_mutex); 3695 synchronize_srcu(&ipmi_interfaces_srcu); 3696 cleanup_srcu_struct(&intf->users_srcu); 3697 kref_put(&intf->refcount, intf_free); 3698 3699 return rv; 3700 } 3701 EXPORT_SYMBOL(ipmi_add_smi); 3702 3703 static void deliver_smi_err_response(struct ipmi_smi *intf, 3704 struct ipmi_smi_msg *msg, 3705 unsigned char err) 3706 { 3707 msg->rsp[0] = msg->data[0] | 4; 3708 msg->rsp[1] = msg->data[1]; 3709 msg->rsp[2] = err; 3710 msg->rsp_size = 3; 3711 /* It's an error, so it will never requeue, no need to check return. */ 3712 handle_one_recv_msg(intf, msg); 3713 } 3714 3715 static void cleanup_smi_msgs(struct ipmi_smi *intf) 3716 { 3717 int i; 3718 struct seq_table *ent; 3719 struct ipmi_smi_msg *msg; 3720 struct list_head *entry; 3721 struct list_head tmplist; 3722 3723 /* Clear out our transmit queues and hold the messages. */ 3724 INIT_LIST_HEAD(&tmplist); 3725 list_splice_tail(&intf->hp_xmit_msgs, &tmplist); 3726 list_splice_tail(&intf->xmit_msgs, &tmplist); 3727 3728 /* Current message first, to preserve order */ 3729 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { 3730 /* Wait for the message to clear out. */ 3731 schedule_timeout(1); 3732 } 3733 3734 /* No need for locks, the interface is down. */ 3735 3736 /* 3737 * Return errors for all pending messages in queue and in the 3738 * tables waiting for remote responses. 3739 */ 3740 while (!list_empty(&tmplist)) { 3741 entry = tmplist.next; 3742 list_del(entry); 3743 msg = list_entry(entry, struct ipmi_smi_msg, link); 3744 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); 3745 } 3746 3747 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 3748 ent = &intf->seq_table[i]; 3749 if (!ent->inuse) 3750 continue; 3751 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); 3752 } 3753 } 3754 3755 void ipmi_unregister_smi(struct ipmi_smi *intf) 3756 { 3757 struct ipmi_smi_watcher *w; 3758 int intf_num, index; 3759 3760 if (!intf) 3761 return; 3762 intf_num = intf->intf_num; 3763 mutex_lock(&ipmi_interfaces_mutex); 3764 intf->intf_num = -1; 3765 intf->in_shutdown = true; 3766 list_del_rcu(&intf->link); 3767 mutex_unlock(&ipmi_interfaces_mutex); 3768 synchronize_srcu(&ipmi_interfaces_srcu); 3769 3770 /* At this point no users can be added to the interface. */ 3771 3772 device_remove_file(intf->si_dev, &intf->nr_msgs_devattr); 3773 device_remove_file(intf->si_dev, &intf->nr_users_devattr); 3774 3775 /* 3776 * Call all the watcher interfaces to tell them that 3777 * an interface is going away. 
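	 *
	 * (Watchers are registered via ipmi_smi_watcher_register(); a
	 * minimal, hypothetical one looks roughly like:
	 *
	 *	static struct ipmi_smi_watcher my_watcher = {
	 *		.owner    = THIS_MODULE,
	 *		.new_smi  = my_new_smi,
	 *		.smi_gone = my_smi_gone,
	 *	};
	 *
	 * It is the smi_gone() hook that is invoked below.)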
3778 */ 3779 mutex_lock(&smi_watchers_mutex); 3780 list_for_each_entry(w, &smi_watchers, link) 3781 w->smi_gone(intf_num); 3782 mutex_unlock(&smi_watchers_mutex); 3783 3784 index = srcu_read_lock(&intf->users_srcu); 3785 while (!list_empty(&intf->users)) { 3786 struct ipmi_user *user = 3787 container_of(list_next_rcu(&intf->users), 3788 struct ipmi_user, link); 3789 3790 _ipmi_destroy_user(user); 3791 } 3792 srcu_read_unlock(&intf->users_srcu, index); 3793 3794 if (intf->handlers->shutdown) 3795 intf->handlers->shutdown(intf->send_info); 3796 3797 cleanup_smi_msgs(intf); 3798 3799 ipmi_bmc_unregister(intf); 3800 3801 cleanup_srcu_struct(&intf->users_srcu); 3802 kref_put(&intf->refcount, intf_free); 3803 } 3804 EXPORT_SYMBOL(ipmi_unregister_smi); 3805 3806 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, 3807 struct ipmi_smi_msg *msg) 3808 { 3809 struct ipmi_ipmb_addr ipmb_addr; 3810 struct ipmi_recv_msg *recv_msg; 3811 3812 /* 3813 * This is 11, not 10, because the response must contain a 3814 * completion code. 3815 */ 3816 if (msg->rsp_size < 11) { 3817 /* Message not big enough, just ignore it. */ 3818 ipmi_inc_stat(intf, invalid_ipmb_responses); 3819 return 0; 3820 } 3821 3822 if (msg->rsp[2] != 0) { 3823 /* An error getting the response, just ignore it. */ 3824 return 0; 3825 } 3826 3827 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 3828 ipmb_addr.slave_addr = msg->rsp[6]; 3829 ipmb_addr.channel = msg->rsp[3] & 0x0f; 3830 ipmb_addr.lun = msg->rsp[7] & 3; 3831 3832 /* 3833 * It's a response from a remote entity. Look up the sequence 3834 * number and handle the response. 3835 */ 3836 if (intf_find_seq(intf, 3837 msg->rsp[7] >> 2, 3838 msg->rsp[3] & 0x0f, 3839 msg->rsp[8], 3840 (msg->rsp[4] >> 2) & (~1), 3841 (struct ipmi_addr *) &ipmb_addr, 3842 &recv_msg)) { 3843 /* 3844 * We were unable to find the sequence number, 3845 * so just nuke the message. 3846 */ 3847 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3848 return 0; 3849 } 3850 3851 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); 3852 /* 3853 * The other fields matched, so no need to set them, except 3854 * for netfn, which needs to be the response that was 3855 * returned, not the request value. 3856 */ 3857 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3858 recv_msg->msg.data = recv_msg->msg_data; 3859 recv_msg->msg.data_len = msg->rsp_size - 10; 3860 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3861 if (deliver_response(intf, recv_msg)) 3862 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3863 else 3864 ipmi_inc_stat(intf, handled_ipmb_responses); 3865 3866 return 0; 3867 } 3868 3869 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, 3870 struct ipmi_smi_msg *msg) 3871 { 3872 struct cmd_rcvr *rcvr; 3873 int rv = 0; 3874 unsigned char netfn; 3875 unsigned char cmd; 3876 unsigned char chan; 3877 struct ipmi_user *user = NULL; 3878 struct ipmi_ipmb_addr *ipmb_addr; 3879 struct ipmi_recv_msg *recv_msg; 3880 3881 if (msg->rsp_size < 10) { 3882 /* Message not big enough, just ignore it. */ 3883 ipmi_inc_stat(intf, invalid_commands); 3884 return 0; 3885 } 3886 3887 if (msg->rsp[2] != 0) { 3888 /* An error getting the response, just ignore it. 
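		 *
		 * (For reference, the response layout this handler
		 * assumes, taken from the field extraction below; the
		 * IPMI spec is authoritative:
		 *
		 *	rsp[2]   completion code (checked here)
		 *	rsp[3]   channel in the low nibble
		 *	rsp[4]   netfn << 2 | LUN
		 *	rsp[6]   sender's slave address
		 *	rsp[7]   rqSeq << 2 | rqLUN
		 *	rsp[8]   command
		 *	rsp[9]+  message body, then a trailing checksum)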
*/ 3889 return 0; 3890 } 3891 3892 netfn = msg->rsp[4] >> 2; 3893 cmd = msg->rsp[8]; 3894 chan = msg->rsp[3] & 0xf; 3895 3896 rcu_read_lock(); 3897 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3898 if (rcvr) { 3899 user = rcvr->user; 3900 kref_get(&user->refcount); 3901 } else 3902 user = NULL; 3903 rcu_read_unlock(); 3904 3905 if (user == NULL) { 3906 /* We didn't find a user, deliver an error response. */ 3907 ipmi_inc_stat(intf, unhandled_commands); 3908 3909 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3910 msg->data[1] = IPMI_SEND_MSG_CMD; 3911 msg->data[2] = msg->rsp[3]; 3912 msg->data[3] = msg->rsp[6]; 3913 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3914 msg->data[5] = ipmb_checksum(&msg->data[3], 2); 3915 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; 3916 /* rqseq/lun */ 3917 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3918 msg->data[8] = msg->rsp[8]; /* cmd */ 3919 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3920 msg->data[10] = ipmb_checksum(&msg->data[6], 4); 3921 msg->data_size = 11; 3922 3923 dev_dbg(intf->si_dev, "Invalid command: %*ph\n", 3924 msg->data_size, msg->data); 3925 3926 rcu_read_lock(); 3927 if (!intf->in_shutdown) { 3928 smi_send(intf, intf->handlers, msg, 0); 3929 /* 3930 * We used the message, so return the value 3931 * that causes it to not be freed or 3932 * queued. 3933 */ 3934 rv = -1; 3935 } 3936 rcu_read_unlock(); 3937 } else { 3938 recv_msg = ipmi_alloc_recv_msg(); 3939 if (!recv_msg) { 3940 /* 3941 * We couldn't allocate memory for the 3942 * message, so requeue it for handling 3943 * later. 3944 */ 3945 rv = 1; 3946 kref_put(&user->refcount, free_user); 3947 } else { 3948 /* Extract the source address from the data. */ 3949 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 3950 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 3951 ipmb_addr->slave_addr = msg->rsp[6]; 3952 ipmb_addr->lun = msg->rsp[7] & 3; 3953 ipmb_addr->channel = msg->rsp[3] & 0xf; 3954 3955 /* 3956 * Extract the rest of the message information 3957 * from the IPMB header. 3958 */ 3959 recv_msg->user = user; 3960 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3961 recv_msg->msgid = msg->rsp[7] >> 2; 3962 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3963 recv_msg->msg.cmd = msg->rsp[8]; 3964 recv_msg->msg.data = recv_msg->msg_data; 3965 3966 /* 3967 * We chop off 10, not 9 bytes because the checksum 3968 * at the end also needs to be removed. 3969 */ 3970 recv_msg->msg.data_len = msg->rsp_size - 10; 3971 memcpy(recv_msg->msg_data, &msg->rsp[9], 3972 msg->rsp_size - 10); 3973 if (deliver_response(intf, recv_msg)) 3974 ipmi_inc_stat(intf, unhandled_commands); 3975 else 3976 ipmi_inc_stat(intf, handled_commands); 3977 } 3978 } 3979 3980 return rv; 3981 } 3982 3983 static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, 3984 struct ipmi_smi_msg *msg) 3985 { 3986 struct cmd_rcvr *rcvr; 3987 int rv = 0; 3988 struct ipmi_user *user = NULL; 3989 struct ipmi_ipmb_direct_addr *daddr; 3990 struct ipmi_recv_msg *recv_msg; 3991 unsigned char netfn = msg->rsp[0] >> 2; 3992 unsigned char cmd = msg->rsp[3]; 3993 3994 rcu_read_lock(); 3995 /* We always use channel 0 for direct messages. */ 3996 rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); 3997 if (rcvr) { 3998 user = rcvr->user; 3999 kref_get(&user->refcount); 4000 } else 4001 user = NULL; 4002 rcu_read_unlock(); 4003 4004 if (user == NULL) { 4005 /* We didn't find a user, deliver an error response. 
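		 *
		 * The reply built below mirrors the request: the netfn is
		 * bumped to its response value ((netfn + 1) << 2), the
		 * requester and responder LUNs swap places, and the body
		 * is just the "invalid command" completion code.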
*/ 4006 ipmi_inc_stat(intf, unhandled_commands); 4007 4008 msg->data[0] = (netfn + 1) << 2; 4009 msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */ 4010 msg->data[1] = msg->rsp[1]; /* Addr */ 4011 msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */ 4012 msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */ 4013 msg->data[3] = cmd; 4014 msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; 4015 msg->data_size = 5; 4016 4017 rcu_read_lock(); 4018 if (!intf->in_shutdown) { 4019 smi_send(intf, intf->handlers, msg, 0); 4020 /* 4021 * We used the message, so return the value 4022 * that causes it to not be freed or 4023 * queued. 4024 */ 4025 rv = -1; 4026 } 4027 rcu_read_unlock(); 4028 } else { 4029 recv_msg = ipmi_alloc_recv_msg(); 4030 if (!recv_msg) { 4031 /* 4032 * We couldn't allocate memory for the 4033 * message, so requeue it for handling 4034 * later. 4035 */ 4036 rv = 1; 4037 kref_put(&user->refcount, free_user); 4038 } else { 4039 /* Extract the source address from the data. */ 4040 daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; 4041 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 4042 daddr->channel = 0; 4043 daddr->slave_addr = msg->rsp[1]; 4044 daddr->rs_lun = msg->rsp[0] & 3; 4045 daddr->rq_lun = msg->rsp[2] & 3; 4046 4047 /* 4048 * Extract the rest of the message information 4049 * from the IPMB header. 4050 */ 4051 recv_msg->user = user; 4052 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 4053 recv_msg->msgid = (msg->rsp[2] >> 2); 4054 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4055 recv_msg->msg.cmd = msg->rsp[3]; 4056 recv_msg->msg.data = recv_msg->msg_data; 4057 4058 recv_msg->msg.data_len = msg->rsp_size - 4; 4059 memcpy(recv_msg->msg_data, msg->rsp + 4, 4060 msg->rsp_size - 4); 4061 if (deliver_response(intf, recv_msg)) 4062 ipmi_inc_stat(intf, unhandled_commands); 4063 else 4064 ipmi_inc_stat(intf, handled_commands); 4065 } 4066 } 4067 4068 return rv; 4069 } 4070 4071 static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf, 4072 struct ipmi_smi_msg *msg) 4073 { 4074 struct ipmi_recv_msg *recv_msg; 4075 struct ipmi_ipmb_direct_addr *daddr; 4076 4077 recv_msg = msg->user_data; 4078 if (recv_msg == NULL) { 4079 dev_warn(intf->si_dev, 4080 "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); 4081 return 0; 4082 } 4083 4084 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4085 recv_msg->msgid = msg->msgid; 4086 daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr; 4087 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 4088 daddr->channel = 0; 4089 daddr->slave_addr = msg->rsp[1]; 4090 daddr->rq_lun = msg->rsp[0] & 3; 4091 daddr->rs_lun = msg->rsp[2] & 3; 4092 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4093 recv_msg->msg.cmd = msg->rsp[3]; 4094 memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4); 4095 recv_msg->msg.data = recv_msg->msg_data; 4096 recv_msg->msg.data_len = msg->rsp_size - 4; 4097 deliver_local_response(intf, recv_msg); 4098 4099 return 0; 4100 } 4101 4102 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, 4103 struct ipmi_smi_msg *msg) 4104 { 4105 struct ipmi_lan_addr lan_addr; 4106 struct ipmi_recv_msg *recv_msg; 4107 4108 4109 /* 4110 * This is 13, not 12, because the response must contain a 4111 * completion code. 4112 */ 4113 if (msg->rsp_size < 13) { 4114 /* Message not big enough, just ignore it. 
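		 *
		 * (Layout assumed by this handler, taken from the field
		 * extraction below:
		 *
		 *	rsp[2]    completion code
		 *	rsp[3]    privilege << 4 | channel
		 *	rsp[4]    session handle
		 *	rsp[5]    local SWID
		 *	rsp[6]    netfn << 2
		 *	rsp[8]    remote SWID
		 *	rsp[9]    rqSeq << 2 | LUN
		 *	rsp[10]   command
		 *	rsp[11]+  data, then a trailing checksum)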
*/
4115 ipmi_inc_stat(intf, invalid_lan_responses);
4116 return 0;
4117 }
4118
4119 if (msg->rsp[2] != 0) {
4120 /* An error getting the response, just ignore it. */
4121 return 0;
4122 }
4123
4124 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
4125 lan_addr.session_handle = msg->rsp[4];
4126 lan_addr.remote_SWID = msg->rsp[8];
4127 lan_addr.local_SWID = msg->rsp[5];
4128 lan_addr.channel = msg->rsp[3] & 0x0f;
4129 lan_addr.privilege = msg->rsp[3] >> 4;
4130 lan_addr.lun = msg->rsp[9] & 3;
4131
4132 /*
4133 * It's a response from a remote entity. Look up the sequence
4134 * number and handle the response.
4135 */
4136 if (intf_find_seq(intf,
4137 msg->rsp[9] >> 2,
4138 msg->rsp[3] & 0x0f,
4139 msg->rsp[10],
4140 (msg->rsp[6] >> 2) & (~1),
4141 (struct ipmi_addr *) &lan_addr,
4142 &recv_msg)) {
4143 /*
4144 * We were unable to find the sequence number,
4145 * so just nuke the message.
4146 */
4147 ipmi_inc_stat(intf, unhandled_lan_responses);
4148 return 0;
4149 }
4150
4151 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
4152 /*
4153 * The other fields matched, so no need to set them, except
4154 * for netfn, which needs to be the response that was
4155 * returned, not the request value.
4156 */
4157 recv_msg->msg.netfn = msg->rsp[6] >> 2;
4158 recv_msg->msg.data = recv_msg->msg_data;
4159 recv_msg->msg.data_len = msg->rsp_size - 12;
4160 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4161 if (deliver_response(intf, recv_msg))
4162 ipmi_inc_stat(intf, unhandled_lan_responses);
4163 else
4164 ipmi_inc_stat(intf, handled_lan_responses);
4165
4166 return 0;
4167 }
4168
4169 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
4170 struct ipmi_smi_msg *msg)
4171 {
4172 struct cmd_rcvr *rcvr;
4173 int rv = 0;
4174 unsigned char netfn;
4175 unsigned char cmd;
4176 unsigned char chan;
4177 struct ipmi_user *user = NULL;
4178 struct ipmi_lan_addr *lan_addr;
4179 struct ipmi_recv_msg *recv_msg;
4180
4181 if (msg->rsp_size < 12) {
4182 /* Message not big enough, just ignore it. */
4183 ipmi_inc_stat(intf, invalid_commands);
4184 return 0;
4185 }
4186
4187 if (msg->rsp[2] != 0) {
4188 /* An error getting the response, just ignore it. */
4189 return 0;
4190 }
4191
4192 netfn = msg->rsp[6] >> 2;
4193 cmd = msg->rsp[10];
4194 chan = msg->rsp[3] & 0xf;
4195
4196 rcu_read_lock();
4197 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4198 if (rcvr) {
4199 user = rcvr->user;
4200 kref_get(&user->refcount);
4201 } else
4202 user = NULL;
4203 rcu_read_unlock();
4204
4205 if (user == NULL) {
4206 /* We didn't find a user, just give up. */
4207 ipmi_inc_stat(intf, unhandled_commands);
4208
4209 /*
4210 * Don't do anything with these messages, just allow
4211 * them to be freed.
4212 */
4213 rv = 0;
4214 } else {
4215 recv_msg = ipmi_alloc_recv_msg();
4216 if (!recv_msg) {
4217 /*
4218 * We couldn't allocate memory for the
4219 * message, so requeue it for handling later.
4220 */
4221 rv = 1;
4222 kref_put(&user->refcount, free_user);
4223 } else {
4224 /* Extract the source address from the data. */
4225 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
4226 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
4227 lan_addr->session_handle = msg->rsp[4];
4228 lan_addr->remote_SWID = msg->rsp[8];
4229 lan_addr->local_SWID = msg->rsp[5];
4230 lan_addr->lun = msg->rsp[9] & 3;
4231 lan_addr->channel = msg->rsp[3] & 0xf;
4232 lan_addr->privilege = msg->rsp[3] >> 4;
4233
4234 /*
4235 * Extract the rest of the message information
4236 * from the LAN header.
4237 */
4238 recv_msg->user = user;
4239 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
4240 recv_msg->msgid = msg->rsp[9] >> 2;
4241 recv_msg->msg.netfn = msg->rsp[6] >> 2;
4242 recv_msg->msg.cmd = msg->rsp[10];
4243 recv_msg->msg.data = recv_msg->msg_data;
4244
4245 /*
4246 * We chop off 12, not 11 bytes because the checksum
4247 * at the end also needs to be removed.
4248 */
4249 recv_msg->msg.data_len = msg->rsp_size - 12;
4250 memcpy(recv_msg->msg_data, &msg->rsp[11],
4251 msg->rsp_size - 12);
4252 if (deliver_response(intf, recv_msg))
4253 ipmi_inc_stat(intf, unhandled_commands);
4254 else
4255 ipmi_inc_stat(intf, handled_commands);
4256 }
4257 }
4258
4259 return rv;
4260 }
4261
4262 /*
4263 * This routine will handle "Get Message" command responses with
4264 * channels that use an OEM Medium. The message format belongs to
4265 * the OEM. See IPMI 2.0 specification, Chapter 6 and
4266 * Chapter 22, sections 22.6 and 22.24 for more details.
4267 */
4268 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
4269 struct ipmi_smi_msg *msg)
4270 {
4271 struct cmd_rcvr *rcvr;
4272 int rv = 0;
4273 unsigned char netfn;
4274 unsigned char cmd;
4275 unsigned char chan;
4276 struct ipmi_user *user = NULL;
4277 struct ipmi_system_interface_addr *smi_addr;
4278 struct ipmi_recv_msg *recv_msg;
4279
4280 /*
4281 * We expect the OEM SW to perform error checking, so we just
4282 * do some basic sanity checks.
4283 */
4284 if (msg->rsp_size < 4) {
4285 /* Message not big enough, just ignore it. */
4286 ipmi_inc_stat(intf, invalid_commands);
4287 return 0;
4288 }
4289
4290 if (msg->rsp[2] != 0) {
4291 /* An error getting the response, just ignore it. */
4292 return 0;
4293 }
4294
4295 /*
4296 * This is an OEM message, so the OEM needs to know how to
4297 * handle the message. We do no interpretation.
4298 */
4299 netfn = msg->rsp[0] >> 2;
4300 cmd = msg->rsp[1];
4301 chan = msg->rsp[3] & 0xf;
4302
4303 rcu_read_lock();
4304 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
4305 if (rcvr) {
4306 user = rcvr->user;
4307 kref_get(&user->refcount);
4308 } else
4309 user = NULL;
4310 rcu_read_unlock();
4311
4312 if (user == NULL) {
4313 /* We didn't find a user, just give up. */
4314 ipmi_inc_stat(intf, unhandled_commands);
4315
4316 /*
4317 * Don't do anything with these messages, just allow
4318 * them to be freed.
4319 */
4320
4321 rv = 0;
4322 } else {
4323 recv_msg = ipmi_alloc_recv_msg();
4324 if (!recv_msg) {
4325 /*
4326 * We couldn't allocate memory for the
4327 * message, so requeue it for handling
4328 * later.
4329 */
4330 rv = 1;
4331 kref_put(&user->refcount, free_user);
4332 } else {
4333 /*
4334 * OEM Messages are expected to be delivered via
4335 * the system interface to SMS software.
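 * The address is therefore filled in below as a system
 * interface address on IPMI_BMC_CHANNEL rather than as a
 * channel-specific address.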
We might
4336 * need to visit this again depending on OEM
4337 * requirements.
4338 */
4339 smi_addr = ((struct ipmi_system_interface_addr *)
4340 &recv_msg->addr);
4341 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4342 smi_addr->channel = IPMI_BMC_CHANNEL;
4343 smi_addr->lun = msg->rsp[0] & 3;
4344
4345 recv_msg->user = user;
4346 recv_msg->user_msg_data = NULL;
4347 recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
4348 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4349 recv_msg->msg.cmd = msg->rsp[1];
4350 recv_msg->msg.data = recv_msg->msg_data;
4351
4352 /*
4353 * The message starts at byte 4 which follows the
4354 * Channel Byte in the "GET MESSAGE" command.
4355 */
4356 recv_msg->msg.data_len = msg->rsp_size - 4;
4357 memcpy(recv_msg->msg_data, &msg->rsp[4],
4358 msg->rsp_size - 4);
4359 if (deliver_response(intf, recv_msg))
4360 ipmi_inc_stat(intf, unhandled_commands);
4361 else
4362 ipmi_inc_stat(intf, handled_commands);
4363 }
4364 }
4365
4366 return rv;
4367 }
4368
4369 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
4370 struct ipmi_smi_msg *msg)
4371 {
4372 struct ipmi_system_interface_addr *smi_addr;
4373
4374 recv_msg->msgid = 0;
4375 smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
4376 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4377 smi_addr->channel = IPMI_BMC_CHANNEL;
4378 smi_addr->lun = msg->rsp[0] & 3;
4379 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
4380 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4381 recv_msg->msg.cmd = msg->rsp[1];
4382 memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
4383 recv_msg->msg.data = recv_msg->msg_data;
4384 recv_msg->msg.data_len = msg->rsp_size - 3;
4385 }
4386
4387 static int handle_read_event_rsp(struct ipmi_smi *intf,
4388 struct ipmi_smi_msg *msg)
4389 {
4390 struct ipmi_recv_msg *recv_msg, *recv_msg2;
4391 struct list_head msgs;
4392 struct ipmi_user *user;
4393 int rv = 0, deliver_count = 0, index;
4394 unsigned long flags;
4395
4396 if (msg->rsp_size < 19) {
4397 /* Message is too small to be an IPMB event. */
4398 ipmi_inc_stat(intf, invalid_events);
4399 return 0;
4400 }
4401
4402 if (msg->rsp[2] != 0) {
4403 /* An error getting the event, just ignore it. */
4404 return 0;
4405 }
4406
4407 INIT_LIST_HEAD(&msgs);
4408
4409 spin_lock_irqsave(&intf->events_lock, flags);
4410
4411 ipmi_inc_stat(intf, events);
4412
4413 /*
4414 * Allocate and fill in one message for every user that is
4415 * getting events.
4416 */
4417 index = srcu_read_lock(&intf->users_srcu);
4418 list_for_each_entry_rcu(user, &intf->users, link) {
4419 if (!user->gets_events)
4420 continue;
4421
4422 recv_msg = ipmi_alloc_recv_msg();
4423 if (!recv_msg) {
4424 srcu_read_unlock(&intf->users_srcu, index);
4425 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
4426 link) {
4427 list_del(&recv_msg->link);
4428 ipmi_free_recv_msg(recv_msg);
4429 }
4430 /*
4431 * We couldn't allocate memory for the
4432 * message, so requeue it for handling
4433 * later.
4434 */
4435 rv = 1;
4436 goto out;
4437 }
4438
4439 deliver_count++;
4440
4441 copy_event_into_recv_msg(recv_msg, msg);
4442 recv_msg->user = user;
4443 kref_get(&user->refcount);
4444 list_add_tail(&recv_msg->link, &msgs);
4445 }
4446 srcu_read_unlock(&intf->users_srcu, index);
4447
4448 if (deliver_count) {
4449 /* Now deliver all the messages.
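 * The _safe iterator is required because each message is
 * unlinked before it is handed to deliver_local_response();
 * every message already holds a user reference taken above.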
*/
4450 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
4451 list_del(&recv_msg->link);
4452 deliver_local_response(intf, recv_msg);
4453 }
4454 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
4455 /*
4456 * No one to receive the message, put it in the queue if
4457 * there are not already too many things in the queue.
4458 */
4459 recv_msg = ipmi_alloc_recv_msg();
4460 if (!recv_msg) {
4461 /*
4462 * We couldn't allocate memory for the
4463 * message, so requeue it for handling
4464 * later.
4465 */
4466 rv = 1;
4467 goto out;
4468 }
4469
4470 copy_event_into_recv_msg(recv_msg, msg);
4471 list_add_tail(&recv_msg->link, &intf->waiting_events);
4472 intf->waiting_events_count++;
4473 } else if (!intf->event_msg_printed) {
4474 /*
4475 * There are too many things in the queue, discard this
4476 * message.
4477 */
4478 dev_warn(intf->si_dev,
4479 "Event queue full, discarding incoming events\n");
4480 intf->event_msg_printed = 1;
4481 }
4482
4483 out:
4484 spin_unlock_irqrestore(&intf->events_lock, flags);
4485
4486 return rv;
4487 }
4488
4489 static int handle_bmc_rsp(struct ipmi_smi *intf,
4490 struct ipmi_smi_msg *msg)
4491 {
4492 struct ipmi_recv_msg *recv_msg;
4493 struct ipmi_system_interface_addr *smi_addr;
4494
4495 recv_msg = msg->user_data;
4496 if (recv_msg == NULL) {
4497 dev_warn(intf->si_dev,
4498 "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
4499 return 0;
4500 }
4501
4502 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4503 recv_msg->msgid = msg->msgid;
4504 smi_addr = ((struct ipmi_system_interface_addr *)
4505 &recv_msg->addr);
4506 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4507 smi_addr->channel = IPMI_BMC_CHANNEL;
4508 smi_addr->lun = msg->rsp[0] & 3;
4509 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4510 recv_msg->msg.cmd = msg->rsp[1];
4511 memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
4512 recv_msg->msg.data = recv_msg->msg_data;
4513 recv_msg->msg.data_len = msg->rsp_size - 2;
4514 deliver_local_response(intf, recv_msg);
4515
4516 return 0;
4517 }
4518
4519 /*
4520 * Handle a received message. Return 1 if the message should be requeued,
4521 * 0 if the message should be freed, or -1 if the message should not
4522 * be freed or requeued.
4523 */
4524 static int handle_one_recv_msg(struct ipmi_smi *intf,
4525 struct ipmi_smi_msg *msg)
4526 {
4527 int requeue = 0;
4528 int chan;
4529 unsigned char cc;
4530 bool is_cmd = !((msg->rsp[0] >> 2) & 1);
4531
4532 dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp);
4533
4534 if (msg->rsp_size < 2) {
4535 /* Message is too small to be correct. */
4536 dev_warn(intf->si_dev,
4537 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
4538 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4539
4540 return_unspecified:
4541 /* Generate an error response for the message. */
4542 msg->rsp[0] = msg->data[0] | (1 << 2);
4543 msg->rsp[1] = msg->data[1];
4544 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4545 msg->rsp_size = 3;
4546 } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
4547 /* Commands must have at least 4 bytes, responses 5. */
4548 if (is_cmd && (msg->rsp_size < 4)) {
4549 ipmi_inc_stat(intf, invalid_commands);
4550 goto out;
4551 }
4552 if (!is_cmd && (msg->rsp_size < 5)) {
4553 ipmi_inc_stat(intf, invalid_ipmb_responses);
4554 /* Construct a valid error response.
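 * The reply is synthesized from the original request in
 * msg->data rather than from the runt response: the netfn is
 * converted to its response form, the LUNs are swapped, and the
 * address, sequence, and command are echoed back with an
 * unspecified-error completion code.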
*/
4555 msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
4556 msg->rsp[0] |= (1 << 2); /* Make it a response */
4557 msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
4558 msg->rsp[1] = msg->data[1]; /* Addr */
4559 msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
4560 msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
4561 msg->rsp[3] = msg->data[3]; /* Cmd */
4562 msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
4563 msg->rsp_size = 5;
4564 }
4565 } else if ((msg->data_size >= 2)
4566 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
4567 && (msg->data[1] == IPMI_SEND_MSG_CMD)
4568 && (msg->user_data == NULL)) {
4569
4570 if (intf->in_shutdown)
4571 goto out;
4572
4573 /*
4574 * This is the local response to a command send, start
4575 * the timer for these. The user_data will not be
4576 * NULL if this is a response send, and we will let
4577 * response sends just go through.
4578 */
4579
4580 /*
4581 * Check for errors. If we get certain errors (ones
4582 * that mean basically we can try again later), we
4583 * ignore them and start the timer. Otherwise we
4584 * report the error immediately.
4585 */
4586 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
4587 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
4588 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
4589 && (msg->rsp[2] != IPMI_BUS_ERR)
4590 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
4591 int ch = msg->rsp[3] & 0xf;
4592 struct ipmi_channel *chans;
4593
4594 /* Got an error sending the message, handle it. */
4595
4596 chans = READ_ONCE(intf->channel_list)->c;
4597 if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
4598 || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
4599 ipmi_inc_stat(intf, sent_lan_command_errs);
4600 else
4601 ipmi_inc_stat(intf, sent_ipmb_command_errs);
4602 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
4603 } else
4604 /* The message was sent, start the timer. */
4605 intf_start_seq_timer(intf, msg->msgid);
4606 requeue = 0;
4607 goto out;
4608 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4609 || (msg->rsp[1] != msg->data[1])) {
4610 /*
4611 * The NetFN and Command in the response are not even
4612 * marginally correct.
4613 */
4614 dev_warn(intf->si_dev,
4615 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4616 (msg->data[0] >> 2) | 1, msg->data[1],
4617 msg->rsp[0] >> 2, msg->rsp[1]);
4618
4619 goto return_unspecified;
4620 }
4621
4622 if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
4623 if ((msg->data[0] >> 2) & 1) {
4624 /* It's a response to a sent response. */
4625 chan = 0;
4626 cc = msg->rsp[4];
4627 goto process_response_response;
4628 }
4629 if (is_cmd)
4630 requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
4631 else
4632 requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
4633 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4634 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4635 && (msg->user_data != NULL)) {
4636 /*
4637 * It's a response to a response we sent. For this we
4638 * deliver a send message response to the user.
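 * Only the completion code is of interest, so it is passed up
 * as a one-byte message of type IPMI_RESPONSE_RESPONSE_TYPE.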
4639 */ 4640 struct ipmi_recv_msg *recv_msg; 4641 4642 chan = msg->data[2] & 0x0f; 4643 if (chan >= IPMI_MAX_CHANNELS) 4644 /* Invalid channel number */ 4645 goto out; 4646 cc = msg->rsp[2]; 4647 4648 process_response_response: 4649 recv_msg = msg->user_data; 4650 4651 requeue = 0; 4652 if (!recv_msg) 4653 goto out; 4654 4655 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; 4656 recv_msg->msg.data = recv_msg->msg_data; 4657 recv_msg->msg_data[0] = cc; 4658 recv_msg->msg.data_len = 1; 4659 deliver_local_response(intf, recv_msg); 4660 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4661 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { 4662 struct ipmi_channel *chans; 4663 4664 /* It's from the receive queue. */ 4665 chan = msg->rsp[3] & 0xf; 4666 if (chan >= IPMI_MAX_CHANNELS) { 4667 /* Invalid channel number */ 4668 requeue = 0; 4669 goto out; 4670 } 4671 4672 /* 4673 * We need to make sure the channels have been initialized. 4674 * The channel_handler routine will set the "curr_channel" 4675 * equal to or greater than IPMI_MAX_CHANNELS when all the 4676 * channels for this interface have been initialized. 4677 */ 4678 if (!intf->channels_ready) { 4679 requeue = 0; /* Throw the message away */ 4680 goto out; 4681 } 4682 4683 chans = READ_ONCE(intf->channel_list)->c; 4684 4685 switch (chans[chan].medium) { 4686 case IPMI_CHANNEL_MEDIUM_IPMB: 4687 if (msg->rsp[4] & 0x04) { 4688 /* 4689 * It's a response, so find the 4690 * requesting message and send it up. 4691 */ 4692 requeue = handle_ipmb_get_msg_rsp(intf, msg); 4693 } else { 4694 /* 4695 * It's a command to the SMS from some other 4696 * entity. Handle that. 4697 */ 4698 requeue = handle_ipmb_get_msg_cmd(intf, msg); 4699 } 4700 break; 4701 4702 case IPMI_CHANNEL_MEDIUM_8023LAN: 4703 case IPMI_CHANNEL_MEDIUM_ASYNC: 4704 if (msg->rsp[6] & 0x04) { 4705 /* 4706 * It's a response, so find the 4707 * requesting message and send it up. 4708 */ 4709 requeue = handle_lan_get_msg_rsp(intf, msg); 4710 } else { 4711 /* 4712 * It's a command to the SMS from some other 4713 * entity. Handle that. 4714 */ 4715 requeue = handle_lan_get_msg_cmd(intf, msg); 4716 } 4717 break; 4718 4719 default: 4720 /* Check for OEM Channels. Clients had better 4721 register for these commands. */ 4722 if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN) 4723 && (chans[chan].medium 4724 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) { 4725 requeue = handle_oem_get_msg_cmd(intf, msg); 4726 } else { 4727 /* 4728 * We don't handle the channel type, so just 4729 * free the message. 4730 */ 4731 requeue = 0; 4732 } 4733 } 4734 4735 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4736 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { 4737 /* It's an asynchronous event. */ 4738 requeue = handle_read_event_rsp(intf, msg); 4739 } else { 4740 /* It's a response from the local BMC. */ 4741 requeue = handle_bmc_rsp(intf, msg); 4742 } 4743 4744 out: 4745 return requeue; 4746 } 4747 4748 /* 4749 * If there are messages in the queue or pretimeouts, handle them. 4750 */ 4751 static void handle_new_recv_msgs(struct ipmi_smi *intf) 4752 { 4753 struct ipmi_smi_msg *smi_msg; 4754 unsigned long flags = 0; 4755 int rv; 4756 int run_to_completion = intf->run_to_completion; 4757 4758 /* See if any waiting messages need to be processed. 
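 * The queue lock is taken only when we are not running to
 * completion; in run-to-completion mode (set from the panic
 * path) everything is single-threaded and locking is skipped.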
*/
4759 if (!run_to_completion)
4760 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4761 while (!list_empty(&intf->waiting_rcv_msgs)) {
4762 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4763 struct ipmi_smi_msg, link);
4764 list_del(&smi_msg->link);
4765 if (!run_to_completion)
4766 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4767 flags);
4768 rv = handle_one_recv_msg(intf, smi_msg);
4769 if (!run_to_completion)
4770 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4771 if (rv > 0) {
4772 /*
4773 * To preserve message order, quit if we
4774 * can't handle a message. Add the message
4775 * back at the head; this is safe because this
4776 * tasklet is the only thing that pulls the
4777 * messages.
4778 */
4779 list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4780 break;
4781 } else {
4782 if (rv == 0)
4783 /* Message handled */
4784 ipmi_free_smi_msg(smi_msg);
4785 /* If rv < 0, fatal error, delete but don't free. */
4786 }
4787 }
4788 if (!run_to_completion)
4789 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4790
4791 /*
4792 * If the pretimeout count is non-zero, decrement it by one and
4793 * deliver pretimeouts to all the users.
4794 */
4795 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4796 struct ipmi_user *user;
4797 int index;
4798
4799 index = srcu_read_lock(&intf->users_srcu);
4800 list_for_each_entry_rcu(user, &intf->users, link) {
4801 if (user->handler->ipmi_watchdog_pretimeout)
4802 user->handler->ipmi_watchdog_pretimeout(
4803 user->handler_data);
4804 }
4805 srcu_read_unlock(&intf->users_srcu, index);
4806 }
4807 }
4808
4809 static void smi_recv_tasklet(struct tasklet_struct *t)
4810 {
4811 unsigned long flags = 0; /* keep us warning-free. */
4812 struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
4813 int run_to_completion = intf->run_to_completion;
4814 struct ipmi_smi_msg *newmsg = NULL;
4815
4816 /*
4817 * Start the next message if available.
4818 *
4819 * Do this here, not in the actual receiver, because we may deadlock
4820 * because the lower layer is allowed to hold locks while calling
4821 * message delivery.
4822 */
4823
4824 rcu_read_lock();
4825
4826 if (!run_to_completion)
4827 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
4828 if (intf->curr_msg == NULL && !intf->in_shutdown) {
4829 struct list_head *entry = NULL;
4830
4831 /* Pick the high priority queue first. */
4832 if (!list_empty(&intf->hp_xmit_msgs))
4833 entry = intf->hp_xmit_msgs.next;
4834 else if (!list_empty(&intf->xmit_msgs))
4835 entry = intf->xmit_msgs.next;
4836
4837 if (entry) {
4838 list_del(entry);
4839 newmsg = list_entry(entry, struct ipmi_smi_msg, link);
4840 intf->curr_msg = newmsg;
4841 }
4842 }
4843
4844 if (!run_to_completion)
4845 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
4846 if (newmsg)
4847 intf->handlers->sender(intf->send_info, newmsg);
4848
4849 rcu_read_unlock();
4850
4851 handle_new_recv_msgs(intf);
4852 }
4853
4854 /* Handle a new message from the lower layer. */
4855 void ipmi_smi_msg_received(struct ipmi_smi *intf,
4856 struct ipmi_smi_msg *msg)
4857 {
4858 unsigned long flags = 0; /* keep us warning-free. */
4859 int run_to_completion = intf->run_to_completion;
4860
4861 /*
4862 * To preserve message order, we keep a queue and deliver from
4863 * a tasklet.
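 * In run-to-completion mode the tasklet body is invoked
 * directly below rather than being scheduled.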
4864 */ 4865 if (!run_to_completion) 4866 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4867 list_add_tail(&msg->link, &intf->waiting_rcv_msgs); 4868 if (!run_to_completion) 4869 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4870 flags); 4871 4872 if (!run_to_completion) 4873 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4874 /* 4875 * We can get an asynchronous event or receive message in addition 4876 * to commands we send. 4877 */ 4878 if (msg == intf->curr_msg) 4879 intf->curr_msg = NULL; 4880 if (!run_to_completion) 4881 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4882 4883 if (run_to_completion) 4884 smi_recv_tasklet(&intf->recv_tasklet); 4885 else 4886 tasklet_schedule(&intf->recv_tasklet); 4887 } 4888 EXPORT_SYMBOL(ipmi_smi_msg_received); 4889 4890 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf) 4891 { 4892 if (intf->in_shutdown) 4893 return; 4894 4895 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); 4896 tasklet_schedule(&intf->recv_tasklet); 4897 } 4898 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 4899 4900 static struct ipmi_smi_msg * 4901 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, 4902 unsigned char seq, long seqid) 4903 { 4904 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); 4905 if (!smi_msg) 4906 /* 4907 * If we can't allocate the message, then just return, we 4908 * get 4 retries, so this should be ok. 4909 */ 4910 return NULL; 4911 4912 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); 4913 smi_msg->data_size = recv_msg->msg.data_len; 4914 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); 4915 4916 dev_dbg(intf->si_dev, "Resend: %*ph\n", 4917 smi_msg->data_size, smi_msg->data); 4918 4919 return smi_msg; 4920 } 4921 4922 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, 4923 struct list_head *timeouts, 4924 unsigned long timeout_period, 4925 int slot, unsigned long *flags, 4926 bool *need_timer) 4927 { 4928 struct ipmi_recv_msg *msg; 4929 4930 if (intf->in_shutdown) 4931 return; 4932 4933 if (!ent->inuse) 4934 return; 4935 4936 if (timeout_period < ent->timeout) { 4937 ent->timeout -= timeout_period; 4938 *need_timer = true; 4939 return; 4940 } 4941 4942 if (ent->retries_left == 0) { 4943 /* The message has used all its retries. */ 4944 ent->inuse = 0; 4945 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 4946 msg = ent->recv_msg; 4947 list_add_tail(&msg->link, timeouts); 4948 if (ent->broadcast) 4949 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); 4950 else if (is_lan_addr(&ent->recv_msg->addr)) 4951 ipmi_inc_stat(intf, timed_out_lan_commands); 4952 else 4953 ipmi_inc_stat(intf, timed_out_ipmb_commands); 4954 } else { 4955 struct ipmi_smi_msg *smi_msg; 4956 /* More retries, send again. */ 4957 4958 *need_timer = true; 4959 4960 /* 4961 * Start with the max timer, set to normal timer after 4962 * the message is sent. 4963 */ 4964 ent->timeout = MAX_MSG_TIMEOUT; 4965 ent->retries_left--; 4966 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 4967 ent->seqid); 4968 if (!smi_msg) { 4969 if (is_lan_addr(&ent->recv_msg->addr)) 4970 ipmi_inc_stat(intf, 4971 dropped_rexmit_lan_commands); 4972 else 4973 ipmi_inc_stat(intf, 4974 dropped_rexmit_ipmb_commands); 4975 return; 4976 } 4977 4978 spin_unlock_irqrestore(&intf->seq_lock, *flags); 4979 4980 /* 4981 * Send the new message. We send with a zero 4982 * priority. 
It timed out, I doubt time is that 4983 * critical now, and high priority messages are really 4984 * only for messages to the local MC, which don't get 4985 * resent. 4986 */ 4987 if (intf->handlers) { 4988 if (is_lan_addr(&ent->recv_msg->addr)) 4989 ipmi_inc_stat(intf, 4990 retransmitted_lan_commands); 4991 else 4992 ipmi_inc_stat(intf, 4993 retransmitted_ipmb_commands); 4994 4995 smi_send(intf, intf->handlers, smi_msg, 0); 4996 } else 4997 ipmi_free_smi_msg(smi_msg); 4998 4999 spin_lock_irqsave(&intf->seq_lock, *flags); 5000 } 5001 } 5002 5003 static bool ipmi_timeout_handler(struct ipmi_smi *intf, 5004 unsigned long timeout_period) 5005 { 5006 struct list_head timeouts; 5007 struct ipmi_recv_msg *msg, *msg2; 5008 unsigned long flags; 5009 int i; 5010 bool need_timer = false; 5011 5012 if (!intf->bmc_registered) { 5013 kref_get(&intf->refcount); 5014 if (!schedule_work(&intf->bmc_reg_work)) { 5015 kref_put(&intf->refcount, intf_free); 5016 need_timer = true; 5017 } 5018 } 5019 5020 /* 5021 * Go through the seq table and find any messages that 5022 * have timed out, putting them in the timeouts 5023 * list. 5024 */ 5025 INIT_LIST_HEAD(&timeouts); 5026 spin_lock_irqsave(&intf->seq_lock, flags); 5027 if (intf->ipmb_maintenance_mode_timeout) { 5028 if (intf->ipmb_maintenance_mode_timeout <= timeout_period) 5029 intf->ipmb_maintenance_mode_timeout = 0; 5030 else 5031 intf->ipmb_maintenance_mode_timeout -= timeout_period; 5032 } 5033 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) 5034 check_msg_timeout(intf, &intf->seq_table[i], 5035 &timeouts, timeout_period, i, 5036 &flags, &need_timer); 5037 spin_unlock_irqrestore(&intf->seq_lock, flags); 5038 5039 list_for_each_entry_safe(msg, msg2, &timeouts, link) 5040 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE); 5041 5042 /* 5043 * Maintenance mode handling. Check the timeout 5044 * optimistically before we claim the lock. It may 5045 * mean a timeout gets missed occasionally, but that 5046 * only means the timeout gets extended by one period 5047 * in that case. No big deal, and it avoids the lock 5048 * most of the time. 5049 */ 5050 if (intf->auto_maintenance_timeout > 0) { 5051 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 5052 if (intf->auto_maintenance_timeout > 0) { 5053 intf->auto_maintenance_timeout 5054 -= timeout_period; 5055 if (!intf->maintenance_mode 5056 && (intf->auto_maintenance_timeout <= 0)) { 5057 intf->maintenance_mode_enable = false; 5058 maintenance_mode_update(intf); 5059 } 5060 } 5061 spin_unlock_irqrestore(&intf->maintenance_mode_lock, 5062 flags); 5063 } 5064 5065 tasklet_schedule(&intf->recv_tasklet); 5066 5067 return need_timer; 5068 } 5069 5070 static void ipmi_request_event(struct ipmi_smi *intf) 5071 { 5072 /* No event requests when in maintenance mode. 
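 * Note that maintenance_mode_enable normally stays set until
 * the auto-maintenance timeout handled above expires.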
*/ 5073 if (intf->maintenance_mode_enable) 5074 return; 5075 5076 if (!intf->in_shutdown) 5077 intf->handlers->request_events(intf->send_info); 5078 } 5079 5080 static struct timer_list ipmi_timer; 5081 5082 static atomic_t stop_operation; 5083 5084 static void ipmi_timeout(struct timer_list *unused) 5085 { 5086 struct ipmi_smi *intf; 5087 bool need_timer = false; 5088 int index; 5089 5090 if (atomic_read(&stop_operation)) 5091 return; 5092 5093 index = srcu_read_lock(&ipmi_interfaces_srcu); 5094 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 5095 if (atomic_read(&intf->event_waiters)) { 5096 intf->ticks_to_req_ev--; 5097 if (intf->ticks_to_req_ev == 0) { 5098 ipmi_request_event(intf); 5099 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 5100 } 5101 need_timer = true; 5102 } 5103 5104 need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); 5105 } 5106 srcu_read_unlock(&ipmi_interfaces_srcu, index); 5107 5108 if (need_timer) 5109 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5110 } 5111 5112 static void need_waiter(struct ipmi_smi *intf) 5113 { 5114 /* Racy, but worst case we start the timer twice. */ 5115 if (!timer_pending(&ipmi_timer)) 5116 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5117 } 5118 5119 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); 5120 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); 5121 5122 static void free_smi_msg(struct ipmi_smi_msg *msg) 5123 { 5124 atomic_dec(&smi_msg_inuse_count); 5125 /* Try to keep as much stuff out of the panic path as possible. */ 5126 if (!oops_in_progress) 5127 kfree(msg); 5128 } 5129 5130 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) 5131 { 5132 struct ipmi_smi_msg *rv; 5133 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC); 5134 if (rv) { 5135 rv->done = free_smi_msg; 5136 rv->user_data = NULL; 5137 rv->type = IPMI_SMI_MSG_TYPE_NORMAL; 5138 atomic_inc(&smi_msg_inuse_count); 5139 } 5140 return rv; 5141 } 5142 EXPORT_SYMBOL(ipmi_alloc_smi_msg); 5143 5144 static void free_recv_msg(struct ipmi_recv_msg *msg) 5145 { 5146 atomic_dec(&recv_msg_inuse_count); 5147 /* Try to keep as much stuff out of the panic path as possible. */ 5148 if (!oops_in_progress) 5149 kfree(msg); 5150 } 5151 5152 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) 5153 { 5154 struct ipmi_recv_msg *rv; 5155 5156 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); 5157 if (rv) { 5158 rv->user = NULL; 5159 rv->done = free_recv_msg; 5160 atomic_inc(&recv_msg_inuse_count); 5161 } 5162 return rv; 5163 } 5164 5165 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) 5166 { 5167 if (msg->user && !oops_in_progress) 5168 kref_put(&msg->user->refcount, free_user); 5169 msg->done(msg); 5170 } 5171 EXPORT_SYMBOL(ipmi_free_recv_msg); 5172 5173 static atomic_t panic_done_count = ATOMIC_INIT(0); 5174 5175 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) 5176 { 5177 atomic_dec(&panic_done_count); 5178 } 5179 5180 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) 5181 { 5182 atomic_dec(&panic_done_count); 5183 } 5184 5185 /* 5186 * Inside a panic, send a message and wait for a response. 
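 * The message structures live on the caller's stack with dummy
 * "done" handlers that only decrement panic_done_count, and
 * completion is detected by polling ipmi_poll() since sleeping
 * is not possible in the panic path.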
5187 */
5188 static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
5189 struct ipmi_addr *addr,
5190 struct kernel_ipmi_msg *msg)
5191 {
5192 struct ipmi_smi_msg smi_msg;
5193 struct ipmi_recv_msg recv_msg;
5194 int rv;
5195
5196 smi_msg.done = dummy_smi_done_handler;
5197 recv_msg.done = dummy_recv_done_handler;
5198 atomic_add(2, &panic_done_count);
5199 rv = i_ipmi_request(NULL,
5200 intf,
5201 addr,
5202 0,
5203 msg,
5204 intf,
5205 &smi_msg,
5206 &recv_msg,
5207 0,
5208 intf->addrinfo[0].address,
5209 intf->addrinfo[0].lun,
5210 0, 1); /* Don't retry, and don't wait. */
5211 if (rv)
5212 atomic_sub(2, &panic_done_count);
5213 else if (intf->handlers->flush_messages)
5214 intf->handlers->flush_messages(intf->send_info);
5215
5216 while (atomic_read(&panic_done_count) != 0)
5217 ipmi_poll(intf);
5218 }
5219
5220 static void event_receiver_fetcher(struct ipmi_smi *intf,
5221 struct ipmi_recv_msg *msg)
5222 {
5223 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
5224 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
5225 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
5226 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
5227 /* A get event receiver command, save it. */
5228 intf->event_receiver = msg->msg.data[1];
5229 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
5230 }
5231 }
5232
5233 static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
5234 {
5235 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
5236 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
5237 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
5238 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
5239 /*
5240 * A get device id command, save if we are an event
5241 * receiver or generator.
5242 */
5243 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
5244 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
5245 }
5246 }
5247
5248 static void send_panic_events(struct ipmi_smi *intf, char *str)
5249 {
5250 struct kernel_ipmi_msg msg;
5251 unsigned char data[16];
5252 struct ipmi_system_interface_addr *si;
5253 struct ipmi_addr addr;
5254 char *p = str;
5255 struct ipmi_ipmb_addr *ipmb;
5256 int j;
5257
5258 if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
5259 return;
5260
5261 si = (struct ipmi_system_interface_addr *) &addr;
5262 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
5263 si->channel = IPMI_BMC_CHANNEL;
5264 si->lun = 0;
5265
5266 /* Fill in an event telling that we have failed. */
5267 msg.netfn = 0x04; /* Sensor or Event. */
5268 msg.cmd = 2; /* Platform event command. */
5269 msg.data = data;
5270 msg.data_len = 8;
5271 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
5272 data[1] = 0x03; /* This is for IPMI 1.0. */
5273 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
5274 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
5275 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
5276
5277 /*
5278 * Put a few breadcrumbs in. Hopefully later we can add more things
5279 * to make the panic events more useful.
5280 */
5281 if (str) {
5282 data[3] = str[0];
5283 data[6] = str[1];
5284 data[7] = str[2];
5285 }
5286
5287 /* Send the event announcing the panic. */
5288 ipmi_panic_request_and_wait(intf, &addr, &msg);
5289
5290 /*
5291 * On every interface, dump a bunch of OEM events holding the
5292 * string.
5293 */
5294 if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
5295 return;
5296
5297 /*
5298 * intf_num is used as a marker to tell if the
5299 * interface is valid.
Thus we need a read barrier to
5300 * make sure data fetched before checking intf_num
5301 * won't be used.
5302 */
5303 smp_rmb();
5304
5305 /*
5306 * First job here is to figure out where to send the
5307 * OEM events. There's no way in IPMI to send OEM
5308 * events using an event send command, so we have to
5309 * find the SEL to put them in and stick them in
5310 * there.
5311 */
5312
5313 /* Get capabilities from the get device id. */
5314 intf->local_sel_device = 0;
5315 intf->local_event_generator = 0;
5316 intf->event_receiver = 0;
5317
5318 /* Request the device info from the local MC. */
5319 msg.netfn = IPMI_NETFN_APP_REQUEST;
5320 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
5321 msg.data = NULL;
5322 msg.data_len = 0;
5323 intf->null_user_handler = device_id_fetcher;
5324 ipmi_panic_request_and_wait(intf, &addr, &msg);
5325
5326 if (intf->local_event_generator) {
5327 /* Request the event receiver from the local MC. */
5328 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
5329 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
5330 msg.data = NULL;
5331 msg.data_len = 0;
5332 intf->null_user_handler = event_receiver_fetcher;
5333 ipmi_panic_request_and_wait(intf, &addr, &msg);
5334 }
5335 intf->null_user_handler = NULL;
5336
5337 /*
5338 * Validate the event receiver. The low bit must not
5339 * be 1 (it must be a valid IPMB address), it cannot
5340 * be zero, and it must not be my address.
5341 */
5342 if (((intf->event_receiver & 1) == 0)
5343 && (intf->event_receiver != 0)
5344 && (intf->event_receiver != intf->addrinfo[0].address)) {
5345 /*
5346 * The event receiver is valid, send an IPMB
5347 * message.
5348 */
5349 ipmb = (struct ipmi_ipmb_addr *) &addr;
5350 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
5351 ipmb->channel = 0; /* FIXME - is this right? */
5352 ipmb->lun = intf->event_receiver_lun;
5353 ipmb->slave_addr = intf->event_receiver;
5354 } else if (intf->local_sel_device) {
5355 /*
5356 * The event receiver was not valid (or was
5357 * me), but I am an SEL device, just dump it
5358 * in my SEL.
5359 */
5360 si = (struct ipmi_system_interface_addr *) &addr;
5361 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
5362 si->channel = IPMI_BMC_CHANNEL;
5363 si->lun = 0;
5364 } else
5365 return; /* Nowhere to send the event. */
5366
5367 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
5368 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
5369 msg.data = data;
5370 msg.data_len = 16;
5371
5372 j = 0;
5373 while (*p) {
5374 int size = strlen(p);
5375
5376 if (size > 11)
5377 size = 11;
5378 data[0] = 0;
5379 data[1] = 0;
5380 data[2] = 0xf0; /* OEM event without timestamp. */
5381 data[3] = intf->addrinfo[0].address;
5382 data[4] = j++; /* sequence # */
5383 /*
5384 * Always give 11 bytes, so strncpy will fill
5385 * it with zeroes for me.
5386 */
5387 strncpy(data+5, p, 11);
5388 p += size;
5389
5390 ipmi_panic_request_and_wait(intf, &addr, &msg);
5391 }
5392 }
5393
5394 static int has_panicked;
5395
5396 static int panic_event(struct notifier_block *this,
5397 unsigned long event,
5398 void *ptr)
5399 {
5400 struct ipmi_smi *intf;
5401 struct ipmi_user *user;
5402
5403 if (has_panicked)
5404 return NOTIFY_DONE;
5405 has_panicked = 1;
5406
5407 /* For every registered interface, set it to run to completion. */
5408 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
5409 if (!intf->handlers || intf->intf_num == -1)
5410 /* Interface is not ready.
*/
5411 continue;
5412
5413 if (!intf->handlers->poll)
5414 continue;
5415
5416 /*
5417 * If we were interrupted while locking xmit_msgs_lock or
5418 * waiting_rcv_msgs_lock, the corresponding list may be
5419 * corrupted. In this case, drop items on the list for
5420 * safety.
5421 */
5422 if (!spin_trylock(&intf->xmit_msgs_lock)) {
5423 INIT_LIST_HEAD(&intf->xmit_msgs);
5424 INIT_LIST_HEAD(&intf->hp_xmit_msgs);
5425 } else
5426 spin_unlock(&intf->xmit_msgs_lock);
5427
5428 if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
5429 INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
5430 else
5431 spin_unlock(&intf->waiting_rcv_msgs_lock);
5432
5433 intf->run_to_completion = 1;
5434 if (intf->handlers->set_run_to_completion)
5435 intf->handlers->set_run_to_completion(intf->send_info,
5436 1);
5437
5438 list_for_each_entry_rcu(user, &intf->users, link) {
5439 if (user->handler->ipmi_panic_handler)
5440 user->handler->ipmi_panic_handler(
5441 user->handler_data);
5442 }
5443
5444 send_panic_events(intf, ptr);
5445 }
5446
5447 return NOTIFY_DONE;
5448 }
5449
5450 /* Must be called with ipmi_interfaces_mutex held. */
5451 static int ipmi_register_driver(void)
5452 {
5453 int rv;
5454
5455 if (drvregistered)
5456 return 0;
5457
5458 rv = driver_register(&ipmidriver.driver);
5459 if (rv)
5460 pr_err("Could not register IPMI driver\n");
5461 else
5462 drvregistered = true;
5463 return rv;
5464 }
5465
5466 static struct notifier_block panic_block = {
5467 .notifier_call = panic_event,
5468 .next = NULL,
5469 .priority = 200 /* priority: INT_MAX >= x >= 0 */
5470 };
5471
5472 static int ipmi_init_msghandler(void)
5473 {
5474 int rv;
5475
5476 mutex_lock(&ipmi_interfaces_mutex);
5477 rv = ipmi_register_driver();
5478 if (rv)
5479 goto out;
5480 if (initialized)
5481 goto out;
5482
5483 rv = init_srcu_struct(&ipmi_interfaces_srcu);
5484 if (rv)
5485 goto out;
5486
5487 remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
5488 if (!remove_work_wq) {
5489 pr_err("unable to create ipmi-msghandler-remove-wq workqueue\n");
5490 rv = -ENOMEM;
5491 goto out_wq;
5492 }
5493
5494 timer_setup(&ipmi_timer, ipmi_timeout, 0);
5495 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5496
5497 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5498
5499 initialized = true;
5500
5501 out_wq:
5502 if (rv)
5503 cleanup_srcu_struct(&ipmi_interfaces_srcu);
5504 out:
5505 mutex_unlock(&ipmi_interfaces_mutex);
5506 return rv;
5507 }
5508
5509 static int __init ipmi_init_msghandler_mod(void)
5510 {
5511 int rv;
5512
5513 pr_info("version " IPMI_DRIVER_VERSION "\n");
5514
5515 mutex_lock(&ipmi_interfaces_mutex);
5516 rv = ipmi_register_driver();
5517 mutex_unlock(&ipmi_interfaces_mutex);
5518
5519 return rv;
5520 }
5521
5522 static void __exit cleanup_ipmi(void)
5523 {
5524 int count;
5525
5526 if (initialized) {
5527 destroy_workqueue(remove_work_wq);
5528
5529 atomic_notifier_chain_unregister(&panic_notifier_list,
5530 &panic_block);
5531
5532 /*
5533 * This can't be called if any interfaces exist, so no worry
5534 * about shutting down the interfaces.
5535 */
5536
5537 /*
5538 * Tell the timer to stop, then wait for it to stop. This
5539 * avoids problems with race conditions removing the timer
5540 * here.
5541 */
5542 atomic_set(&stop_operation, 1);
5543 del_timer_sync(&ipmi_timer);
5544
5545 initialized = false;
5546
5547 /* Check for buffer leaks.
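 * The inuse counters below are incremented by the message
 * allocators and decremented by the "done" handlers, so a
 * nonzero value at module exit means messages were leaked.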
*/ 5548 count = atomic_read(&smi_msg_inuse_count); 5549 if (count != 0) 5550 pr_warn("SMI message count %d at exit\n", count); 5551 count = atomic_read(&recv_msg_inuse_count); 5552 if (count != 0) 5553 pr_warn("recv message count %d at exit\n", count); 5554 5555 cleanup_srcu_struct(&ipmi_interfaces_srcu); 5556 } 5557 if (drvregistered) 5558 driver_unregister(&ipmidriver.driver); 5559 } 5560 module_exit(cleanup_ipmi); 5561 5562 module_init(ipmi_init_msghandler_mod); 5563 MODULE_LICENSE("GPL"); 5564 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 5565 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface."); 5566 MODULE_VERSION(IPMI_DRIVER_VERSION); 5567 MODULE_SOFTDEP("post: ipmi_devintf"); 5568