// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
#define dev_fmt pr_fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(struct tasklet_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);

static bool initialized;
static bool drvregistered;

/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING,
	IPMI_SEND_PANIC_EVENT_MAX
};

/* Indices in this array should be mapped to enum ipmi_panic_event_op */
static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };

#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	int e;

	strscpy(valcp, val, sizeof(valcp));
	e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
	if (e < 0)
		return e;

	ipmi_send_panic_event = e;
	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	const char *event_str;

	if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
		event_str = "???";
	else
		event_str = ipmi_panic_event_str[ipmi_send_panic_event];

	return sprintf(buffer, "%s\n", event_str);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
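
/*
 * For illustration (assuming the usual "ipmi_msghandler" module name):
 * with the handlers above, panic_op can be changed at runtime by a
 * privileged user, e.g.
 *
 *	echo string > /sys/module/ipmi_msghandler/parameters/panic_op
 *
 * panic_op_write_handler() strips whitespace and matches the value
 * against ipmi_panic_event_str[], so only "none", "event", or "string"
 * are accepted.
 */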

#define MAX_EVENTS_IN_QUEUE	25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at
 * least the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The default maximum number of times a message is retried");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	/*
	 * Set to NULL when the user is destroyed, a pointer to myself
	 * so srcu_dereference can be used on it.
	 */
	struct ipmi_user *self;
	struct srcu_struct release_barrier;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;

	/* Free must run in process context for RCU cleanup. */
	struct work_struct remove_work;
};
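
/*
 * Sketch of the access pattern used throughout this file (for
 * illustration): every use of a user is bracketed by
 * acquire_ipmi_user()/release_ipmi_user().  Because user->self is set
 * to NULL on destruction, a NULL return tells the caller the user is
 * going away:
 *
 *	int index;
 *
 *	user = acquire_ipmi_user(user, &index);
 *	if (!user)
 *		return -ENODEV;
 *	...
 *	release_ipmi_user(user, index);
 */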

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
	__acquires(user->release_barrier)
{
	struct ipmi_user *ruser;

	*index = srcu_read_lock(&user->release_barrier);
	ruser = srcu_dereference(user->self, &user->release_barrier);
	if (!ruser)
		srcu_read_unlock(&user->release_barrier, *index);
	return ruser;
}

static void release_ipmi_user(struct ipmi_user *user, int index)
{
	srcu_read_unlock(&user->release_barrier, index);
}

struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)			\
	do {							\
		seq = (((msgid) >> 26) & 0x3f);			\
		seqid = ((msgid) & 0x3ffffff);			\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)

#define IPMI_MAX_CHANNELS 16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id id;
	struct ipmi_device_id fetch_id;
	int dyn_id_set;
	unsigned long dyn_id_expiry;
	struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t guid;
	guid_t fetch_guid;
	int dyn_guid_set;
	struct kref usecount;
	struct work_struct remove_work;
	unsigned char cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out to the LAN interface. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent to the LAN interface. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};
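
/*
 * For illustration: these indices are only used through the
 * ipmi_inc_stat()/ipmi_get_stat() macros defined further down, e.g.
 *
 *	ipmi_inc_stat(intf, sent_invalid_commands);
 *
 * which expands to
 * atomic_inc(&(intf)->stats[IPMI_STAT_sent_invalid_commands]).
 */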

#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.  seq_lock write
	 * protects this.  Read protection is with srcu.
	 */
	struct list_head users;
	struct srcu_struct users_srcu;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void *send_info;

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery.  If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
	 * periodic timer interrupt.  The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	spinlock_t xmit_msgs_lock;
	struct list_head xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int waiting_events_count; /* How many events in queue? */
	char delivering_events;
	char event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;

	spinlock_t watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs.  Protected by watch_lock.
	 */
	unsigned int last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * A duplicate of the run_to_completion flag in the smb_info,
	 * smi_info, and ipmi_serial_info structures.  Used to decrease
	 * the number of parameters passed by the "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);


/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
#define ipmi_interfaces_mutex_held() \
	lockdep_is_held(&ipmi_interfaces_mutex)
static struct srcu_struct ipmi_interfaces_srcu;
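
/*
 * Writers of ipmi_interfaces must hold ipmi_interfaces_mutex; readers
 * walk the list under srcu_read_lock(&ipmi_interfaces_srcu).  See
 * ipmi_create_user() and ipmi_get_smi_info() below for the read-side
 * pattern.
 */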

/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static int is_ipmb_direct_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void clean_up_interface_data(struct ipmi_smi *intf)
{
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;
	struct list_head list;

	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);

	clean_up_interface_data(intf);
	kfree(intf);
}

struct watcher_entry {
	int intf_num;
	struct ipmi_smi *intf;
	struct list_head link;
};

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	int index, rv;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
733 */ 734 rv = ipmi_init_msghandler(); 735 if (rv) 736 return rv; 737 738 mutex_lock(&smi_watchers_mutex); 739 740 list_add(&watcher->link, &smi_watchers); 741 742 index = srcu_read_lock(&ipmi_interfaces_srcu); 743 list_for_each_entry_rcu(intf, &ipmi_interfaces, link, 744 lockdep_is_held(&smi_watchers_mutex)) { 745 int intf_num = READ_ONCE(intf->intf_num); 746 747 if (intf_num == -1) 748 continue; 749 watcher->new_smi(intf_num, intf->si_dev); 750 } 751 srcu_read_unlock(&ipmi_interfaces_srcu, index); 752 753 mutex_unlock(&smi_watchers_mutex); 754 755 return 0; 756 } 757 EXPORT_SYMBOL(ipmi_smi_watcher_register); 758 759 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) 760 { 761 mutex_lock(&smi_watchers_mutex); 762 list_del(&watcher->link); 763 mutex_unlock(&smi_watchers_mutex); 764 return 0; 765 } 766 EXPORT_SYMBOL(ipmi_smi_watcher_unregister); 767 768 /* 769 * Must be called with smi_watchers_mutex held. 770 */ 771 static void 772 call_smi_watchers(int i, struct device *dev) 773 { 774 struct ipmi_smi_watcher *w; 775 776 mutex_lock(&smi_watchers_mutex); 777 list_for_each_entry(w, &smi_watchers, link) { 778 if (try_module_get(w->owner)) { 779 w->new_smi(i, dev); 780 module_put(w->owner); 781 } 782 } 783 mutex_unlock(&smi_watchers_mutex); 784 } 785 786 static int 787 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2) 788 { 789 if (addr1->addr_type != addr2->addr_type) 790 return 0; 791 792 if (addr1->channel != addr2->channel) 793 return 0; 794 795 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 796 struct ipmi_system_interface_addr *smi_addr1 797 = (struct ipmi_system_interface_addr *) addr1; 798 struct ipmi_system_interface_addr *smi_addr2 799 = (struct ipmi_system_interface_addr *) addr2; 800 return (smi_addr1->lun == smi_addr2->lun); 801 } 802 803 if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) { 804 struct ipmi_ipmb_addr *ipmb_addr1 805 = (struct ipmi_ipmb_addr *) addr1; 806 struct ipmi_ipmb_addr *ipmb_addr2 807 = (struct ipmi_ipmb_addr *) addr2; 808 809 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr) 810 && (ipmb_addr1->lun == ipmb_addr2->lun)); 811 } 812 813 if (is_ipmb_direct_addr(addr1)) { 814 struct ipmi_ipmb_direct_addr *daddr1 815 = (struct ipmi_ipmb_direct_addr *) addr1; 816 struct ipmi_ipmb_direct_addr *daddr2 817 = (struct ipmi_ipmb_direct_addr *) addr2; 818 819 return daddr1->slave_addr == daddr2->slave_addr && 820 daddr1->rq_lun == daddr2->rq_lun && 821 daddr1->rs_lun == daddr2->rs_lun; 822 } 823 824 if (is_lan_addr(addr1)) { 825 struct ipmi_lan_addr *lan_addr1 826 = (struct ipmi_lan_addr *) addr1; 827 struct ipmi_lan_addr *lan_addr2 828 = (struct ipmi_lan_addr *) addr2; 829 830 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID) 831 && (lan_addr1->local_SWID == lan_addr2->local_SWID) 832 && (lan_addr1->session_handle 833 == lan_addr2->session_handle) 834 && (lan_addr1->lun == lan_addr2->lun)); 835 } 836 837 return 1; 838 } 839 840 int ipmi_validate_addr(struct ipmi_addr *addr, int len) 841 { 842 if (len < sizeof(struct ipmi_system_interface_addr)) 843 return -EINVAL; 844 845 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 846 if (addr->channel != IPMI_BMC_CHANNEL) 847 return -EINVAL; 848 return 0; 849 } 850 851 if ((addr->channel == IPMI_BMC_CHANNEL) 852 || (addr->channel >= IPMI_MAX_CHANNELS) 853 || (addr->channel < 0)) 854 return -EINVAL; 855 856 if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 857 if (len < sizeof(struct ipmi_ipmb_addr)) 858 return -EINVAL; 859 return 0; 860 } 861 862 if 

	if (is_ipmb_direct_addr(addr)) {
		struct ipmi_ipmb_direct_addr *daddr = (void *) addr;

		if (addr->channel != 0)
			return -EINVAL;
		if (len < sizeof(struct ipmi_ipmb_direct_addr))
			return -EINVAL;

		if (daddr->slave_addr & 0x01)
			return -EINVAL;
		if (daddr->rq_lun >= 4)
			return -EINVAL;
		if (daddr->rs_lun >= 4)
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
		return sizeof(struct ipmi_ipmb_direct_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk.  At this moment, simply skip it in
		 * that case.
		 */
		ipmi_free_recv_msg(msg);
	} else {
		int index;
		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);

		if (user) {
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
			release_ipmi_user(user, index);
		} else {
			/* User went away, give up. */
			ipmi_free_recv_msg(msg);
			rv = -EINVAL;
		}
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}

static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
			 int retries,
			 int broadcast,
			 unsigned char *seq,
			 long *seqid)
{
	int rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}
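
/*
 * For illustration: a sequence number allocated here is packed into
 * the outgoing msgid with STORE_SEQ_IN_MSGID(seq, seqid) (6 bits of
 * seq in bits 26-31, 26 bits of seqid in bits 0-25) and recovered
 * from a response with GET_SEQ_FROM_MSGID() before intf_find_seq()
 * is consulted.
 */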

/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi *intf,
			 unsigned char seq,
			 short channel,
			 unsigned char cmd,
			 unsigned char netfn,
			 struct ipmi_addr *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long msgid)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long msgid,
			unsigned int err)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}

static void free_user_work(struct work_struct *work)
{
	struct ipmi_user *user = container_of(work, struct ipmi_user,
					      remove_work);

	cleanup_srcu_struct(&user->release_barrier);
	vfree(user);
}

int ipmi_create_user(unsigned int if_num,
		     const struct ipmi_user_hndl *handler,
		     void *handler_data,
		     struct ipmi_user **user)
{
	unsigned long flags;
	struct ipmi_user *new_user;
	int rv, index;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.
	 * Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	new_user = vzalloc(sizeof(*new_user));
	if (!new_user)
		return -ENOMEM;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

 found:
	INIT_WORK(&new_user->remove_work, free_user_work);

	rv = init_srcu_struct(&new_user->release_barrier);
	if (rv)
		goto out_kfree;

	if (!try_module_get(intf->owner)) {
		rv = -ENODEV;
		goto out_kfree;
	}

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	rcu_assign_pointer(new_user->self, new_user);
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	*user = new_user;
	return 0;

out_kfree:
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	vfree(new_user);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);

int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv, index;
	struct ipmi_smi *intf;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	/* Not found, return an error */
	return -EINVAL;

found:
	if (!intf->handlers->get_smi_info)
		rv = -ENOTTY;
	else
		rv = intf->handlers->get_smi_info(intf->send_info, data);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

static void free_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

	/* SRCU cleanup must happen in task context. */
	schedule_work(&user->remove_work);
}

static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;
	int i;
	unsigned long flags;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;

	if (!acquire_ipmi_user(user, &i)) {
		/*
		 * The user has already been cleaned up, just make sure
		 * nothing is using it and return.
		 */
		synchronize_srcu(&user->release_barrier);
		return;
	}

	rcu_assign_pointer(user->self, NULL);
	release_ipmi_user(user, i);

	synchronize_srcu(&user->release_barrier);

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_srcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	kref_put(&intf->refcount, intf_free);
	module_put(intf->owner);
}

int ipmi_destroy_user(struct ipmi_user *user)
{
	_ipmi_destroy_user(user);

	kref_put(&user->refcount, free_user);

	return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);

int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);
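
/*
 * For illustration (hypothetical caller): resetting the slave address
 * used on channel 0 to the default would look like
 *
 *	rv = ipmi_set_my_address(user, 0, IPMI_BMC_SLAVE_ADDR);
 *
 * In all of these accessors, channel is bounds-checked and then
 * sanitized with array_index_nospec() before indexing addrinfo[].
 */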

int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char LUN)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);

int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode, index;
	unsigned long flags;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}

int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0, index;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_enable = false;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_enable = true;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
 out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);

int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;
	int index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_local_response(intf, msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);
	release_ipmi_user(user, index);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);

static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & chans))
			return 0;
	}
	return 1;
}
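
/*
 * For illustration: chans is a bitmask of channels, so a caller
 * registering for a command on channels 0 and 2 passes
 * chans == ((1 << 0) | (1 << 2)).  A new registration conflicts only
 * if it shares a channel bit with an existing receiver for the same
 * netfn/cmd, which is what is_cmd_rcvr_exclusive() checks above.
 */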

int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
out_release:
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);

int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user, index);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
EXPORT_SYMBOL(ipmb_checksum);

static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
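
/*
 * Resulting SMI buffer layout for a non-broadcast IPMB request (i == 0),
 * for illustration:
 *
 *	data[0]  netfn (APP request)	data[5]  checksum over data[3..4]
 *	data[1]  Send Message cmd	data[6]  source (requester) addr
 *	data[2]  channel		data[7]  seq << 2 | source LUN
 *	data[3]  dest slave addr	data[8]  cmd
 *	data[4]  netfn << 2 | LUN	data[9..] payload
 *
 * with a trailing checksum computed over data[6] onward.  A broadcast
 * shifts everything after data[2] up by one and puts a zero in data[3].
 */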

static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  long msgid,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
{
	/* Format the LAN header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/* Add on the checksum size. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = intf->run_to_completion;
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
			      struct ipmi_addr *addr,
			      long msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg *smi_msg,
			      struct ipmi_recv_msg *recv_msg,
			      int retries,
			      unsigned int retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout
			= maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && !intf->maintenance_mode_enable) {
			intf->maintenance_mode_enable = true;
			maintenance_mode_update(intf);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->user_data = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}

static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
			   struct ipmi_addr *addr,
			   long msgid,
			   struct kernel_ipmi_msg *msg,
			   struct ipmi_smi_msg *smi_msg,
			   struct ipmi_recv_msg *recv_msg,
			   unsigned char source_address,
			   unsigned char source_lun,
			   int retries,
			   unsigned int retry_time_ms)
{
	struct ipmi_ipmb_addr *ipmb_addr;
	unsigned char ipmb_seq;
	long seqid;
	int broadcast = 0;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
		/*
		 * Broadcasts add a zero at the beginning of the
		 * message, but otherwise it is the same as an IPMB
		 * address.
		 */
		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
		broadcast = 1;
		retries = 0; /* Don't retry broadcasts. */
	}

	/*
	 * 9 for the header and 1 for the checksum, plus
	 * possibly one for the broadcast.
	 */
	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
	if (ipmb_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_ipmb_responses);
		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
				msgid, broadcast,
				source_address, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
*/ 2025 unsigned long flags; 2026 2027 spin_lock_irqsave(&intf->seq_lock, flags); 2028 2029 if (is_maintenance_mode_cmd(msg)) 2030 intf->ipmb_maintenance_mode_timeout = 2031 maintenance_mode_timeout_ms; 2032 2033 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0) 2034 /* Different default in maintenance mode */ 2035 retry_time_ms = default_maintenance_retry_ms; 2036 2037 /* 2038 * Create a sequence number, using the given 2039 * timeout and retry count. 2040 */ 2041 rv = intf_next_seq(intf, 2042 recv_msg, 2043 retry_time_ms, 2044 retries, 2045 broadcast, 2046 &ipmb_seq, 2047 &seqid); 2048 if (rv) 2049 /* 2050 * We have probably used up all the sequence 2051 * numbers, so abort. 2052 */ 2053 goto out_err; 2054 2055 ipmi_inc_stat(intf, sent_ipmb_commands); 2056 2057 /* 2058 * Store the sequence number in the message, 2059 * so that when the send message response 2060 * comes back we can start the timer. 2061 */ 2062 format_ipmb_msg(smi_msg, msg, ipmb_addr, 2063 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2064 ipmb_seq, broadcast, 2065 source_address, source_lun); 2066 2067 /* 2068 * Copy the message into the recv message data, so we 2069 * can retransmit it later if necessary. 2070 */ 2071 memcpy(recv_msg->msg_data, smi_msg->data, 2072 smi_msg->data_size); 2073 recv_msg->msg.data = recv_msg->msg_data; 2074 recv_msg->msg.data_len = smi_msg->data_size; 2075 2076 /* 2077 * We don't unlock until here, because we need 2078 * to copy the completed message into the 2079 * recv_msg before we release the lock. 2080 * Otherwise, race conditions may bite us. I 2081 * know that's pretty paranoid, but I prefer 2082 * to be correct. 2083 */ 2084 out_err: 2085 spin_unlock_irqrestore(&intf->seq_lock, flags); 2086 } 2087 2088 return rv; 2089 } 2090 2091 static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf, 2092 struct ipmi_addr *addr, 2093 long msgid, 2094 struct kernel_ipmi_msg *msg, 2095 struct ipmi_smi_msg *smi_msg, 2096 struct ipmi_recv_msg *recv_msg, 2097 unsigned char source_lun) 2098 { 2099 struct ipmi_ipmb_direct_addr *daddr; 2100 bool is_cmd = !(recv_msg->msg.netfn & 0x1); 2101 2102 if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT)) 2103 return -EAFNOSUPPORT; 2104 2105 /* Responses must have a completion code.
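(for a response, data[0] is the completion code byte, so a zero-length body can never be valid)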
*/ 2106 if (!is_cmd && msg->data_len < 1) { 2107 ipmi_inc_stat(intf, sent_invalid_commands); 2108 return -EINVAL; 2109 } 2110 2111 if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) { 2112 ipmi_inc_stat(intf, sent_invalid_commands); 2113 return -EMSGSIZE; 2114 } 2115 2116 daddr = (struct ipmi_ipmb_direct_addr *) addr; 2117 if (daddr->rq_lun > 3 || daddr->rs_lun > 3) { 2118 ipmi_inc_stat(intf, sent_invalid_commands); 2119 return -EINVAL; 2120 } 2121 2122 smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT; 2123 smi_msg->msgid = msgid; 2124 2125 if (is_cmd) { 2126 smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun; 2127 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun; 2128 } else { 2129 smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun; 2130 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun; 2131 } 2132 smi_msg->data[1] = daddr->slave_addr; 2133 smi_msg->data[3] = msg->cmd; 2134 2135 memcpy(smi_msg->data + 4, msg->data, msg->data_len); 2136 smi_msg->data_size = msg->data_len + 4; 2137 2138 smi_msg->user_data = recv_msg; 2139 2140 return 0; 2141 } 2142 2143 static int i_ipmi_req_lan(struct ipmi_smi *intf, 2144 struct ipmi_addr *addr, 2145 long msgid, 2146 struct kernel_ipmi_msg *msg, 2147 struct ipmi_smi_msg *smi_msg, 2148 struct ipmi_recv_msg *recv_msg, 2149 unsigned char source_lun, 2150 int retries, 2151 unsigned int retry_time_ms) 2152 { 2153 struct ipmi_lan_addr *lan_addr; 2154 unsigned char ipmb_seq; 2155 long seqid; 2156 struct ipmi_channel *chans; 2157 int rv = 0; 2158 2159 if (addr->channel >= IPMI_MAX_CHANNELS) { 2160 ipmi_inc_stat(intf, sent_invalid_commands); 2161 return -EINVAL; 2162 } 2163 2164 chans = READ_ONCE(intf->channel_list)->c; 2165 2166 if ((chans[addr->channel].medium 2167 != IPMI_CHANNEL_MEDIUM_8023LAN) 2168 && (chans[addr->channel].medium 2169 != IPMI_CHANNEL_MEDIUM_ASYNC)) { 2170 ipmi_inc_stat(intf, sent_invalid_commands); 2171 return -EINVAL; 2172 } 2173 2174 /* 11 for the header and 1 for the checksum. */ 2175 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { 2176 ipmi_inc_stat(intf, sent_invalid_commands); 2177 return -EMSGSIZE; 2178 } 2179 2180 lan_addr = (struct ipmi_lan_addr *) addr; 2181 if (lan_addr->lun > 3) { 2182 ipmi_inc_stat(intf, sent_invalid_commands); 2183 return -EINVAL; 2184 } 2185 2186 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); 2187 2188 if (recv_msg->msg.netfn & 0x1) { 2189 /* 2190 * It's a response, so use the user's sequence 2191 * from msgid. 2192 */ 2193 ipmi_inc_stat(intf, sent_lan_responses); 2194 format_lan_msg(smi_msg, msg, lan_addr, msgid, 2195 msgid, source_lun); 2196 2197 /* 2198 * Save the receive message so we can use it 2199 * to deliver the response. 2200 */ 2201 smi_msg->user_data = recv_msg; 2202 } else { 2203 /* It's a command, so get a sequence for it. */ 2204 unsigned long flags; 2205 2206 spin_lock_irqsave(&intf->seq_lock, flags); 2207 2208 /* 2209 * Create a sequence number, using the given 2210 * timeout and retry count. 2211 */ 2212 rv = intf_next_seq(intf, 2213 recv_msg, 2214 retry_time_ms, 2215 retries, 2216 0, 2217 &ipmb_seq, 2218 &seqid); 2219 if (rv) 2220 /* 2221 * We have probably used up all the sequence 2222 * numbers, so abort. 2223 */ 2224 goto out_err; 2225 2226 ipmi_inc_stat(intf, sent_lan_commands); 2227 2228 /* 2229 * Store the sequence number in the message, 2230 * so that when the send message response 2231 * comes back we can start the timer.
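* The ipmb_seq/seqid pair is packed into the msgid via STORE_SEQ_IN_MSGID() below so that the response handler can recover which sequence-table slot the reply belongs to.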
2232 */ 2233 format_lan_msg(smi_msg, msg, lan_addr, 2234 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2235 ipmb_seq, source_lun); 2236 2237 /* 2238 * Copy the message into the recv message data, so we 2239 * can retransmit it later if necessary. 2240 */ 2241 memcpy(recv_msg->msg_data, smi_msg->data, 2242 smi_msg->data_size); 2243 recv_msg->msg.data = recv_msg->msg_data; 2244 recv_msg->msg.data_len = smi_msg->data_size; 2245 2246 /* 2247 * We don't unlock until here, because we need 2248 * to copy the completed message into the 2249 * recv_msg before we release the lock. 2250 * Otherwise, race conditions may bite us. I 2251 * know that's pretty paranoid, but I prefer 2252 * to be correct. 2253 */ 2254 out_err: 2255 spin_unlock_irqrestore(&intf->seq_lock, flags); 2256 } 2257 2258 return rv; 2259 } 2260 2261 /* 2262 * Separate from ipmi_request so that the user does not have to be 2263 * supplied in certain circumstances (mainly at panic time). If 2264 * messages are supplied, they will be freed, even if an error 2265 * occurs. 2266 */ 2267 static int i_ipmi_request(struct ipmi_user *user, 2268 struct ipmi_smi *intf, 2269 struct ipmi_addr *addr, 2270 long msgid, 2271 struct kernel_ipmi_msg *msg, 2272 void *user_msg_data, 2273 void *supplied_smi, 2274 struct ipmi_recv_msg *supplied_recv, 2275 int priority, 2276 unsigned char source_address, 2277 unsigned char source_lun, 2278 int retries, 2279 unsigned int retry_time_ms) 2280 { 2281 struct ipmi_smi_msg *smi_msg; 2282 struct ipmi_recv_msg *recv_msg; 2283 int rv = 0; 2284 2285 if (supplied_recv) 2286 recv_msg = supplied_recv; 2287 else { 2288 recv_msg = ipmi_alloc_recv_msg(); 2289 if (recv_msg == NULL) { 2290 rv = -ENOMEM; 2291 goto out; 2292 } 2293 } 2294 recv_msg->user_msg_data = user_msg_data; 2295 2296 if (supplied_smi) 2297 smi_msg = (struct ipmi_smi_msg *) supplied_smi; 2298 else { 2299 smi_msg = ipmi_alloc_smi_msg(); 2300 if (smi_msg == NULL) { 2301 if (!supplied_recv) 2302 ipmi_free_recv_msg(recv_msg); 2303 rv = -ENOMEM; 2304 goto out; 2305 } 2306 } 2307 2308 rcu_read_lock(); 2309 if (intf->in_shutdown) { 2310 rv = -ENODEV; 2311 goto out_err; 2312 } 2313 2314 recv_msg->user = user; 2315 if (user) 2316 /* The put happens when the message is freed. */ 2317 kref_get(&user->refcount); 2318 recv_msg->msgid = msgid; 2319 /* 2320 * Store the message to send in the receive message so timeout 2321 * responses can get the proper response data. 2322 */ 2323 recv_msg->msg = *msg; 2324 2325 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 2326 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, 2327 recv_msg, retries, retry_time_ms); 2328 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 2329 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, 2330 source_address, source_lun, 2331 retries, retry_time_ms); 2332 } else if (is_ipmb_direct_addr(addr)) { 2333 rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg, 2334 recv_msg, source_lun); 2335 } else if (is_lan_addr(addr)) { 2336 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, 2337 source_lun, retries, retry_time_ms); 2338 } else { 2339 /* Unknown address type. 
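(addr->addr_type matched none of the system-interface, IPMB, IPMB-direct, or LAN cases above)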
*/ 2340 ipmi_inc_stat(intf, sent_invalid_commands); 2341 rv = -EINVAL; 2342 } 2343 2344 if (rv) { 2345 out_err: 2346 ipmi_free_smi_msg(smi_msg); 2347 ipmi_free_recv_msg(recv_msg); 2348 } else { 2349 pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data); 2350 2351 smi_send(intf, intf->handlers, smi_msg, priority); 2352 } 2353 rcu_read_unlock(); 2354 2355 out: 2356 return rv; 2357 } 2358 2359 static int check_addr(struct ipmi_smi *intf, 2360 struct ipmi_addr *addr, 2361 unsigned char *saddr, 2362 unsigned char *lun) 2363 { 2364 if (addr->channel >= IPMI_MAX_CHANNELS) 2365 return -EINVAL; 2366 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); 2367 *lun = intf->addrinfo[addr->channel].lun; 2368 *saddr = intf->addrinfo[addr->channel].address; 2369 return 0; 2370 } 2371 2372 int ipmi_request_settime(struct ipmi_user *user, 2373 struct ipmi_addr *addr, 2374 long msgid, 2375 struct kernel_ipmi_msg *msg, 2376 void *user_msg_data, 2377 int priority, 2378 int retries, 2379 unsigned int retry_time_ms) 2380 { 2381 unsigned char saddr = 0, lun = 0; 2382 int rv, index; 2383 2384 if (!user) 2385 return -EINVAL; 2386 2387 user = acquire_ipmi_user(user, &index); 2388 if (!user) 2389 return -ENODEV; 2390 2391 rv = check_addr(user->intf, addr, &saddr, &lun); 2392 if (!rv) 2393 rv = i_ipmi_request(user, 2394 user->intf, 2395 addr, 2396 msgid, 2397 msg, 2398 user_msg_data, 2399 NULL, NULL, 2400 priority, 2401 saddr, 2402 lun, 2403 retries, 2404 retry_time_ms); 2405 2406 release_ipmi_user(user, index); 2407 return rv; 2408 } 2409 EXPORT_SYMBOL(ipmi_request_settime); 2410 2411 int ipmi_request_supply_msgs(struct ipmi_user *user, 2412 struct ipmi_addr *addr, 2413 long msgid, 2414 struct kernel_ipmi_msg *msg, 2415 void *user_msg_data, 2416 void *supplied_smi, 2417 struct ipmi_recv_msg *supplied_recv, 2418 int priority) 2419 { 2420 unsigned char saddr = 0, lun = 0; 2421 int rv, index; 2422 2423 if (!user) 2424 return -EINVAL; 2425 2426 user = acquire_ipmi_user(user, &index); 2427 if (!user) 2428 return -ENODEV; 2429 2430 rv = check_addr(user->intf, addr, &saddr, &lun); 2431 if (!rv) 2432 rv = i_ipmi_request(user, 2433 user->intf, 2434 addr, 2435 msgid, 2436 msg, 2437 user_msg_data, 2438 supplied_smi, 2439 supplied_recv, 2440 priority, 2441 saddr, 2442 lun, 2443 -1, 0); 2444 2445 release_ipmi_user(user, index); 2446 return rv; 2447 } 2448 EXPORT_SYMBOL(ipmi_request_supply_msgs); 2449 2450 static void bmc_device_id_handler(struct ipmi_smi *intf, 2451 struct ipmi_recv_msg *msg) 2452 { 2453 int rv; 2454 2455 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2456 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2457 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { 2458 dev_warn(intf->si_dev, 2459 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", 2460 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); 2461 return; 2462 } 2463 2464 if (msg->msg.data[0]) { 2465 dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n", 2466 msg->msg.data[0]); 2467 intf->bmc->dyn_id_set = 0; 2468 goto out; 2469 } 2470 2471 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, 2472 msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); 2473 if (rv) { 2474 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); 2475 /* Record the completion code on error. */ 2476 intf->bmc->cc = msg->msg.data[0]; 2477 intf->bmc->dyn_id_set = 0; 2478 } else { 2479 /* 2480 * Make sure the id data is available before setting 2481 * dyn_id_set.
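* The write barrier below pairs with the smp_rmb() in __get_device_id(); a reader that observes dyn_id_set == 1 is then guaranteed to also observe the fetch_id data written above.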
2482 */ 2483 smp_wmb(); 2484 intf->bmc->dyn_id_set = 1; 2485 } 2486 out: 2487 wake_up(&intf->waitq); 2488 } 2489 2490 static int 2491 send_get_device_id_cmd(struct ipmi_smi *intf) 2492 { 2493 struct ipmi_system_interface_addr si; 2494 struct kernel_ipmi_msg msg; 2495 2496 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2497 si.channel = IPMI_BMC_CHANNEL; 2498 si.lun = 0; 2499 2500 msg.netfn = IPMI_NETFN_APP_REQUEST; 2501 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 2502 msg.data = NULL; 2503 msg.data_len = 0; 2504 2505 return i_ipmi_request(NULL, 2506 intf, 2507 (struct ipmi_addr *) &si, 2508 0, 2509 &msg, 2510 intf, 2511 NULL, 2512 NULL, 2513 0, 2514 intf->addrinfo[0].address, 2515 intf->addrinfo[0].lun, 2516 -1, 0); 2517 } 2518 2519 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) 2520 { 2521 int rv; 2522 unsigned int retry_count = 0; 2523 2524 intf->null_user_handler = bmc_device_id_handler; 2525 2526 retry: 2527 bmc->cc = 0; 2528 bmc->dyn_id_set = 2; 2529 2530 rv = send_get_device_id_cmd(intf); 2531 if (rv) 2532 goto out_reset_handler; 2533 2534 wait_event(intf->waitq, bmc->dyn_id_set != 2); 2535 2536 if (!bmc->dyn_id_set) { 2537 if (bmc->cc != IPMI_CC_NO_ERROR && 2538 ++retry_count <= GET_DEVICE_ID_MAX_RETRY) { 2539 msleep(500); 2540 dev_warn(intf->si_dev, 2541 "BMC returned 0x%2.2x, retry get bmc device id\n", 2542 bmc->cc); 2543 goto retry; 2544 } 2545 2546 rv = -EIO; /* Something went wrong in the fetch. */ 2547 } 2548 2549 /* dyn_id_set makes the id data available. */ 2550 smp_rmb(); 2551 2552 out_reset_handler: 2553 intf->null_user_handler = NULL; 2554 2555 return rv; 2556 } 2557 2558 /* 2559 * Fetch the device id for the bmc/interface. You must pass in either 2560 * bmc or intf, this code will get the other one. If the data has 2561 * been recently fetched, this will just use the cached data. Otherwise 2562 * it will run a new fetch. 2563 * 2564 * Except for the first time this is called (in ipmi_add_smi()), 2565 * this will always return good data; 2566 */ 2567 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2568 struct ipmi_device_id *id, 2569 bool *guid_set, guid_t *guid, int intf_num) 2570 { 2571 int rv = 0; 2572 int prev_dyn_id_set, prev_guid_set; 2573 bool intf_set = intf != NULL; 2574 2575 if (!intf) { 2576 mutex_lock(&bmc->dyn_mutex); 2577 retry_bmc_lock: 2578 if (list_empty(&bmc->intfs)) { 2579 mutex_unlock(&bmc->dyn_mutex); 2580 return -ENOENT; 2581 } 2582 intf = list_first_entry(&bmc->intfs, struct ipmi_smi, 2583 bmc_link); 2584 kref_get(&intf->refcount); 2585 mutex_unlock(&bmc->dyn_mutex); 2586 mutex_lock(&intf->bmc_reg_mutex); 2587 mutex_lock(&bmc->dyn_mutex); 2588 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi, 2589 bmc_link)) { 2590 mutex_unlock(&intf->bmc_reg_mutex); 2591 kref_put(&intf->refcount, intf_free); 2592 goto retry_bmc_lock; 2593 } 2594 } else { 2595 mutex_lock(&intf->bmc_reg_mutex); 2596 bmc = intf->bmc; 2597 mutex_lock(&bmc->dyn_mutex); 2598 kref_get(&intf->refcount); 2599 } 2600 2601 /* If we have a valid and current ID, just return that. */ 2602 if (intf->in_bmc_register || 2603 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry))) 2604 goto out_noprocessing; 2605 2606 prev_guid_set = bmc->dyn_guid_set; 2607 __get_guid(intf); 2608 2609 prev_dyn_id_set = bmc->dyn_id_set; 2610 rv = __get_device_id(intf, bmc); 2611 if (rv) 2612 goto out; 2613 2614 /* 2615 * The guid, device id, manufacturer id, and product id should 2616 * not change on a BMC. If it does we have to do some dancing. 
2617 */ 2618 if (!intf->bmc_registered 2619 || (!prev_guid_set && bmc->dyn_guid_set) 2620 || (!prev_dyn_id_set && bmc->dyn_id_set) 2621 || (prev_guid_set && bmc->dyn_guid_set 2622 && !guid_equal(&bmc->guid, &bmc->fetch_guid)) 2623 || bmc->id.device_id != bmc->fetch_id.device_id 2624 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id 2625 || bmc->id.product_id != bmc->fetch_id.product_id) { 2626 struct ipmi_device_id id = bmc->fetch_id; 2627 int guid_set = bmc->dyn_guid_set; 2628 guid_t guid; 2629 2630 guid = bmc->fetch_guid; 2631 mutex_unlock(&bmc->dyn_mutex); 2632 2633 __ipmi_bmc_unregister(intf); 2634 /* Fill in the temporary BMC for good measure. */ 2635 intf->bmc->id = id; 2636 intf->bmc->dyn_guid_set = guid_set; 2637 intf->bmc->guid = guid; 2638 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num)) 2639 need_waiter(intf); /* Retry later on an error. */ 2640 else 2641 __scan_channels(intf, &id); 2642 2643 2644 if (!intf_set) { 2645 /* 2646 * We weren't given the interface on the 2647 * command line, so restart the operation on 2648 * the next interface for the BMC. 2649 */ 2650 mutex_unlock(&intf->bmc_reg_mutex); 2651 mutex_lock(&bmc->dyn_mutex); 2652 goto retry_bmc_lock; 2653 } 2654 2655 /* We have a new BMC, set it up. */ 2656 bmc = intf->bmc; 2657 mutex_lock(&bmc->dyn_mutex); 2658 goto out_noprocessing; 2659 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id))) 2660 /* Version info changes, scan the channels again. */ 2661 __scan_channels(intf, &bmc->fetch_id); 2662 2663 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 2664 2665 out: 2666 if (rv && prev_dyn_id_set) { 2667 rv = 0; /* Ignore failures if we have previous data. */ 2668 bmc->dyn_id_set = prev_dyn_id_set; 2669 } 2670 if (!rv) { 2671 bmc->id = bmc->fetch_id; 2672 if (bmc->dyn_guid_set) 2673 bmc->guid = bmc->fetch_guid; 2674 else if (prev_guid_set) 2675 /* 2676 * The guid used to be valid and it failed to fetch, 2677 * just use the cached value. 
2678 */ 2679 bmc->dyn_guid_set = prev_guid_set; 2680 } 2681 out_noprocessing: 2682 if (!rv) { 2683 if (id) 2684 *id = bmc->id; 2685 2686 if (guid_set) 2687 *guid_set = bmc->dyn_guid_set; 2688 2689 if (guid && bmc->dyn_guid_set) 2690 *guid = bmc->guid; 2691 } 2692 2693 mutex_unlock(&bmc->dyn_mutex); 2694 mutex_unlock(&intf->bmc_reg_mutex); 2695 2696 kref_put(&intf->refcount, intf_free); 2697 return rv; 2698 } 2699 2700 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2701 struct ipmi_device_id *id, 2702 bool *guid_set, guid_t *guid) 2703 { 2704 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); 2705 } 2706 2707 static ssize_t device_id_show(struct device *dev, 2708 struct device_attribute *attr, 2709 char *buf) 2710 { 2711 struct bmc_device *bmc = to_bmc_device(dev); 2712 struct ipmi_device_id id; 2713 int rv; 2714 2715 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2716 if (rv) 2717 return rv; 2718 2719 return sysfs_emit(buf, "%u\n", id.device_id); 2720 } 2721 static DEVICE_ATTR_RO(device_id); 2722 2723 static ssize_t provides_device_sdrs_show(struct device *dev, 2724 struct device_attribute *attr, 2725 char *buf) 2726 { 2727 struct bmc_device *bmc = to_bmc_device(dev); 2728 struct ipmi_device_id id; 2729 int rv; 2730 2731 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2732 if (rv) 2733 return rv; 2734 2735 return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7); 2736 } 2737 static DEVICE_ATTR_RO(provides_device_sdrs); 2738 2739 static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2740 char *buf) 2741 { 2742 struct bmc_device *bmc = to_bmc_device(dev); 2743 struct ipmi_device_id id; 2744 int rv; 2745 2746 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2747 if (rv) 2748 return rv; 2749 2750 return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F); 2751 } 2752 static DEVICE_ATTR_RO(revision); 2753 2754 static ssize_t firmware_revision_show(struct device *dev, 2755 struct device_attribute *attr, 2756 char *buf) 2757 { 2758 struct bmc_device *bmc = to_bmc_device(dev); 2759 struct ipmi_device_id id; 2760 int rv; 2761 2762 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2763 if (rv) 2764 return rv; 2765 2766 return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1, 2767 id.firmware_revision_2); 2768 } 2769 static DEVICE_ATTR_RO(firmware_revision); 2770 2771 static ssize_t ipmi_version_show(struct device *dev, 2772 struct device_attribute *attr, 2773 char *buf) 2774 { 2775 struct bmc_device *bmc = to_bmc_device(dev); 2776 struct ipmi_device_id id; 2777 int rv; 2778 2779 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2780 if (rv) 2781 return rv; 2782 2783 return sysfs_emit(buf, "%u.%u\n", 2784 ipmi_version_major(&id), 2785 ipmi_version_minor(&id)); 2786 } 2787 static DEVICE_ATTR_RO(ipmi_version); 2788 2789 static ssize_t add_dev_support_show(struct device *dev, 2790 struct device_attribute *attr, 2791 char *buf) 2792 { 2793 struct bmc_device *bmc = to_bmc_device(dev); 2794 struct ipmi_device_id id; 2795 int rv; 2796 2797 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2798 if (rv) 2799 return rv; 2800 2801 return sysfs_emit(buf, "0x%02x\n", id.additional_device_support); 2802 } 2803 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, 2804 NULL); 2805 2806 static ssize_t manufacturer_id_show(struct device *dev, 2807 struct device_attribute *attr, 2808 char *buf) 2809 { 2810 struct bmc_device *bmc = to_bmc_device(dev); 2811 struct ipmi_device_id id; 2812 int rv; 2813 2814 
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2815 if (rv) 2816 return rv; 2817 2818 return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id); 2819 } 2820 static DEVICE_ATTR_RO(manufacturer_id); 2821 2822 static ssize_t product_id_show(struct device *dev, 2823 struct device_attribute *attr, 2824 char *buf) 2825 { 2826 struct bmc_device *bmc = to_bmc_device(dev); 2827 struct ipmi_device_id id; 2828 int rv; 2829 2830 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2831 if (rv) 2832 return rv; 2833 2834 return sysfs_emit(buf, "0x%4.4x\n", id.product_id); 2835 } 2836 static DEVICE_ATTR_RO(product_id); 2837 2838 static ssize_t aux_firmware_rev_show(struct device *dev, 2839 struct device_attribute *attr, 2840 char *buf) 2841 { 2842 struct bmc_device *bmc = to_bmc_device(dev); 2843 struct ipmi_device_id id; 2844 int rv; 2845 2846 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2847 if (rv) 2848 return rv; 2849 2850 return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2851 id.aux_firmware_revision[3], 2852 id.aux_firmware_revision[2], 2853 id.aux_firmware_revision[1], 2854 id.aux_firmware_revision[0]); 2855 } 2856 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2857 2858 static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2859 char *buf) 2860 { 2861 struct bmc_device *bmc = to_bmc_device(dev); 2862 bool guid_set; 2863 guid_t guid; 2864 int rv; 2865 2866 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid); 2867 if (rv) 2868 return rv; 2869 if (!guid_set) 2870 return -ENOENT; 2871 2872 return sysfs_emit(buf, "%pUl\n", &guid); 2873 } 2874 static DEVICE_ATTR_RO(guid); 2875 2876 static struct attribute *bmc_dev_attrs[] = { 2877 &dev_attr_device_id.attr, 2878 &dev_attr_provides_device_sdrs.attr, 2879 &dev_attr_revision.attr, 2880 &dev_attr_firmware_revision.attr, 2881 &dev_attr_ipmi_version.attr, 2882 &dev_attr_additional_device_support.attr, 2883 &dev_attr_manufacturer_id.attr, 2884 &dev_attr_product_id.attr, 2885 &dev_attr_aux_firmware_revision.attr, 2886 &dev_attr_guid.attr, 2887 NULL 2888 }; 2889 2890 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, 2891 struct attribute *attr, int idx) 2892 { 2893 struct device *dev = kobj_to_dev(kobj); 2894 struct bmc_device *bmc = to_bmc_device(dev); 2895 umode_t mode = attr->mode; 2896 int rv; 2897 2898 if (attr == &dev_attr_aux_firmware_revision.attr) { 2899 struct ipmi_device_id id; 2900 2901 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2902 return (!rv && id.aux_firmware_revision_set) ? mode : 0; 2903 } 2904 if (attr == &dev_attr_guid.attr) { 2905 bool guid_set; 2906 2907 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); 2908 return (!rv && guid_set) ? 
mode : 0; 2909 } 2910 return mode; 2911 } 2912 2913 static const struct attribute_group bmc_dev_attr_group = { 2914 .attrs = bmc_dev_attrs, 2915 .is_visible = bmc_dev_attr_is_visible, 2916 }; 2917 2918 static const struct attribute_group *bmc_dev_attr_groups[] = { 2919 &bmc_dev_attr_group, 2920 NULL 2921 }; 2922 2923 static const struct device_type bmc_device_type = { 2924 .groups = bmc_dev_attr_groups, 2925 }; 2926 2927 static int __find_bmc_guid(struct device *dev, const void *data) 2928 { 2929 const guid_t *guid = data; 2930 struct bmc_device *bmc; 2931 int rv; 2932 2933 if (dev->type != &bmc_device_type) 2934 return 0; 2935 2936 bmc = to_bmc_device(dev); 2937 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid); 2938 if (rv) 2939 rv = kref_get_unless_zero(&bmc->usecount); 2940 return rv; 2941 } 2942 2943 /* 2944 * Returns with the bmc's usecount incremented, if it is non-NULL. 2945 */ 2946 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, 2947 guid_t *guid) 2948 { 2949 struct device *dev; 2950 struct bmc_device *bmc = NULL; 2951 2952 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 2953 if (dev) { 2954 bmc = to_bmc_device(dev); 2955 put_device(dev); 2956 } 2957 return bmc; 2958 } 2959 2960 struct prod_dev_id { 2961 unsigned int product_id; 2962 unsigned char device_id; 2963 }; 2964 2965 static int __find_bmc_prod_dev_id(struct device *dev, const void *data) 2966 { 2967 const struct prod_dev_id *cid = data; 2968 struct bmc_device *bmc; 2969 int rv; 2970 2971 if (dev->type != &bmc_device_type) 2972 return 0; 2973 2974 bmc = to_bmc_device(dev); 2975 rv = (bmc->id.product_id == cid->product_id 2976 && bmc->id.device_id == cid->device_id); 2977 if (rv) 2978 rv = kref_get_unless_zero(&bmc->usecount); 2979 return rv; 2980 } 2981 2982 /* 2983 * Returns with the bmc's usecount incremented, if it is non-NULL. 2984 */ 2985 static struct bmc_device *ipmi_find_bmc_prod_dev_id( 2986 struct device_driver *drv, 2987 unsigned int product_id, unsigned char device_id) 2988 { 2989 struct prod_dev_id id = { 2990 .product_id = product_id, 2991 .device_id = device_id, 2992 }; 2993 struct device *dev; 2994 struct bmc_device *bmc = NULL; 2995 2996 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 2997 if (dev) { 2998 bmc = to_bmc_device(dev); 2999 put_device(dev); 3000 } 3001 return bmc; 3002 } 3003 3004 static DEFINE_IDA(ipmi_bmc_ida); 3005 3006 static void 3007 release_bmc_device(struct device *dev) 3008 { 3009 kfree(to_bmc_device(dev)); 3010 } 3011 3012 static void cleanup_bmc_work(struct work_struct *work) 3013 { 3014 struct bmc_device *bmc = container_of(work, struct bmc_device, 3015 remove_work); 3016 int id = bmc->pdev.id; /* Unregister overwrites id */ 3017 3018 platform_device_unregister(&bmc->pdev); 3019 ida_simple_remove(&ipmi_bmc_ida, id); 3020 } 3021 3022 static void 3023 cleanup_bmc_device(struct kref *ref) 3024 { 3025 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 3026 3027 /* 3028 * Remove the platform device in a work queue to avoid issues 3029 * with removing the device attributes while reading a device 3030 * attribute. 3031 */ 3032 schedule_work(&bmc->remove_work); 3033 } 3034 3035 /* 3036 * Must be called with intf->bmc_reg_mutex held. 
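* (the function swaps intf->bmc back to the embedded tmp_bmc and drops this interface's reference on the shared bmc_device)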
3037 */ 3038 static void __ipmi_bmc_unregister(struct ipmi_smi *intf) 3039 { 3040 struct bmc_device *bmc = intf->bmc; 3041 3042 if (!intf->bmc_registered) 3043 return; 3044 3045 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3046 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name); 3047 kfree(intf->my_dev_name); 3048 intf->my_dev_name = NULL; 3049 3050 mutex_lock(&bmc->dyn_mutex); 3051 list_del(&intf->bmc_link); 3052 mutex_unlock(&bmc->dyn_mutex); 3053 intf->bmc = &intf->tmp_bmc; 3054 kref_put(&bmc->usecount, cleanup_bmc_device); 3055 intf->bmc_registered = false; 3056 } 3057 3058 static void ipmi_bmc_unregister(struct ipmi_smi *intf) 3059 { 3060 mutex_lock(&intf->bmc_reg_mutex); 3061 __ipmi_bmc_unregister(intf); 3062 mutex_unlock(&intf->bmc_reg_mutex); 3063 } 3064 3065 /* 3066 * Must be called with intf->bmc_reg_mutex held. 3067 */ 3068 static int __ipmi_bmc_register(struct ipmi_smi *intf, 3069 struct ipmi_device_id *id, 3070 bool guid_set, guid_t *guid, int intf_num) 3071 { 3072 int rv; 3073 struct bmc_device *bmc; 3074 struct bmc_device *old_bmc; 3075 3076 /* 3077 * platform_device_register() can cause bmc_reg_mutex to 3078 * be claimed because of the is_visible functions of 3079 * the attributes. Eliminate possible recursion and 3080 * release the lock. 3081 */ 3082 intf->in_bmc_register = true; 3083 mutex_unlock(&intf->bmc_reg_mutex); 3084 3085 /* 3086 * Try to find if there is a bmc_device struct 3087 * already representing this BMC. 3088 */ 3089 mutex_lock(&ipmidriver_mutex); 3090 if (guid_set) 3091 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid); 3092 else 3093 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver, 3094 id->product_id, 3095 id->device_id); 3096 3097 /* 3098 * If there is already a bmc_device, use it; otherwise 3099 * register a new BMC device. 3100 */ 3101 if (old_bmc) { 3102 bmc = old_bmc; 3103 /* 3104 * Note: old_bmc already has usecount incremented by 3105 * the BMC find functions.
3106 */ 3107 intf->bmc = old_bmc; 3108 mutex_lock(&bmc->dyn_mutex); 3109 list_add_tail(&intf->bmc_link, &bmc->intfs); 3110 mutex_unlock(&bmc->dyn_mutex); 3111 3112 dev_info(intf->si_dev, 3113 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3114 bmc->id.manufacturer_id, 3115 bmc->id.product_id, 3116 bmc->id.device_id); 3117 } else { 3118 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL); 3119 if (!bmc) { 3120 rv = -ENOMEM; 3121 goto out; 3122 } 3123 INIT_LIST_HEAD(&bmc->intfs); 3124 mutex_init(&bmc->dyn_mutex); 3125 INIT_WORK(&bmc->remove_work, cleanup_bmc_work); 3126 3127 bmc->id = *id; 3128 bmc->dyn_id_set = 1; 3129 bmc->dyn_guid_set = guid_set; 3130 bmc->guid = *guid; 3131 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 3132 3133 bmc->pdev.name = "ipmi_bmc"; 3134 3135 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL); 3136 if (rv < 0) { 3137 kfree(bmc); 3138 goto out; 3139 } 3140 3141 bmc->pdev.dev.driver = &ipmidriver.driver; 3142 bmc->pdev.id = rv; 3143 bmc->pdev.dev.release = release_bmc_device; 3144 bmc->pdev.dev.type = &bmc_device_type; 3145 kref_init(&bmc->usecount); 3146 3147 intf->bmc = bmc; 3148 mutex_lock(&bmc->dyn_mutex); 3149 list_add_tail(&intf->bmc_link, &bmc->intfs); 3150 mutex_unlock(&bmc->dyn_mutex); 3151 3152 rv = platform_device_register(&bmc->pdev); 3153 if (rv) { 3154 dev_err(intf->si_dev, 3155 "Unable to register bmc device: %d\n", 3156 rv); 3157 goto out_list_del; 3158 } 3159 3160 dev_info(intf->si_dev, 3161 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3162 bmc->id.manufacturer_id, 3163 bmc->id.product_id, 3164 bmc->id.device_id); 3165 } 3166 3167 /* 3168 * create symlink from system interface device to bmc device 3169 * and back. 3170 */ 3171 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); 3172 if (rv) { 3173 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); 3174 goto out_put_bmc; 3175 } 3176 3177 if (intf_num == -1) 3178 intf_num = intf->intf_num; 3179 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); 3180 if (!intf->my_dev_name) { 3181 rv = -ENOMEM; 3182 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", 3183 rv); 3184 goto out_unlink1; 3185 } 3186 3187 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, 3188 intf->my_dev_name); 3189 if (rv) { 3190 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", 3191 rv); 3192 goto out_free_my_dev_name; 3193 } 3194 3195 intf->bmc_registered = true; 3196 3197 out: 3198 mutex_unlock(&ipmidriver_mutex); 3199 mutex_lock(&intf->bmc_reg_mutex); 3200 intf->in_bmc_register = false; 3201 return rv; 3202 3203 3204 out_free_my_dev_name: 3205 kfree(intf->my_dev_name); 3206 intf->my_dev_name = NULL; 3207 3208 out_unlink1: 3209 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3210 3211 out_put_bmc: 3212 mutex_lock(&bmc->dyn_mutex); 3213 list_del(&intf->bmc_link); 3214 mutex_unlock(&bmc->dyn_mutex); 3215 intf->bmc = &intf->tmp_bmc; 3216 kref_put(&bmc->usecount, cleanup_bmc_device); 3217 goto out; 3218 3219 out_list_del: 3220 mutex_lock(&bmc->dyn_mutex); 3221 list_del(&intf->bmc_link); 3222 mutex_unlock(&bmc->dyn_mutex); 3223 intf->bmc = &intf->tmp_bmc; 3224 put_device(&bmc->pdev.dev); 3225 goto out; 3226 } 3227 3228 static int 3229 send_guid_cmd(struct ipmi_smi *intf, int chan) 3230 { 3231 struct kernel_ipmi_msg msg; 3232 struct ipmi_system_interface_addr si; 3233 3234 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3235 si.channel = IPMI_BMC_CHANNEL; 3236 si.lun = 0; 3237 3238 msg.netfn = 
IPMI_NETFN_APP_REQUEST; 3239 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 3240 msg.data = NULL; 3241 msg.data_len = 0; 3242 return i_ipmi_request(NULL, 3243 intf, 3244 (struct ipmi_addr *) &si, 3245 0, 3246 &msg, 3247 intf, 3248 NULL, 3249 NULL, 3250 0, 3251 intf->addrinfo[0].address, 3252 intf->addrinfo[0].lun, 3253 -1, 0); 3254 } 3255 3256 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3257 { 3258 struct bmc_device *bmc = intf->bmc; 3259 3260 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3261 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 3262 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 3263 /* Not for me */ 3264 return; 3265 3266 if (msg->msg.data[0] != 0) { 3267 /* Error from getting the GUID, the BMC doesn't have one. */ 3268 bmc->dyn_guid_set = 0; 3269 goto out; 3270 } 3271 3272 if (msg->msg.data_len < UUID_SIZE + 1) { 3273 bmc->dyn_guid_set = 0; 3274 dev_warn(intf->si_dev, 3275 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", 3276 msg->msg.data_len, UUID_SIZE + 1); 3277 goto out; 3278 } 3279 3280 import_guid(&bmc->fetch_guid, msg->msg.data + 1); 3281 /* 3282 * Make sure the guid data is available before setting 3283 * dyn_guid_set. 3284 */ 3285 smp_wmb(); 3286 bmc->dyn_guid_set = 1; 3287 out: 3288 wake_up(&intf->waitq); 3289 } 3290 3291 static void __get_guid(struct ipmi_smi *intf) 3292 { 3293 int rv; 3294 struct bmc_device *bmc = intf->bmc; 3295 3296 bmc->dyn_guid_set = 2; 3297 intf->null_user_handler = guid_handler; 3298 rv = send_guid_cmd(intf, 0); 3299 if (rv) 3300 /* Send failed, no GUID available. */ 3301 bmc->dyn_guid_set = 0; 3302 else 3303 wait_event(intf->waitq, bmc->dyn_guid_set != 2); 3304 3305 /* dyn_guid_set makes the guid data available. */ 3306 smp_rmb(); 3307 3308 intf->null_user_handler = NULL; 3309 } 3310 3311 static int 3312 send_channel_info_cmd(struct ipmi_smi *intf, int chan) 3313 { 3314 struct kernel_ipmi_msg msg; 3315 unsigned char data[1]; 3316 struct ipmi_system_interface_addr si; 3317 3318 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3319 si.channel = IPMI_BMC_CHANNEL; 3320 si.lun = 0; 3321 3322 msg.netfn = IPMI_NETFN_APP_REQUEST; 3323 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 3324 msg.data = data; 3325 msg.data_len = 1; 3326 data[0] = chan; 3327 return i_ipmi_request(NULL, 3328 intf, 3329 (struct ipmi_addr *) &si, 3330 0, 3331 &msg, 3332 intf, 3333 NULL, 3334 NULL, 3335 0, 3336 intf->addrinfo[0].address, 3337 intf->addrinfo[0].lun, 3338 -1, 0); 3339 } 3340 3341 static void 3342 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3343 { 3344 int rv = 0; 3345 int ch; 3346 unsigned int set = intf->curr_working_cset; 3347 struct ipmi_channel *chans; 3348 3349 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3350 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3351 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { 3352 /* It's the one we want */ 3353 if (msg->msg.data[0] != 0) { 3354 /* Got an error from the channel, just go on. */ 3355 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 3356 /* 3357 * If the MC does not support this 3358 * command, that is legal. We just 3359 * assume it has one IPMB at channel 3360 * zero. 
3361 */ 3362 intf->wchannels[set].c[0].medium 3363 = IPMI_CHANNEL_MEDIUM_IPMB; 3364 intf->wchannels[set].c[0].protocol 3365 = IPMI_CHANNEL_PROTOCOL_IPMB; 3366 3367 intf->channel_list = intf->wchannels + set; 3368 intf->channels_ready = true; 3369 wake_up(&intf->waitq); 3370 goto out; 3371 } 3372 goto next_channel; 3373 } 3374 if (msg->msg.data_len < 4) { 3375 /* Message not big enough, just go on. */ 3376 goto next_channel; 3377 } 3378 ch = intf->curr_channel; 3379 chans = intf->wchannels[set].c; 3380 chans[ch].medium = msg->msg.data[2] & 0x7f; 3381 chans[ch].protocol = msg->msg.data[3] & 0x1f; 3382 3383 next_channel: 3384 intf->curr_channel++; 3385 if (intf->curr_channel >= IPMI_MAX_CHANNELS) { 3386 intf->channel_list = intf->wchannels + set; 3387 intf->channels_ready = true; 3388 wake_up(&intf->waitq); 3389 } else { 3390 intf->channel_list = intf->wchannels + set; 3391 intf->channels_ready = true; 3392 rv = send_channel_info_cmd(intf, intf->curr_channel); 3393 } 3394 3395 if (rv) { 3396 /* Got an error somehow, just give up. */ 3397 dev_warn(intf->si_dev, 3398 "Error sending channel information for channel %d: %d\n", 3399 intf->curr_channel, rv); 3400 3401 intf->channel_list = intf->wchannels + set; 3402 intf->channels_ready = true; 3403 wake_up(&intf->waitq); 3404 } 3405 } 3406 out: 3407 return; 3408 } 3409 3410 /* 3411 * Must be holding intf->bmc_reg_mutex to call this. 3412 */ 3413 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) 3414 { 3415 int rv; 3416 3417 if (ipmi_version_major(id) > 1 3418 || (ipmi_version_major(id) == 1 3419 && ipmi_version_minor(id) >= 5)) { 3420 unsigned int set; 3421 3422 /* 3423 * Start scanning the channels to see what is 3424 * available. 3425 */ 3426 set = !intf->curr_working_cset; 3427 intf->curr_working_cset = set; 3428 memset(&intf->wchannels[set], 0, 3429 sizeof(struct ipmi_channel_set)); 3430 3431 intf->null_user_handler = channel_handler; 3432 intf->curr_channel = 0; 3433 rv = send_channel_info_cmd(intf, 0); 3434 if (rv) { 3435 dev_warn(intf->si_dev, 3436 "Error sending channel information for channel 0, %d\n", 3437 rv); 3438 intf->null_user_handler = NULL; 3439 return -EIO; 3440 } 3441 3442 /* Wait for the channel info to be read. */ 3443 wait_event(intf->waitq, intf->channels_ready); 3444 intf->null_user_handler = NULL; 3445 } else { 3446 unsigned int set = intf->curr_working_cset; 3447 3448 /* Assume a single IPMB channel at zero. 
*/ 3449 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 3450 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 3451 intf->channel_list = intf->wchannels + set; 3452 intf->channels_ready = true; 3453 } 3454 3455 return 0; 3456 } 3457 3458 static void ipmi_poll(struct ipmi_smi *intf) 3459 { 3460 if (intf->handlers->poll) 3461 intf->handlers->poll(intf->send_info); 3462 /* In case something came in */ 3463 handle_new_recv_msgs(intf); 3464 } 3465 3466 void ipmi_poll_interface(struct ipmi_user *user) 3467 { 3468 ipmi_poll(user->intf); 3469 } 3470 EXPORT_SYMBOL(ipmi_poll_interface); 3471 3472 static void redo_bmc_reg(struct work_struct *work) 3473 { 3474 struct ipmi_smi *intf = container_of(work, struct ipmi_smi, 3475 bmc_reg_work); 3476 3477 if (!intf->in_shutdown) 3478 bmc_get_device_id(intf, NULL, NULL, NULL, NULL); 3479 3480 kref_put(&intf->refcount, intf_free); 3481 } 3482 3483 int ipmi_add_smi(struct module *owner, 3484 const struct ipmi_smi_handlers *handlers, 3485 void *send_info, 3486 struct device *si_dev, 3487 unsigned char slave_addr) 3488 { 3489 int i, j; 3490 int rv; 3491 struct ipmi_smi *intf, *tintf; 3492 struct list_head *link; 3493 struct ipmi_device_id id; 3494 3495 /* 3496 * Make sure the driver is actually initialized, this handles 3497 * problems with initialization order. 3498 */ 3499 rv = ipmi_init_msghandler(); 3500 if (rv) 3501 return rv; 3502 3503 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 3504 if (!intf) 3505 return -ENOMEM; 3506 3507 rv = init_srcu_struct(&intf->users_srcu); 3508 if (rv) { 3509 kfree(intf); 3510 return rv; 3511 } 3512 3513 intf->owner = owner; 3514 intf->bmc = &intf->tmp_bmc; 3515 INIT_LIST_HEAD(&intf->bmc->intfs); 3516 mutex_init(&intf->bmc->dyn_mutex); 3517 INIT_LIST_HEAD(&intf->bmc_link); 3518 mutex_init(&intf->bmc_reg_mutex); 3519 intf->intf_num = -1; /* Mark it invalid for now. */ 3520 kref_init(&intf->refcount); 3521 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg); 3522 intf->si_dev = si_dev; 3523 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 3524 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR; 3525 intf->addrinfo[j].lun = 2; 3526 } 3527 if (slave_addr != 0) 3528 intf->addrinfo[0].address = slave_addr; 3529 INIT_LIST_HEAD(&intf->users); 3530 intf->handlers = handlers; 3531 intf->send_info = send_info; 3532 spin_lock_init(&intf->seq_lock); 3533 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 3534 intf->seq_table[j].inuse = 0; 3535 intf->seq_table[j].seqid = 0; 3536 } 3537 intf->curr_seq = 0; 3538 spin_lock_init(&intf->waiting_rcv_msgs_lock); 3539 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 3540 tasklet_setup(&intf->recv_tasklet, 3541 smi_recv_tasklet); 3542 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 3543 spin_lock_init(&intf->xmit_msgs_lock); 3544 INIT_LIST_HEAD(&intf->xmit_msgs); 3545 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 3546 spin_lock_init(&intf->events_lock); 3547 spin_lock_init(&intf->watch_lock); 3548 atomic_set(&intf->event_waiters, 0); 3549 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3550 INIT_LIST_HEAD(&intf->waiting_events); 3551 intf->waiting_events_count = 0; 3552 mutex_init(&intf->cmd_rcvrs_mutex); 3553 spin_lock_init(&intf->maintenance_mode_lock); 3554 INIT_LIST_HEAD(&intf->cmd_rcvrs); 3555 init_waitqueue_head(&intf->waitq); 3556 for (i = 0; i < IPMI_NUM_STATS; i++) 3557 atomic_set(&intf->stats[i], 0); 3558 3559 mutex_lock(&ipmi_interfaces_mutex); 3560 /* Look for a hole in the numbers. 
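For example, if interfaces 0, 1 and 3 already exist, the loop below stops at the gap, the new interface becomes number 2, and it is inserted ahead of interface 3 to keep the list in numeric order.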
*/ 3561 i = 0; 3562 link = &ipmi_interfaces; 3563 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link, 3564 ipmi_interfaces_mutex_held()) { 3565 if (tintf->intf_num != i) { 3566 link = &tintf->link; 3567 break; 3568 } 3569 i++; 3570 } 3571 /* Add the new interface in numeric order. */ 3572 if (i == 0) 3573 list_add_rcu(&intf->link, &ipmi_interfaces); 3574 else 3575 list_add_tail_rcu(&intf->link, link); 3576 3577 rv = handlers->start_processing(send_info, intf); 3578 if (rv) 3579 goto out_err; 3580 3581 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3582 if (rv) { 3583 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3584 goto out_err_started; 3585 } 3586 3587 mutex_lock(&intf->bmc_reg_mutex); 3588 rv = __scan_channels(intf, &id); 3589 mutex_unlock(&intf->bmc_reg_mutex); 3590 if (rv) 3591 goto out_err_bmc_reg; 3592 3593 /* 3594 * Keep memory order straight for RCU readers. Make 3595 * sure everything else is committed to memory before 3596 * setting intf_num to mark the interface valid. 3597 */ 3598 smp_wmb(); 3599 intf->intf_num = i; 3600 mutex_unlock(&ipmi_interfaces_mutex); 3601 3602 /* After this point the interface is legal to use. */ 3603 call_smi_watchers(i, intf->si_dev); 3604 3605 return 0; 3606 3607 out_err_bmc_reg: 3608 ipmi_bmc_unregister(intf); 3609 out_err_started: 3610 if (intf->handlers->shutdown) 3611 intf->handlers->shutdown(intf->send_info); 3612 out_err: 3613 list_del_rcu(&intf->link); 3614 mutex_unlock(&ipmi_interfaces_mutex); 3615 synchronize_srcu(&ipmi_interfaces_srcu); 3616 cleanup_srcu_struct(&intf->users_srcu); 3617 kref_put(&intf->refcount, intf_free); 3618 3619 return rv; 3620 } 3621 EXPORT_SYMBOL(ipmi_add_smi); 3622 3623 static void deliver_smi_err_response(struct ipmi_smi *intf, 3624 struct ipmi_smi_msg *msg, 3625 unsigned char err) 3626 { 3627 msg->rsp[0] = msg->data[0] | 4; 3628 msg->rsp[1] = msg->data[1]; 3629 msg->rsp[2] = err; 3630 msg->rsp_size = 3; 3631 /* It's an error, so it will never requeue, no need to check return. */ 3632 handle_one_recv_msg(intf, msg); 3633 } 3634 3635 static void cleanup_smi_msgs(struct ipmi_smi *intf) 3636 { 3637 int i; 3638 struct seq_table *ent; 3639 struct ipmi_smi_msg *msg; 3640 struct list_head *entry; 3641 struct list_head tmplist; 3642 3643 /* Clear out our transmit queues and hold the messages. */ 3644 INIT_LIST_HEAD(&tmplist); 3645 list_splice_tail(&intf->hp_xmit_msgs, &tmplist); 3646 list_splice_tail(&intf->xmit_msgs, &tmplist); 3647 3648 /* Current message first, to preserve order */ 3649 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { 3650 /* Wait for the message to clear out. */ 3651 schedule_timeout(1); 3652 } 3653 3654 /* No need for locks, the interface is down. */ 3655 3656 /* 3657 * Return errors for all pending messages in queue and in the 3658 * tables waiting for remote responses. 
3659 */ 3660 while (!list_empty(&tmplist)) { 3661 entry = tmplist.next; 3662 list_del(entry); 3663 msg = list_entry(entry, struct ipmi_smi_msg, link); 3664 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); 3665 } 3666 3667 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 3668 ent = &intf->seq_table[i]; 3669 if (!ent->inuse) 3670 continue; 3671 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); 3672 } 3673 } 3674 3675 void ipmi_unregister_smi(struct ipmi_smi *intf) 3676 { 3677 struct ipmi_smi_watcher *w; 3678 int intf_num = intf->intf_num, index; 3679 3680 mutex_lock(&ipmi_interfaces_mutex); 3681 intf->intf_num = -1; 3682 intf->in_shutdown = true; 3683 list_del_rcu(&intf->link); 3684 mutex_unlock(&ipmi_interfaces_mutex); 3685 synchronize_srcu(&ipmi_interfaces_srcu); 3686 3687 /* At this point no users can be added to the interface. */ 3688 3689 /* 3690 * Call all the watcher interfaces to tell them that 3691 * an interface is going away. 3692 */ 3693 mutex_lock(&smi_watchers_mutex); 3694 list_for_each_entry(w, &smi_watchers, link) 3695 w->smi_gone(intf_num); 3696 mutex_unlock(&smi_watchers_mutex); 3697 3698 index = srcu_read_lock(&intf->users_srcu); 3699 while (!list_empty(&intf->users)) { 3700 struct ipmi_user *user = 3701 container_of(list_next_rcu(&intf->users), 3702 struct ipmi_user, link); 3703 3704 _ipmi_destroy_user(user); 3705 } 3706 srcu_read_unlock(&intf->users_srcu, index); 3707 3708 if (intf->handlers->shutdown) 3709 intf->handlers->shutdown(intf->send_info); 3710 3711 cleanup_smi_msgs(intf); 3712 3713 ipmi_bmc_unregister(intf); 3714 3715 cleanup_srcu_struct(&intf->users_srcu); 3716 kref_put(&intf->refcount, intf_free); 3717 } 3718 EXPORT_SYMBOL(ipmi_unregister_smi); 3719 3720 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, 3721 struct ipmi_smi_msg *msg) 3722 { 3723 struct ipmi_ipmb_addr ipmb_addr; 3724 struct ipmi_recv_msg *recv_msg; 3725 3726 /* 3727 * This is 11, not 10, because the response must contain a 3728 * completion code. 3729 */ 3730 if (msg->rsp_size < 11) { 3731 /* Message not big enough, just ignore it. */ 3732 ipmi_inc_stat(intf, invalid_ipmb_responses); 3733 return 0; 3734 } 3735 3736 if (msg->rsp[2] != 0) { 3737 /* An error getting the response, just ignore it. */ 3738 return 0; 3739 } 3740 3741 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 3742 ipmb_addr.slave_addr = msg->rsp[6]; 3743 ipmb_addr.channel = msg->rsp[3] & 0x0f; 3744 ipmb_addr.lun = msg->rsp[7] & 3; 3745 3746 /* 3747 * It's a response from a remote entity. Look up the sequence 3748 * number and handle the response. 3749 */ 3750 if (intf_find_seq(intf, 3751 msg->rsp[7] >> 2, 3752 msg->rsp[3] & 0x0f, 3753 msg->rsp[8], 3754 (msg->rsp[4] >> 2) & (~1), 3755 (struct ipmi_addr *) &ipmb_addr, 3756 &recv_msg)) { 3757 /* 3758 * We were unable to find the sequence number, 3759 * so just nuke the message. 3760 */ 3761 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3762 return 0; 3763 } 3764 3765 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); 3766 /* 3767 * The other fields matched, so no need to set them, except 3768 * for netfn, which needs to be the response that was 3769 * returned, not the request value. 
3770 */ 3771 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3772 recv_msg->msg.data = recv_msg->msg_data; 3773 recv_msg->msg.data_len = msg->rsp_size - 10; 3774 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3775 if (deliver_response(intf, recv_msg)) 3776 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3777 else 3778 ipmi_inc_stat(intf, handled_ipmb_responses); 3779 3780 return 0; 3781 } 3782 3783 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, 3784 struct ipmi_smi_msg *msg) 3785 { 3786 struct cmd_rcvr *rcvr; 3787 int rv = 0; 3788 unsigned char netfn; 3789 unsigned char cmd; 3790 unsigned char chan; 3791 struct ipmi_user *user = NULL; 3792 struct ipmi_ipmb_addr *ipmb_addr; 3793 struct ipmi_recv_msg *recv_msg; 3794 3795 if (msg->rsp_size < 10) { 3796 /* Message not big enough, just ignore it. */ 3797 ipmi_inc_stat(intf, invalid_commands); 3798 return 0; 3799 } 3800 3801 if (msg->rsp[2] != 0) { 3802 /* An error getting the response, just ignore it. */ 3803 return 0; 3804 } 3805 3806 netfn = msg->rsp[4] >> 2; 3807 cmd = msg->rsp[8]; 3808 chan = msg->rsp[3] & 0xf; 3809 3810 rcu_read_lock(); 3811 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3812 if (rcvr) { 3813 user = rcvr->user; 3814 kref_get(&user->refcount); 3815 } else 3816 user = NULL; 3817 rcu_read_unlock(); 3818 3819 if (user == NULL) { 3820 /* We didn't find a user, deliver an error response. */ 3821 ipmi_inc_stat(intf, unhandled_commands); 3822 3823 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3824 msg->data[1] = IPMI_SEND_MSG_CMD; 3825 msg->data[2] = msg->rsp[3]; 3826 msg->data[3] = msg->rsp[6]; 3827 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3828 msg->data[5] = ipmb_checksum(&msg->data[3], 2); 3829 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; 3830 /* rqseq/lun */ 3831 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3832 msg->data[8] = msg->rsp[8]; /* cmd */ 3833 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3834 msg->data[10] = ipmb_checksum(&msg->data[6], 4); 3835 msg->data_size = 11; 3836 3837 pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data); 3838 3839 rcu_read_lock(); 3840 if (!intf->in_shutdown) { 3841 smi_send(intf, intf->handlers, msg, 0); 3842 /* 3843 * We used the message, so return the value 3844 * that causes it to not be freed or 3845 * queued. 3846 */ 3847 rv = -1; 3848 } 3849 rcu_read_unlock(); 3850 } else { 3851 recv_msg = ipmi_alloc_recv_msg(); 3852 if (!recv_msg) { 3853 /* 3854 * We couldn't allocate memory for the 3855 * message, so requeue it for handling 3856 * later. 3857 */ 3858 rv = 1; 3859 kref_put(&user->refcount, free_user); 3860 } else { 3861 /* Extract the source address from the data. */ 3862 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 3863 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 3864 ipmb_addr->slave_addr = msg->rsp[6]; 3865 ipmb_addr->lun = msg->rsp[7] & 3; 3866 ipmb_addr->channel = msg->rsp[3] & 0xf; 3867 3868 /* 3869 * Extract the rest of the message information 3870 * from the IPMB header. 3871 */ 3872 recv_msg->user = user; 3873 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3874 recv_msg->msgid = msg->rsp[7] >> 2; 3875 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3876 recv_msg->msg.cmd = msg->rsp[8]; 3877 recv_msg->msg.data = recv_msg->msg_data; 3878 3879 /* 3880 * We chop off 10, not 9 bytes because the checksum 3881 * at the end also needs to be removed. 
3882 */ 3883 recv_msg->msg.data_len = msg->rsp_size - 10; 3884 memcpy(recv_msg->msg_data, &msg->rsp[9], 3885 msg->rsp_size - 10); 3886 if (deliver_response(intf, recv_msg)) 3887 ipmi_inc_stat(intf, unhandled_commands); 3888 else 3889 ipmi_inc_stat(intf, handled_commands); 3890 } 3891 } 3892 3893 return rv; 3894 } 3895 3896 static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, 3897 struct ipmi_smi_msg *msg) 3898 { 3899 struct cmd_rcvr *rcvr; 3900 int rv = 0; 3901 struct ipmi_user *user = NULL; 3902 struct ipmi_ipmb_direct_addr *daddr; 3903 struct ipmi_recv_msg *recv_msg; 3904 unsigned char netfn = msg->rsp[0] >> 2; 3905 unsigned char cmd = msg->rsp[3]; 3906 3907 rcu_read_lock(); 3908 /* We always use channel 0 for direct messages. */ 3909 rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); 3910 if (rcvr) { 3911 user = rcvr->user; 3912 kref_get(&user->refcount); 3913 } else 3914 user = NULL; 3915 rcu_read_unlock(); 3916 3917 if (user == NULL) { 3918 /* We didn't find a user, deliver an error response. */ 3919 ipmi_inc_stat(intf, unhandled_commands); 3920 3921 msg->data[0] = ((netfn + 1) << 2) | (msg->rsp[4] & 0x3); 3922 msg->data[1] = msg->rsp[2]; 3923 msg->data[2] = msg->rsp[4] & ~0x3; 3924 msg->data[3] = cmd; 3925 msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; 3926 msg->data_size = 5; 3927 3928 rcu_read_lock(); 3929 if (!intf->in_shutdown) { 3930 smi_send(intf, intf->handlers, msg, 0); 3931 /* 3932 * We used the message, so return the value 3933 * that causes it to not be freed or 3934 * queued. 3935 */ 3936 rv = -1; 3937 } 3938 rcu_read_unlock(); 3939 } else { 3940 recv_msg = ipmi_alloc_recv_msg(); 3941 if (!recv_msg) { 3942 /* 3943 * We couldn't allocate memory for the 3944 * message, so requeue it for handling 3945 * later. 3946 */ 3947 rv = 1; 3948 kref_put(&user->refcount, free_user); 3949 } else { 3950 /* Extract the source address from the data. */ 3951 daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; 3952 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 3953 daddr->channel = 0; 3954 daddr->slave_addr = msg->rsp[1]; 3955 daddr->rs_lun = msg->rsp[0] & 3; 3956 daddr->rq_lun = msg->rsp[2] & 3; 3957 3958 /* 3959 * Extract the rest of the message information 3960 * from the IPMB header. 3961 */ 3962 recv_msg->user = user; 3963 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3964 recv_msg->msgid = (msg->rsp[2] >> 2); 3965 recv_msg->msg.netfn = msg->rsp[0] >> 2; 3966 recv_msg->msg.cmd = msg->rsp[3]; 3967 recv_msg->msg.data = recv_msg->msg_data; 3968 3969 recv_msg->msg.data_len = msg->rsp_size - 4; 3970 memcpy(recv_msg->msg_data, msg->rsp + 4, 3971 msg->rsp_size - 4); 3972 if (deliver_response(intf, recv_msg)) 3973 ipmi_inc_stat(intf, unhandled_commands); 3974 else 3975 ipmi_inc_stat(intf, handled_commands); 3976 } 3977 } 3978 3979 return rv; 3980 } 3981 3982 static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf, 3983 struct ipmi_smi_msg *msg) 3984 { 3985 struct ipmi_recv_msg *recv_msg; 3986 struct ipmi_ipmb_direct_addr *daddr; 3987 3988 recv_msg = (struct ipmi_recv_msg *) msg->user_data; 3989 if (recv_msg == NULL) { 3990 dev_warn(intf->si_dev, 3991 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. 
Contact your hardware vendor for assistance.\n"); 3992 return 0; 3993 } 3994 3995 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3996 recv_msg->msgid = msg->msgid; 3997 daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr; 3998 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 3999 daddr->channel = 0; 4000 daddr->slave_addr = msg->rsp[1]; 4001 daddr->rq_lun = msg->rsp[0] & 3; 4002 daddr->rs_lun = msg->rsp[2] & 3; 4003 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4004 recv_msg->msg.cmd = msg->rsp[3]; 4005 memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4); 4006 recv_msg->msg.data = recv_msg->msg_data; 4007 recv_msg->msg.data_len = msg->rsp_size - 4; 4008 deliver_local_response(intf, recv_msg); 4009 4010 return 0; 4011 } 4012 4013 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, 4014 struct ipmi_smi_msg *msg) 4015 { 4016 struct ipmi_lan_addr lan_addr; 4017 struct ipmi_recv_msg *recv_msg; 4018 4019 4020 /* 4021 * This is 13, not 12, because the response must contain a 4022 * completion code. 4023 */ 4024 if (msg->rsp_size < 13) { 4025 /* Message not big enough, just ignore it. */ 4026 ipmi_inc_stat(intf, invalid_lan_responses); 4027 return 0; 4028 } 4029 4030 if (msg->rsp[2] != 0) { 4031 /* An error getting the response, just ignore it. */ 4032 return 0; 4033 } 4034 4035 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 4036 lan_addr.session_handle = msg->rsp[4]; 4037 lan_addr.remote_SWID = msg->rsp[8]; 4038 lan_addr.local_SWID = msg->rsp[5]; 4039 lan_addr.channel = msg->rsp[3] & 0x0f; 4040 lan_addr.privilege = msg->rsp[3] >> 4; 4041 lan_addr.lun = msg->rsp[9] & 3; 4042 4043 /* 4044 * It's a response from a remote entity. Look up the sequence 4045 * number and handle the response. 4046 */ 4047 if (intf_find_seq(intf, 4048 msg->rsp[9] >> 2, 4049 msg->rsp[3] & 0x0f, 4050 msg->rsp[10], 4051 (msg->rsp[6] >> 2) & (~1), 4052 (struct ipmi_addr *) &lan_addr, 4053 &recv_msg)) { 4054 /* 4055 * We were unable to find the sequence number, 4056 * so just nuke the message. 4057 */ 4058 ipmi_inc_stat(intf, unhandled_lan_responses); 4059 return 0; 4060 } 4061 4062 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11); 4063 /* 4064 * The other fields matched, so no need to set them, except 4065 * for netfn, which needs to be the response that was 4066 * returned, not the request value. 4067 */ 4068 recv_msg->msg.netfn = msg->rsp[6] >> 2; 4069 recv_msg->msg.data = recv_msg->msg_data; 4070 recv_msg->msg.data_len = msg->rsp_size - 12; 4071 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4072 if (deliver_response(intf, recv_msg)) 4073 ipmi_inc_stat(intf, unhandled_lan_responses); 4074 else 4075 ipmi_inc_stat(intf, handled_lan_responses); 4076 4077 return 0; 4078 } 4079 4080 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, 4081 struct ipmi_smi_msg *msg) 4082 { 4083 struct cmd_rcvr *rcvr; 4084 int rv = 0; 4085 unsigned char netfn; 4086 unsigned char cmd; 4087 unsigned char chan; 4088 struct ipmi_user *user = NULL; 4089 struct ipmi_lan_addr *lan_addr; 4090 struct ipmi_recv_msg *recv_msg; 4091 4092 if (msg->rsp_size < 12) { 4093 /* Message not big enough, just ignore it. */ 4094 ipmi_inc_stat(intf, invalid_commands); 4095 return 0; 4096 } 4097 4098 if (msg->rsp[2] != 0) { 4099 /* An error getting the response, just ignore it. 

static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	struct ipmi_user *user = NULL;
	struct ipmi_lan_addr *lan_addr;
	struct ipmi_recv_msg *recv_msg;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[6] >> 2;
	cmd = msg->rsp[10];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */
		rv = 0;
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
			lan_addr->session_handle = msg->rsp[4];
			lan_addr->remote_SWID = msg->rsp[8];
			lan_addr->local_SWID = msg->rsp[5];
			lan_addr->lun = msg->rsp[9] & 3;
			lan_addr->channel = msg->rsp[3] & 0xf;
			lan_addr->privilege = msg->rsp[3] >> 4;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[9] >> 2;
			recv_msg->msg.netfn = msg->rsp[6] >> 2;
			recv_msg->msg.cmd = msg->rsp[10];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 12, not 11 bytes because the checksum
			 * at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 12;
			memcpy(recv_msg->msg_data, &msg->rsp[11],
			       msg->rsp_size - 12);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}
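
/*
 * Commands arriving over any channel are only delivered to a user
 * that registered for the netfn/cmd pair, which is what the
 * find_cmd_rcvr() lookups above implement.  A minimal sketch of how
 * a kernel client would register (error handling omitted; "my_user"
 * is a previously created user, not something defined here):
 *
 *	rv = ipmi_register_for_cmd(my_user, netfn, cmd, IPMI_CHAN_ALL);
 *
 * Commands with no registered receiver take the user == NULL path
 * above and are only counted in the unhandled_commands statistic.
 */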

/*
 * This routine will handle "Get Message" command responses with
 * channels that use an OEM Medium.  The message format belongs to
 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
 * Chapter 22, sections 22.6 and 22.24 for more details.
 */
static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	struct ipmi_user *user = NULL;
	struct ipmi_system_interface_addr *smi_addr;
	struct ipmi_recv_msg *recv_msg;

	/*
	 * We expect the OEM SW to perform error checking,
	 * so we just do some basic sanity checks.
	 */
	if (msg->rsp_size < 4) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/*
	 * This is an OEM Message, so the OEM needs to know how to
	 * handle the message.  We do no interpretation.
	 */
	netfn = msg->rsp[0] >> 2;
	cmd = msg->rsp[1];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */
		rv = 0;
	} else {
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/*
			 * OEM Messages are expected to be delivered via
			 * the system interface to SMS software.  We might
			 * need to visit this again depending on OEM
			 * requirements.
			 */
			smi_addr = ((struct ipmi_system_interface_addr *)
				    &recv_msg->addr);
			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
			smi_addr->channel = IPMI_BMC_CHANNEL;
			smi_addr->lun = msg->rsp[0] & 3;

			recv_msg->user = user;
			recv_msg->user_msg_data = NULL;
			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
			recv_msg->msg.netfn = msg->rsp[0] >> 2;
			recv_msg->msg.cmd = msg->rsp[1];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * The message starts at byte 4 which follows the
			 * Channel Byte in the "GET MESSAGE" command.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 4;
			memcpy(recv_msg->msg_data, &msg->rsp[4],
			       msg->rsp_size - 4);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}

static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
				     struct ipmi_smi_msg *msg)
{
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg->msgid = 0;
	smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 3;
}
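
/*
 * For reference, the Read Event Message Buffer response as copied by
 * copy_event_into_recv_msg() above: rsp[0] is the netfn/LUN byte,
 * rsp[1] the command, rsp[2] the completion code, and rsp[3] onward
 * the 16-byte event record, which is why handle_read_event_rsp()
 * below insists on at least 19 bytes.
 */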

static int handle_read_event_rsp(struct ipmi_smi *intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head msgs;
	struct ipmi_user *user;
	int rv = 0, deliver_count = 0, index;
	unsigned long flags;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		ipmi_inc_stat(intf, invalid_events);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);

	ipmi_inc_stat(intf, events);

	/*
	 * Allocate and fill in one message for every user that is
	 * getting events.
	 */
	index = srcu_read_lock(&intf->users_srcu);
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			srcu_read_unlock(&intf->users_srcu, index);
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
			}
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		kref_get(&user->refcount);
		list_add_tail(&recv_msg->link, &msgs);
	}
	srcu_read_unlock(&intf->users_srcu, index);

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_local_response(intf, recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/*
		 * No one to receive the message; put it in the queue if
		 * there are not already too many things in the queue.
		 */
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&recv_msg->link, &intf->waiting_events);
		intf->waiting_events_count++;
	} else if (!intf->event_msg_printed) {
		/*
		 * There are too many things in the queue, discard this
		 * message.
		 */
		dev_warn(intf->si_dev,
			 "Event queue full, discarding incoming events\n");
		intf->event_msg_printed = 1;
	}

out:
	spin_unlock_irqrestore(&intf->events_lock, flags);

	return rv;
}

static int handle_bmc_rsp(struct ipmi_smi *intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
	if (recv_msg == NULL) {
		dev_warn(intf->si_dev,
			 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
		return 0;
	}

	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	recv_msg->msgid = msg->msgid;
	smi_addr = ((struct ipmi_system_interface_addr *)
		    &recv_msg->addr);
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 2;
	deliver_local_response(intf, recv_msg);

	return 0;
}
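
/*
 * Rough dispatch done by handle_one_recv_msg() below, once the
 * sanity checks pass (a summary of the code for orientation, not a
 * normative list):
 *
 *	IPMB-direct message       -> handle_ipmb_direct_rcv_cmd()/_rsp()
 *	Send Message response to
 *	a response we sent        -> one-byte completion code to the user
 *	Get Message response      -> per-channel-medium handler (IPMB,
 *	                             LAN/async, or OEM)
 *	Read Event Message Buffer -> handle_read_event_rsp()
 *	anything else             -> handle_bmc_rsp()
 */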

/*
 * Handle a received message.  Return 1 if the message should be requeued,
 * 0 if the message should be freed, or -1 if the message should not
 * be freed or requeued.
 */
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue = 0;
	int chan;
	unsigned char cc;
	bool is_cmd = !((msg->rsp[0] >> 2) & 1);

	pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);

	if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		dev_warn(intf->si_dev,
			 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
			 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

return_unspecified:
		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
		/* Commands must have at least 3 bytes, responses 4. */
		if (is_cmd && (msg->rsp_size < 3)) {
			ipmi_inc_stat(intf, invalid_commands);
			goto out;
		}
		if (!is_cmd && (msg->rsp_size < 4))
			goto return_unspecified;
	} else if ((msg->data_size >= 2)
		   && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
		   && (msg->data[1] == IPMI_SEND_MSG_CMD)
		   && (msg->user_data == NULL)) {

		if (intf->in_shutdown)
			goto out;

		/*
		 * This is the local response to a command send; start
		 * the timer for these.  The user_data will not be
		 * NULL if this is a response send, and we will let
		 * response sends just go through.
		 */

		/*
		 * Check for errors.  If we get certain errors (ones
		 * that basically mean we can try again later), we
		 * ignore them and start the timer.  Otherwise we
		 * report the error immediately.
		 */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
		    && (msg->rsp[2] != IPMI_BUS_ERR)
		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
			int ch = msg->rsp[3] & 0xf;
			struct ipmi_channel *chans;

			/* Got an error sending the message, handle it. */

			chans = READ_ONCE(intf->channel_list)->c;
			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
				ipmi_inc_stat(intf, sent_lan_command_errs);
			else
				ipmi_inc_stat(intf, sent_ipmb_command_errs);
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
		   || (msg->rsp[1] != msg->data[1])) {
		/*
		 * The NetFN and Command in the response are not even
		 * marginally correct.
		 */
		dev_warn(intf->si_dev,
			 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
			 (msg->data[0] >> 2) | 1, msg->data[1],
			 msg->rsp[0] >> 2, msg->rsp[1]);

		goto return_unspecified;
	}

	if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
		if ((msg->data[0] >> 2) & 1) {
			/* It's a response to a sent response. */
			chan = 0;
			cc = msg->rsp[4];
			goto process_response_response;
		}
		if (is_cmd)
			requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
		else
			requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
		   && (msg->user_data != NULL)) {
		/*
		 * It's a response to a response we sent.  For this we
		 * deliver a send message response to the user.
		 */
		struct ipmi_recv_msg *recv_msg;

		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;
		cc = msg->rsp[2];

process_response_response:
		recv_msg = msg->user_data;

		requeue = 0;
		if (!recv_msg)
			goto out;

		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg_data[0] = cc;
		recv_msg->msg.data_len = 1;
		deliver_local_response(intf, recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
		struct ipmi_channel *chans;

		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
			requeue = 0;
			goto out;
		}

		/*
		 * We need to make sure the channels have been initialized.
		 * The channel_handler routine will set the "curr_channel"
		 * equal to or greater than IPMI_MAX_CHANNELS when all the
		 * channels for this interface have been initialized.
		 */
		if (!intf->channels_ready) {
			requeue = 0; /* Throw the message away */
			goto out;
		}

		chans = READ_ONCE(intf->channel_list)->c;

		switch (chans[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);
			}
			break;

		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_lan_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_lan_get_msg_cmd(intf, msg);
			}
			break;

		default:
			/*
			 * Check for OEM Channels.  Clients had better
			 * register for these commands.
			 */
			if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
			    && (chans[chan].medium
				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
				requeue = handle_oem_get_msg_cmd(intf, msg);
			} else {
				/*
				 * We don't handle the channel type, so just
				 * free the message.
				 */
				requeue = 0;
			}
		}

	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
		/* It's an asynchronous event. */
		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
	}

out:
	return requeue;
}
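
/*
 * The requeue value returned by handle_one_recv_msg() drives the
 * loop in handle_new_recv_msgs() below.  For example, when a command
 * handler cannot allocate a receive message it returns 1; the
 * message is then put back at the head of waiting_rcv_msgs and
 * processing stops, preserving message order until memory is
 * available again.
 */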

/*
 * If there are messages in the queue or pretimeouts, handle them.
 */
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
	struct ipmi_smi_msg *smi_msg;
	unsigned long flags = 0;
	int rv;
	int run_to_completion = intf->run_to_completion;

	/* See if any waiting messages need to be processed. */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	while (!list_empty(&intf->waiting_rcv_msgs)) {
		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
				     struct ipmi_smi_msg, link);
		list_del(&smi_msg->link);
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
					       flags);
		rv = handle_one_recv_msg(intf, smi_msg);
		if (!run_to_completion)
			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
		if (rv > 0) {
			/*
			 * To preserve message order, quit if we
			 * can't handle a message.  Add the message
			 * back at the head; this is safe because this
			 * tasklet is the only thing that pulls the
			 * messages.
			 */
			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
			break;
		} else {
			if (rv == 0)
				/* Message handled */
				ipmi_free_smi_msg(smi_msg);
			/* If rv < 0, fatal error, del but don't free. */
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);

	/*
	 * If the pretimeout count is non-zero, decrement one from it and
	 * deliver pretimeouts to all the users.
	 */
	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
		struct ipmi_user *user;
		int index;

		index = srcu_read_lock(&intf->users_srcu);
		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_watchdog_pretimeout)
				user->handler->ipmi_watchdog_pretimeout(
					user->handler_data);
		}
		srcu_read_unlock(&intf->users_srcu, index);
	}
}

static void smi_recv_tasklet(struct tasklet_struct *t)
{
	unsigned long flags = 0; /* keep us warning-free. */
	struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
	int run_to_completion = intf->run_to_completion;
	struct ipmi_smi_msg *newmsg = NULL;

	/*
	 * Start the next message if available.
	 *
	 * Do this here, not in the actual receiver, because the lower
	 * layer is allowed to hold locks while calling message
	 * delivery, so doing it there could deadlock.
	 */

	rcu_read_lock();

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	if (intf->curr_msg == NULL && !intf->in_shutdown) {
		struct list_head *entry = NULL;

		/* Pick the high priority queue first. */
		if (!list_empty(&intf->hp_xmit_msgs))
			entry = intf->hp_xmit_msgs.next;
		else if (!list_empty(&intf->xmit_msgs))
			entry = intf->xmit_msgs.next;

		if (entry) {
			list_del(entry);
			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
			intf->curr_msg = newmsg;
		}
	}

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
	if (newmsg)
		intf->handlers->sender(intf->send_info, newmsg);

	rcu_read_unlock();

	handle_new_recv_msgs(intf);
}
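
/*
 * A minimal sketch of how a lower-layer (SMI) driver hands a
 * response up, assuming it filled in msg->rsp and msg->rsp_size and
 * preserved msg->user_data from the send ("my_hw" and
 * my_copy_response() are hypothetical driver pieces, not part of
 * this file):
 *
 *	msg->rsp_size = my_copy_response(my_hw, msg->rsp,
 *					 IPMI_MAX_MSG_LENGTH);
 *	ipmi_smi_msg_received(intf, msg);
 *
 * The message is only queued here; delivery happens in the tasklet,
 * so the driver may hold its own locks when calling this.
 */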

/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(struct ipmi_smi *intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags = 0; /* keep us warning-free. */
	int run_to_completion = intf->run_to_completion;

	/*
	 * To preserve message order, we keep a queue and deliver from
	 * a tasklet.
	 */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
				       flags);

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	/*
	 * We can get an asynchronous event or receive message in addition
	 * to commands we send.
	 */
	if (msg == intf->curr_msg)
		intf->curr_msg = NULL;
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (run_to_completion)
		smi_recv_tasklet(&intf->recv_tasklet);
	else
		tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);

void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
{
	if (intf->in_shutdown)
		return;

	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
	tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);

static struct ipmi_smi_msg *
smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
		  unsigned char seq, long seqid)
{
	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();

	if (!smi_msg)
		/*
		 * If we can't allocate the message, just return; we
		 * get 4 retries, so this should be OK.
		 */
		return NULL;

	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
	smi_msg->data_size = recv_msg->msg.data_len;
	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);

	pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data);

	return smi_msg;
}
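
/*
 * smi_from_recv_msg() above rebuilds the wire message for a
 * retransmit.  The sequence-table slot and sequence id ride along in
 * msgid (packed by STORE_SEQ_IN_MSGID) so the eventual response can
 * be matched back to the original request by the sequence-table
 * code.
 */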

static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
			      struct list_head *timeouts,
			      unsigned long timeout_period,
			      int slot, unsigned long *flags,
			      bool *need_timer)
{
	struct ipmi_recv_msg *msg;

	if (intf->in_shutdown)
		return;

	if (!ent->inuse)
		return;

	if (timeout_period < ent->timeout) {
		ent->timeout -= timeout_period;
		*need_timer = true;
		return;
	}

	if (ent->retries_left == 0) {
		/* The message has used all its retries. */
		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		list_add_tail(&msg->link, timeouts);
		if (ent->broadcast)
			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
		else if (is_lan_addr(&ent->recv_msg->addr))
			ipmi_inc_stat(intf, timed_out_lan_commands);
		else
			ipmi_inc_stat(intf, timed_out_ipmb_commands);
	} else {
		struct ipmi_smi_msg *smi_msg;
		/* More retries, send again. */

		*need_timer = true;

		/*
		 * Start with the max timer, set to normal timer after
		 * the message is sent.
		 */
		ent->timeout = MAX_MSG_TIMEOUT;
		ent->retries_left--;
		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
					    ent->seqid);
		if (!smi_msg) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      dropped_rexmit_lan_commands);
			else
				ipmi_inc_stat(intf,
					      dropped_rexmit_ipmb_commands);
			return;
		}

		spin_unlock_irqrestore(&intf->seq_lock, *flags);

		/*
		 * Send the new message.  We send with a zero
		 * priority.  It timed out; I doubt time is that
		 * critical now, and high priority messages are really
		 * only for messages to the local MC, which don't get
		 * resent.
		 */
		if (intf->handlers) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      retransmitted_lan_commands);
			else
				ipmi_inc_stat(intf,
					      retransmitted_ipmb_commands);

			smi_send(intf, intf->handlers, smi_msg, 0);
		} else
			ipmi_free_smi_msg(smi_msg);

		spin_lock_irqsave(&intf->seq_lock, *flags);
	}
}

static bool ipmi_timeout_handler(struct ipmi_smi *intf,
				 unsigned long timeout_period)
{
	struct list_head timeouts;
	struct ipmi_recv_msg *msg, *msg2;
	unsigned long flags;
	int i;
	bool need_timer = false;

	if (!intf->bmc_registered) {
		kref_get(&intf->refcount);
		if (!schedule_work(&intf->bmc_reg_work)) {
			kref_put(&intf->refcount, intf_free);
			need_timer = true;
		}
	}

	/*
	 * Go through the seq table and find any messages that
	 * have timed out, putting them in the timeouts
	 * list.
	 */
	INIT_LIST_HEAD(&timeouts);
	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->ipmb_maintenance_mode_timeout) {
		if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
			intf->ipmb_maintenance_mode_timeout = 0;
		else
			intf->ipmb_maintenance_mode_timeout -= timeout_period;
	}
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
		check_msg_timeout(intf, &intf->seq_table[i],
				  &timeouts, timeout_period, i,
				  &flags, &need_timer);
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	list_for_each_entry_safe(msg, msg2, &timeouts, link)
		deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);

	/*
	 * Maintenance mode handling.  Check the timeout
	 * optimistically before we claim the lock.  It may
	 * mean a timeout gets missed occasionally, but that
	 * only means the timeout gets extended by one period
	 * in that case.  No big deal, and it avoids the lock
	 * most of the time.
	 */
	if (intf->auto_maintenance_timeout > 0) {
		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		if (intf->auto_maintenance_timeout > 0) {
			intf->auto_maintenance_timeout
				-= timeout_period;
			if (!intf->maintenance_mode
			    && (intf->auto_maintenance_timeout <= 0)) {
				intf->maintenance_mode_enable = false;
				maintenance_mode_update(intf);
			}
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	tasklet_schedule(&intf->recv_tasklet);

	return need_timer;
}
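
/*
 * Note the need_timer contract: ipmi_timeout() below only re-arms
 * ipmi_timer when some interface still has event waiters or
 * ipmi_timeout_handler() reports outstanding work, so the periodic
 * tick stops completely when the driver is idle and is restarted by
 * need_waiter().
 */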

static void ipmi_request_event(struct ipmi_smi *intf)
{
	/* No event requests when in maintenance mode. */
	if (intf->maintenance_mode_enable)
		return;

	if (!intf->in_shutdown)
		intf->handlers->request_events(intf->send_info);
}

static struct timer_list ipmi_timer;

static atomic_t stop_operation;

static void ipmi_timeout(struct timer_list *unused)
{
	struct ipmi_smi *intf;
	bool need_timer = false;
	int index;

	if (atomic_read(&stop_operation))
		return;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (atomic_read(&intf->event_waiters)) {
			intf->ticks_to_req_ev--;
			if (intf->ticks_to_req_ev == 0) {
				ipmi_request_event(intf);
				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
			}
			need_timer = true;
		}

		need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	if (need_timer)
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static void need_waiter(struct ipmi_smi *intf)
{
	/* Racy, but worst case we start the timer twice. */
	if (!timer_pending(&ipmi_timer))
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	/* Try to keep as much stuff out of the panic path as possible. */
	if (!oops_in_progress)
		kfree(msg);
}

struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
	struct ipmi_smi_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
	if (rv) {
		rv->done = free_smi_msg;
		rv->user_data = NULL;
		atomic_inc(&smi_msg_inuse_count);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);

static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	/* Try to keep as much stuff out of the panic path as possible. */
	if (!oops_in_progress)
		kfree(msg);
}

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
	struct ipmi_recv_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
	if (rv) {
		rv->user = NULL;
		rv->done = free_recv_msg;
		atomic_inc(&recv_msg_inuse_count);
	}
	return rv;
}

void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user && !oops_in_progress)
		kref_put(&msg->user->refcount, free_user);
	msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
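
/*
 * Both allocators above use GFP_ATOMIC because messages may be
 * allocated from interrupt context or the timer path.  Each
 * allocation bumps an inuse counter, and cleanup_ipmi() checks the
 * counters at module exit as a cheap leak detector.
 */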

static atomic_t panic_done_count = ATOMIC_INIT(0);

static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}

/*
 * Inside a panic, send a message and wait for a response.
 */
static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
					struct ipmi_addr *addr,
					struct kernel_ipmi_msg *msg)
{
	struct ipmi_smi_msg smi_msg;
	struct ipmi_recv_msg recv_msg;
	int rv;

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;
	atomic_add(2, &panic_done_count);
	rv = i_ipmi_request(NULL,
			    intf,
			    addr,
			    0,
			    msg,
			    intf,
			    &smi_msg,
			    &recv_msg,
			    0,
			    intf->addrinfo[0].address,
			    intf->addrinfo[0].lun,
			    0, 1); /* Don't retry, and don't wait. */
	if (rv)
		atomic_sub(2, &panic_done_count);
	else if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);

	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}

static void event_receiver_fetcher(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}

static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/*
		 * A get device id command, save if we are an event
		 * receiver or generator.
		 */
		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
	}
}
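
/*
 * By the time the panic path below runs, the system is effectively
 * single-threaded, so ipmi_panic_request_and_wait() above works with
 * on-stack messages, dummy done handlers, and ipmi_poll() instead of
 * allocation and sleeping.  The null_user_handler hook is how the
 * fetcher callbacks above see the responses to those requests.
 */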

static void send_panic_events(struct ipmi_smi *intf, char *str)
{
	struct kernel_ipmi_msg msg;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;
	char *p = str;
	struct ipmi_ipmb_addr *ipmb;
	int j;

	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
		return;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/*
	 * Put a few breadcrumbs in.  Hopefully later we can add more things
	 * to make the panic events more useful.
	 */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	/* Send the event announcing the panic. */
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	/*
	 * On every interface, dump a bunch of OEM events holding the
	 * string.
	 */
	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
		return;

	/*
	 * intf_num is used as a marker to tell if the
	 * interface is valid.  Thus we need a read barrier to
	 * make sure data fetched before checking intf_num
	 * won't be used.
	 */
	smp_rmb();

	/*
	 * First job here is to figure out where to send the
	 * OEM events.  There's no way in IPMI to send OEM
	 * events using an event send command, so we have to
	 * find the SEL to put them in and stick them in
	 * there.
	 */

	/* Get capabilities from the get device id. */
	intf->local_sel_device = 0;
	intf->local_event_generator = 0;
	intf->event_receiver = 0;

	/* Request the device info from the local MC. */
	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	intf->null_user_handler = device_id_fetcher;
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	if (intf->local_event_generator) {
		/* Request the event receiver from the local MC. */
		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = event_receiver_fetcher;
		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
	intf->null_user_handler = NULL;

	/*
	 * Validate the event receiver.  The low bit must not
	 * be 1 (it must be a valid IPMB address), it cannot
	 * be zero, and it must not be my address.
	 */
	if (((intf->event_receiver & 1) == 0)
	    && (intf->event_receiver != 0)
	    && (intf->event_receiver != intf->addrinfo[0].address)) {
		/*
		 * The event receiver is valid, send an IPMB
		 * message.
		 */
		ipmb = (struct ipmi_ipmb_addr *) &addr;
		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
		ipmb->channel = 0; /* FIXME - is this right? */
		ipmb->lun = intf->event_receiver_lun;
		ipmb->slave_addr = intf->event_receiver;
	} else if (intf->local_sel_device) {
		/*
		 * The event receiver was not valid (or was
		 * me), but I am an SEL device, just dump it
		 * in my SEL.
		 */
		si = (struct ipmi_system_interface_addr *) &addr;
		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		si->channel = IPMI_BMC_CHANNEL;
		si->lun = 0;
	} else
		return; /* Nowhere to send the event. */

	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
	msg.data = data;
	msg.data_len = 16;

	j = 0;
	while (*p) {
		int size = strlen(p);

		if (size > 11)
			size = 11;
		data[0] = 0;
		data[1] = 0;
		data[2] = 0xf0; /* OEM event without timestamp. */
		data[3] = intf->addrinfo[0].address;
		data[4] = j++; /* sequence # */
		/*
		 * Always give 11 bytes, so strncpy will fill
		 * it with zeroes for me.
		 */
		strncpy(data+5, p, 11);
		p += size;

		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
}
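
/*
 * For reference, each OEM SEL record built in the loop above, one
 * record per 11 bytes of panic string (a summary of this code; see
 * the IPMI specification for the authoritative record format):
 *
 *	data[0..1]  = record id (0 here; the BMC assigns real ids)
 *	data[2]     = 0xf0, OEM record without timestamp
 *	data[3]     = our slave address
 *	data[4]     = sequence number of the string chunk
 *	data[5..15] = up to 11 bytes of the string, zero padded
 */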

static int has_panicked;

static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	struct ipmi_smi *intf;
	struct ipmi_user *user;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || intf->intf_num == -1)
			/* Interface is not ready. */
			continue;

		if (!intf->handlers->poll)
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted.  In this case, drop items on the list for
		 * safety.
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);

		intf->run_to_completion = 1;
		if (intf->handlers->set_run_to_completion)
			intf->handlers->set_run_to_completion(intf->send_info,
							      1);

		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_panic_handler)
				user->handler->ipmi_panic_handler(
					user->handler_data);
		}

		send_panic_events(intf, ptr);
	}

	return NOTIFY_DONE;
}

/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
	int rv;

	if (drvregistered)
		return 0;

	rv = driver_register(&ipmidriver.driver);
	if (rv)
		pr_err("Could not register IPMI driver\n");
	else
		drvregistered = true;
	return rv;
}

static struct notifier_block panic_block = {
	.notifier_call	= panic_event,
	.next		= NULL,
	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
};

static int ipmi_init_msghandler(void)
{
	int rv;

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	if (rv)
		goto out;
	if (initialized)
		goto out;

	init_srcu_struct(&ipmi_interfaces_srcu);

	timer_setup(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = true;

out:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}

static int __init ipmi_init_msghandler_mod(void)
{
	int rv;

	pr_info("version " IPMI_DRIVER_VERSION "\n");

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}
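
/*
 * Note the split initialization: module load only registers the
 * driver, while ipmi_init_msghandler() brings up the SRCU state, the
 * periodic timer, and the panic notifier on first use, guarded by
 * the "initialized" flag.  cleanup_ipmi() below tears all of that
 * down in roughly the reverse order.
 */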

static void __exit cleanup_ipmi(void)
{
	int count;

	if (initialized) {
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &panic_block);

		/*
		 * This can't be called if any interfaces exist, so no worry
		 * about shutting down the interfaces.
		 */

		/*
		 * Tell the timer to stop, then wait for it to stop.  This
		 * avoids problems with race conditions removing the timer
		 * here.
		 */
		atomic_set(&stop_operation, 1);
		del_timer_sync(&ipmi_timer);

		initialized = false;

		/* Check for buffer leaks. */
		count = atomic_read(&smi_msg_inuse_count);
		if (count != 0)
			pr_warn("SMI message count %d at exit\n", count);
		count = atomic_read(&recv_msg_inuse_count);
		if (count != 0)
			pr_warn("recv message count %d at exit\n", count);

		cleanup_srcu_struct(&ipmi_interfaces_srcu);
	}
	if (drvregistered)
		driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");