// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
#define dev_fmt pr_fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(struct tasklet_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);

static bool initialized;
static bool drvregistered;

/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING,
	IPMI_SEND_PANIC_EVENT_MAX
};

/* Indices in this array should be mapped to enum ipmi_panic_event_op */
static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };

#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	int e;

	strscpy(valcp, val, sizeof(valcp));
	e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
	if (e < 0)
		return e;

	ipmi_send_panic_event = e;
	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	const char *event_str;

	if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
		event_str = "???";
	else
		event_str = ipmi_panic_event_str[ipmi_send_panic_event];

	return sprintf(buffer, "%s\n", event_str);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets whether the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' to disable, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
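/*
 * Example (illustrative, not part of the driver): the panic behavior
 * can be chosen at module load time or at runtime through sysfs:
 *
 *	modprobe ipmi_msghandler panic_op=string
 *	echo string > /sys/module/ipmi_msghandler/parameters/panic_op
 */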

#define MAX_EVENTS_IN_QUEUE	25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at
 * least the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The default maximum number of times a message send is retried");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	/*
	 * Set to NULL when the user is destroyed, a pointer to myself
	 * so srcu_dereference can be used on it.
	 */
	struct ipmi_user *self;
	struct srcu_struct release_barrier;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;

	/* Free must run in process context for RCU cleanup. */
	struct work_struct remove_work;
};

static struct workqueue_struct *remove_work_wq;

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
	__acquires(user->release_barrier)
{
	struct ipmi_user *ruser;

	*index = srcu_read_lock(&user->release_barrier);
	ruser = srcu_dereference(user->self, &user->release_barrier);
	if (!ruser)
		srcu_read_unlock(&user->release_barrier, *index);
	return ruser;
}

static void release_ipmi_user(struct ipmi_user *user, int index)
{
	srcu_read_unlock(&user->release_barrier, index);
}
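/*
 * Sketch of the acquire/release pattern used throughout this file
 * (see deliver_response() and the public entry points below):
 *
 *	int index;
 *	struct ipmi_user *ruser = acquire_ipmi_user(user, &index);
 *
 *	if (ruser) {
 *		... use ruser safely here ...
 *		release_ipmi_user(ruser, index);
 *	}
 *
 * acquire_ipmi_user() drops the SRCU read lock itself and returns
 * NULL once the user has been destroyed, so release_ipmi_user() must
 * only be called when the acquire succeeded.
 */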
struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)		\
	do {						\
		seq = (((msgid) >> 26) & 0x3f);		\
		seqid = ((msgid) & 0x3ffffff);		\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
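/*
 * Worked example (illustrative): with seq = 5 and seqid = 0x123456,
 * STORE_SEQ_IN_MSGID(5, 0x123456) yields 0x14123456, and
 * GET_SEQ_FROM_MSGID(0x14123456, seq, seqid) recovers seq = 5 and
 * seqid = 0x123456.  The 6-bit seq indexes the 64-entry sequence
 * table; the 26-bit seqid guards against a late response matching a
 * table slot that has since been reused.
 */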
#define IPMI_MAX_CHANNELS	16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id id;
	struct ipmi_device_id fetch_id;
	int dyn_id_set;
	unsigned long dyn_id_expiry;
	struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t guid;
	guid_t fetch_guid;
	int dyn_guid_set;
	struct kref usecount;
	struct work_struct remove_work;
	unsigned char cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out over the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent out over the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};
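/*
 * These indexes are used through the ipmi_inc_stat()/ipmi_get_stat()
 * macros defined below; e.g. ipmi_inc_stat(intf, sent_ipmb_commands)
 * expands to atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands]).
 */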

#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.  seq_lock write
	 * protects this.  Read protection is with srcu.
	 */
	struct list_head users;
	struct srcu_struct users_srcu;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register; /* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void *send_info;

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery.  If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
	 * periodic timer interrupt.  The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	spinlock_t xmit_msgs_lock;
	struct list_head xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int waiting_events_count; /* How many events in queue? */
	char delivering_events;
	char event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;

	spinlock_t watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs.  Protected by watch_lock.
	 */
	unsigned int last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures.  Used to decrease numbers of
	 * parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);


/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
#define ipmi_interfaces_mutex_held() \
	lockdep_is_held(&ipmi_interfaces_mutex)
static struct srcu_struct ipmi_interfaces_srcu;
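/*
 * Sketch of the reader-side pattern for the interface list (this is
 * what ipmi_create_user() and friends do below):
 *
 *	int index = srcu_read_lock(&ipmi_interfaces_srcu);
 *	struct ipmi_smi *intf;
 *
 *	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
 *		... look at intf ...
 *	}
 *	srcu_read_unlock(&ipmi_interfaces_srcu, index);
 *
 * Writers must hold ipmi_interfaces_mutex.
 */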
/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static int is_ipmb_direct_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void clean_up_interface_data(struct ipmi_smi *intf)
{
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;
	struct list_head list;

	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);

	clean_up_interface_data(intf);
	kfree(intf);
}

struct watcher_entry {
	int intf_num;
	struct ipmi_smi *intf;
	struct list_head link;
};

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	int index, rv;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link,
			lockdep_is_held(&smi_watchers_mutex)) {
		int intf_num = READ_ONCE(intf->intf_num);

		if (intf_num == -1)
			continue;
		watcher->new_smi(intf_num, intf->si_dev);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	mutex_unlock(&smi_watchers_mutex);

	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);

/*
 * Takes smi_watchers_mutex itself, so it must not be called with
 * that mutex held.
 */
static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
	mutex_unlock(&smi_watchers_mutex);
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
		    = (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
		    = (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
		    = (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
		    = (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_ipmb_direct_addr(addr1)) {
		struct ipmi_ipmb_direct_addr *daddr1
			= (struct ipmi_ipmb_direct_addr *) addr1;
		struct ipmi_ipmb_direct_addr *daddr2
			= (struct ipmi_ipmb_direct_addr *) addr2;

		return daddr1->slave_addr == daddr2->slave_addr &&
			daddr1->rq_lun == daddr2->rq_lun &&
			daddr1->rs_lun == daddr2->rs_lun;
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
			= (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_ipmb_direct_addr(addr)) {
		struct ipmi_ipmb_direct_addr *daddr = (void *) addr;

		if (addr->channel != 0)
			return -EINVAL;
		if (len < sizeof(struct ipmi_ipmb_direct_addr))
			return -EINVAL;

		if (daddr->slave_addr & 0x01)
			return -EINVAL;
		if (daddr->rq_lun >= 4)
			return -EINVAL;
		if (daddr->rs_lun >= 4)
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);
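/*
 * For example (illustrative), a request addressed to the local BMC
 * passes the checks above when built like this:
 *
 *	struct ipmi_system_interface_addr addr = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel = IPMI_BMC_CHANNEL,
 *		.lun = 0,
 *	};
 *
 * Any other channel on a system interface address is rejected with
 * -EINVAL.
 */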
unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
		return sizeof(struct ipmi_ipmb_direct_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk.  At this moment, simply skip it in
		 * that case.
		 */
		ipmi_free_recv_msg(msg);
	} else {
		int index;
		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);

		if (user) {
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
			release_ipmi_user(user, index);
		} else {
			/* User went away, give up. */
			ipmi_free_recv_msg(msg);
			rv = -EINVAL;
		}
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}

static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}
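/*
 * smi_add_watch() and smi_remove_watch() must be called in matched
 * pairs with the same flags; the per-flag counters above are what
 * keep the mask passed to set_need_watch() accurate.  For example
 * (illustrative), sending a command pairs them around the response,
 * as intf_next_seq() and intf_find_seq() below do:
 *
 *	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 *	... wait for the response or a timeout ...
 *	smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
 */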
/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
			 int retries,
			 int broadcast,
			 unsigned char *seq,
			 long *seqid)
{
	int rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}

/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi *intf,
			 unsigned char seq,
			 short channel,
			 unsigned char cmd,
			 unsigned char netfn,
			 struct ipmi_addr *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
				&& (msg->msg.netfn == netfn)
				&& (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long msgid)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long msgid,
			unsigned int err)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}

static void free_user_work(struct work_struct *work)
{
	struct ipmi_user *user = container_of(work, struct ipmi_user,
					      remove_work);

	cleanup_srcu_struct(&user->release_barrier);
	vfree(user);
}

int ipmi_create_user(unsigned int if_num,
		     const struct ipmi_user_hndl *handler,
		     void *handler_data,
		     struct ipmi_user **user)
{
	unsigned long flags;
	struct ipmi_user *new_user;
	int rv, index;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	new_user = vzalloc(sizeof(*new_user));
	if (!new_user)
		return -ENOMEM;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

found:
	INIT_WORK(&new_user->remove_work, free_user_work);

	rv = init_srcu_struct(&new_user->release_barrier);
	if (rv)
		goto out_kfree;

	if (!try_module_get(intf->owner)) {
		rv = -ENODEV;
		goto out_kfree;
	}

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	rcu_assign_pointer(new_user->self, new_user);
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	*user = new_user;
	return 0;

out_kfree:
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	vfree(new_user);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
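/*
 * Minimal usage sketch (illustrative; my_recv_handler and my_data are
 * hypothetical, and interface number 0 is assumed to exist).  The
 * receive handler owns the message and must free it when done:
 *
 *	static void my_recv_handler(struct ipmi_recv_msg *msg,
 *				    void *handler_data)
 *	{
 *		... examine msg ...
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv_handler,
 *	};
 *
 *	struct ipmi_user *user;
 *	int rv = ipmi_create_user(0, &my_hndl, my_data, &user);
 *
 * A user created this way is torn down with ipmi_destroy_user(user).
 */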
int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv, index;
	struct ipmi_smi *intf;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	/* Not found, return an error */
	return -EINVAL;

found:
	if (!intf->handlers->get_smi_info)
		rv = -ENOTTY;
	else
		rv = intf->handlers->get_smi_info(intf->send_info, data);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

static void free_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

	/* SRCU cleanup must happen in task context. */
	queue_work(remove_work_wq, &user->remove_work);
}

static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;
	int i;
	unsigned long flags;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;

	if (!acquire_ipmi_user(user, &i)) {
		/*
		 * The user has already been cleaned up, just make sure
		 * nothing is using it and return.
		 */
		synchronize_srcu(&user->release_barrier);
		return;
	}

	rcu_assign_pointer(user->self, NULL);
	release_ipmi_user(user, i);

	synchronize_srcu(&user->release_barrier);

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_srcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	kref_put(&intf->refcount, intf_free);
	module_put(intf->owner);
}

int ipmi_destroy_user(struct ipmi_user *user)
{
	_ipmi_destroy_user(user);

	kref_put(&user->refcount, free_user);

	return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);

int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char LUN)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);

int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode, index;
	unsigned long flags;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}

int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0, index;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_enable = false;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_enable = true;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
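/*
 * Example (illustrative): a user about to run a long maintenance
 * operation, such as a firmware update, can pin maintenance mode on
 * for the duration instead of relying on the automatic timeout:
 *
 *	ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_ON);
 *	... issue the maintenance commands ...
 *	ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_AUTO);
 */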
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;
	int index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_local_response(intf, msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

out:
	spin_unlock_irqrestore(&intf->events_lock, flags);
	release_ipmi_user(user, index);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);

static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & chans))
			return 0;
	}
	return 1;
}
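/*
 * The chans argument used below is a bitmask of channels, bit N for
 * channel N.  E.g. (illustrative), to receive a command on channels
 * 0 and 2:
 *
 *	ipmi_register_for_cmd(user, netfn, cmd, (1 << 0) | (1 << 2));
 *
 * Registration fails with -EBUSY if any requested channel is already
 * claimed by another user for the same netfn/cmd pair.
 */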
int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
out_release:
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);

int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user, index);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
EXPORT_SYMBOL(ipmb_checksum);
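/*
 * ipmb_checksum() is a 2's complement checksum: the sum of the
 * covered bytes plus the checksum itself is zero modulo 256.  For
 * example, for the two bytes 0x20 0x18 it returns
 * -(0x20 + 0x18) = 0xc8, and 0x20 + 0x18 + 0xc8 == 0x100 -> 0x00.
 */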
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
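/*
 * For reference (illustrative), the SEND MSG payload built above for
 * a non-broadcast request is laid out as follows; a broadcast shifts
 * everything from data[3] on down by one and puts a zero in data[3]:
 *
 *	data[0]  IPMI_NETFN_APP_REQUEST << 2
 *	data[1]  IPMI_SEND_MSG_CMD
 *	data[2]  channel
 *	data[3]  target slave address
 *	data[4]  netfn << 2 | LUN
 *	data[5]  checksum over data[3..4]
 *	data[6]  source (SMS) address
 *	data[7]  ipmb_seq << 2 | source LUN
 *	data[8]  command
 *	data[9+] message data, followed by a checksum over data[6..end]
 */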
static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  long msgid,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
{
	/* Format the LAN header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/* Add on the checksum size. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = intf->run_to_completion;
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
			      struct ipmi_addr *addr,
			      long msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg *smi_msg,
			      struct ipmi_recv_msg *recv_msg,
			      int retries,
			      unsigned int retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout
			= maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && !intf->maintenance_mode_enable) {
			intf->maintenance_mode_enable = true;
			maintenance_mode_update(intf);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->user_data = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}
*/ 2027 unsigned long flags; 2028 2029 spin_lock_irqsave(&intf->seq_lock, flags); 2030 2031 if (is_maintenance_mode_cmd(msg)) 2032 intf->ipmb_maintenance_mode_timeout = 2033 maintenance_mode_timeout_ms; 2034 2035 if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0) 2036 /* Different default in maintenance mode */ 2037 retry_time_ms = default_maintenance_retry_ms; 2038 2039 /* 2040 * Create a sequence number with a 1 second 2041 * timeout and 4 retries. 2042 */ 2043 rv = intf_next_seq(intf, 2044 recv_msg, 2045 retry_time_ms, 2046 retries, 2047 broadcast, 2048 &ipmb_seq, 2049 &seqid); 2050 if (rv) 2051 /* 2052 * We have used up all the sequence numbers, 2053 * probably, so abort. 2054 */ 2055 goto out_err; 2056 2057 ipmi_inc_stat(intf, sent_ipmb_commands); 2058 2059 /* 2060 * Store the sequence number in the message, 2061 * so that when the send message response 2062 * comes back we can start the timer. 2063 */ 2064 format_ipmb_msg(smi_msg, msg, ipmb_addr, 2065 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2066 ipmb_seq, broadcast, 2067 source_address, source_lun); 2068 2069 /* 2070 * Copy the message into the recv message data, so we 2071 * can retransmit it later if necessary. 2072 */ 2073 memcpy(recv_msg->msg_data, smi_msg->data, 2074 smi_msg->data_size); 2075 recv_msg->msg.data = recv_msg->msg_data; 2076 recv_msg->msg.data_len = smi_msg->data_size; 2077 2078 /* 2079 * We don't unlock until here, because we need 2080 * to copy the completed message into the 2081 * recv_msg before we release the lock. 2082 * Otherwise, race conditions may bite us. I 2083 * know that's pretty paranoid, but I prefer 2084 * to be correct. 2085 */ 2086 out_err: 2087 spin_unlock_irqrestore(&intf->seq_lock, flags); 2088 } 2089 2090 return rv; 2091 } 2092 2093 static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf, 2094 struct ipmi_addr *addr, 2095 long msgid, 2096 struct kernel_ipmi_msg *msg, 2097 struct ipmi_smi_msg *smi_msg, 2098 struct ipmi_recv_msg *recv_msg, 2099 unsigned char source_lun) 2100 { 2101 struct ipmi_ipmb_direct_addr *daddr; 2102 bool is_cmd = !(recv_msg->msg.netfn & 0x1); 2103 2104 if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT)) 2105 return -EAFNOSUPPORT; 2106 2107 /* Responses must have a completion code. 
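 * (Byte 0 of a response body is the completion code, so a response
 * with data_len == 0 cannot be well formed and is rejected below.)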
*/ 2108 if (!is_cmd && msg->data_len < 1) { 2109 ipmi_inc_stat(intf, sent_invalid_commands); 2110 return -EINVAL; 2111 } 2112 2113 if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) { 2114 ipmi_inc_stat(intf, sent_invalid_commands); 2115 return -EMSGSIZE; 2116 } 2117 2118 daddr = (struct ipmi_ipmb_direct_addr *) addr; 2119 if (daddr->rq_lun > 3 || daddr->rs_lun > 3) { 2120 ipmi_inc_stat(intf, sent_invalid_commands); 2121 return -EINVAL; 2122 } 2123 2124 smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT; 2125 smi_msg->msgid = msgid; 2126 2127 if (is_cmd) { 2128 smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun; 2129 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun; 2130 } else { 2131 smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun; 2132 smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun; 2133 } 2134 smi_msg->data[1] = daddr->slave_addr; 2135 smi_msg->data[3] = msg->cmd; 2136 2137 memcpy(smi_msg->data + 4, msg->data, msg->data_len); 2138 smi_msg->data_size = msg->data_len + 4; 2139 2140 smi_msg->user_data = recv_msg; 2141 2142 return 0; 2143 } 2144 2145 static int i_ipmi_req_lan(struct ipmi_smi *intf, 2146 struct ipmi_addr *addr, 2147 long msgid, 2148 struct kernel_ipmi_msg *msg, 2149 struct ipmi_smi_msg *smi_msg, 2150 struct ipmi_recv_msg *recv_msg, 2151 unsigned char source_lun, 2152 int retries, 2153 unsigned int retry_time_ms) 2154 { 2155 struct ipmi_lan_addr *lan_addr; 2156 unsigned char ipmb_seq; 2157 long seqid; 2158 struct ipmi_channel *chans; 2159 int rv = 0; 2160 2161 if (addr->channel >= IPMI_MAX_CHANNELS) { 2162 ipmi_inc_stat(intf, sent_invalid_commands); 2163 return -EINVAL; 2164 } 2165 2166 chans = READ_ONCE(intf->channel_list)->c; 2167 2168 if ((chans[addr->channel].medium 2169 != IPMI_CHANNEL_MEDIUM_8023LAN) 2170 && (chans[addr->channel].medium 2171 != IPMI_CHANNEL_MEDIUM_ASYNC)) { 2172 ipmi_inc_stat(intf, sent_invalid_commands); 2173 return -EINVAL; 2174 } 2175 2176 /* 11 for the header and 1 for the checksum. */ 2177 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { 2178 ipmi_inc_stat(intf, sent_invalid_commands); 2179 return -EMSGSIZE; 2180 } 2181 2182 lan_addr = (struct ipmi_lan_addr *) addr; 2183 if (lan_addr->lun > 3) { 2184 ipmi_inc_stat(intf, sent_invalid_commands); 2185 return -EINVAL; 2186 } 2187 2188 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); 2189 2190 if (recv_msg->msg.netfn & 0x1) { 2191 /* 2192 * It's a response, so use the user's sequence 2193 * from msgid. 2194 */ 2195 ipmi_inc_stat(intf, sent_lan_responses); 2196 format_lan_msg(smi_msg, msg, lan_addr, msgid, 2197 msgid, source_lun); 2198 2199 /* 2200 * Save the receive message so we can use it 2201 * to deliver the response. 2202 */ 2203 smi_msg->user_data = recv_msg; 2204 } else { 2205 /* It's a command, so get a sequence for it. */ 2206 unsigned long flags; 2207 2208 spin_lock_irqsave(&intf->seq_lock, flags); 2209 2210 /* 2211 * Create a sequence number with a 1 second 2212 * timeout and 4 retries. 2213 */ 2214 rv = intf_next_seq(intf, 2215 recv_msg, 2216 retry_time_ms, 2217 retries, 2218 0, 2219 &ipmb_seq, 2220 &seqid); 2221 if (rv) 2222 /* 2223 * We have used up all the sequence numbers, 2224 * probably, so abort. 2225 */ 2226 goto out_err; 2227 2228 ipmi_inc_stat(intf, sent_lan_commands); 2229 2230 /* 2231 * Store the sequence number in the message, 2232 * so that when the send message response 2233 * comes back we can start the timer. 
2234 */ 2235 format_lan_msg(smi_msg, msg, lan_addr, 2236 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2237 ipmb_seq, source_lun); 2238 2239 /* 2240 * Copy the message into the recv message data, so we 2241 * can retransmit it later if necessary. 2242 */ 2243 memcpy(recv_msg->msg_data, smi_msg->data, 2244 smi_msg->data_size); 2245 recv_msg->msg.data = recv_msg->msg_data; 2246 recv_msg->msg.data_len = smi_msg->data_size; 2247 2248 /* 2249 * We don't unlock until here, because we need 2250 * to copy the completed message into the 2251 * recv_msg before we release the lock. 2252 * Otherwise, race conditions may bite us. I 2253 * know that's pretty paranoid, but I prefer 2254 * to be correct. 2255 */ 2256 out_err: 2257 spin_unlock_irqrestore(&intf->seq_lock, flags); 2258 } 2259 2260 return rv; 2261 } 2262 2263 /* 2264 * Separate from ipmi_request so that the user does not have to be 2265 * supplied in certain circumstances (mainly at panic time). If 2266 * messages are supplied, they will be freed, even if an error 2267 * occurs. 2268 */ 2269 static int i_ipmi_request(struct ipmi_user *user, 2270 struct ipmi_smi *intf, 2271 struct ipmi_addr *addr, 2272 long msgid, 2273 struct kernel_ipmi_msg *msg, 2274 void *user_msg_data, 2275 void *supplied_smi, 2276 struct ipmi_recv_msg *supplied_recv, 2277 int priority, 2278 unsigned char source_address, 2279 unsigned char source_lun, 2280 int retries, 2281 unsigned int retry_time_ms) 2282 { 2283 struct ipmi_smi_msg *smi_msg; 2284 struct ipmi_recv_msg *recv_msg; 2285 int rv = 0; 2286 2287 if (supplied_recv) 2288 recv_msg = supplied_recv; 2289 else { 2290 recv_msg = ipmi_alloc_recv_msg(); 2291 if (recv_msg == NULL) { 2292 rv = -ENOMEM; 2293 goto out; 2294 } 2295 } 2296 recv_msg->user_msg_data = user_msg_data; 2297 2298 if (supplied_smi) 2299 smi_msg = (struct ipmi_smi_msg *) supplied_smi; 2300 else { 2301 smi_msg = ipmi_alloc_smi_msg(); 2302 if (smi_msg == NULL) { 2303 if (!supplied_recv) 2304 ipmi_free_recv_msg(recv_msg); 2305 rv = -ENOMEM; 2306 goto out; 2307 } 2308 } 2309 2310 rcu_read_lock(); 2311 if (intf->in_shutdown) { 2312 rv = -ENODEV; 2313 goto out_err; 2314 } 2315 2316 recv_msg->user = user; 2317 if (user) 2318 /* The put happens when the message is freed. */ 2319 kref_get(&user->refcount); 2320 recv_msg->msgid = msgid; 2321 /* 2322 * Store the message to send in the receive message so timeout 2323 * responses can get the proper response data. 2324 */ 2325 recv_msg->msg = *msg; 2326 2327 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 2328 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, 2329 recv_msg, retries, retry_time_ms); 2330 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 2331 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, 2332 source_address, source_lun, 2333 retries, retry_time_ms); 2334 } else if (is_ipmb_direct_addr(addr)) { 2335 rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg, 2336 recv_msg, source_lun); 2337 } else if (is_lan_addr(addr)) { 2338 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, 2339 source_lun, retries, retry_time_ms); 2340 } else { 2341 /* Unknown address type. 
*/ 2342 ipmi_inc_stat(intf, sent_invalid_commands); 2343 rv = -EINVAL; 2344 } 2345 2346 if (rv) { 2347 out_err: 2348 ipmi_free_smi_msg(smi_msg); 2349 ipmi_free_recv_msg(recv_msg); 2350 } else { 2351 pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data); 2352 2353 smi_send(intf, intf->handlers, smi_msg, priority); 2354 } 2355 rcu_read_unlock(); 2356 2357 out: 2358 return rv; 2359 } 2360 2361 static int check_addr(struct ipmi_smi *intf, 2362 struct ipmi_addr *addr, 2363 unsigned char *saddr, 2364 unsigned char *lun) 2365 { 2366 if (addr->channel >= IPMI_MAX_CHANNELS) 2367 return -EINVAL; 2368 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); 2369 *lun = intf->addrinfo[addr->channel].lun; 2370 *saddr = intf->addrinfo[addr->channel].address; 2371 return 0; 2372 } 2373 2374 int ipmi_request_settime(struct ipmi_user *user, 2375 struct ipmi_addr *addr, 2376 long msgid, 2377 struct kernel_ipmi_msg *msg, 2378 void *user_msg_data, 2379 int priority, 2380 int retries, 2381 unsigned int retry_time_ms) 2382 { 2383 unsigned char saddr = 0, lun = 0; 2384 int rv, index; 2385 2386 if (!user) 2387 return -EINVAL; 2388 2389 user = acquire_ipmi_user(user, &index); 2390 if (!user) 2391 return -ENODEV; 2392 2393 rv = check_addr(user->intf, addr, &saddr, &lun); 2394 if (!rv) 2395 rv = i_ipmi_request(user, 2396 user->intf, 2397 addr, 2398 msgid, 2399 msg, 2400 user_msg_data, 2401 NULL, NULL, 2402 priority, 2403 saddr, 2404 lun, 2405 retries, 2406 retry_time_ms); 2407 2408 release_ipmi_user(user, index); 2409 return rv; 2410 } 2411 EXPORT_SYMBOL(ipmi_request_settime); 2412 2413 int ipmi_request_supply_msgs(struct ipmi_user *user, 2414 struct ipmi_addr *addr, 2415 long msgid, 2416 struct kernel_ipmi_msg *msg, 2417 void *user_msg_data, 2418 void *supplied_smi, 2419 struct ipmi_recv_msg *supplied_recv, 2420 int priority) 2421 { 2422 unsigned char saddr = 0, lun = 0; 2423 int rv, index; 2424 2425 if (!user) 2426 return -EINVAL; 2427 2428 user = acquire_ipmi_user(user, &index); 2429 if (!user) 2430 return -ENODEV; 2431 2432 rv = check_addr(user->intf, addr, &saddr, &lun); 2433 if (!rv) 2434 rv = i_ipmi_request(user, 2435 user->intf, 2436 addr, 2437 msgid, 2438 msg, 2439 user_msg_data, 2440 supplied_smi, 2441 supplied_recv, 2442 priority, 2443 saddr, 2444 lun, 2445 -1, 0); 2446 2447 release_ipmi_user(user, index); 2448 return rv; 2449 } 2450 EXPORT_SYMBOL(ipmi_request_supply_msgs); 2451 2452 static void bmc_device_id_handler(struct ipmi_smi *intf, 2453 struct ipmi_recv_msg *msg) 2454 { 2455 int rv; 2456 2457 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2458 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2459 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { 2460 dev_warn(intf->si_dev, 2461 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", 2462 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); 2463 return; 2464 } 2465 2466 if (msg->msg.data[0]) { 2467 dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n", 2468 msg->msg.data[0]); 2469 intf->bmc->dyn_id_set = 0; 2470 goto out; 2471 } 2472 2473 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, 2474 msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); 2475 if (rv) { 2476 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); 2477 /* record completion code when error */ 2478 intf->bmc->cc = msg->msg.data[0]; 2479 intf->bmc->dyn_id_set = 0; 2480 } else { 2481 /* 2482 * Make sure the id data is available before setting 2483 * dyn_id_set. 
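 * This write barrier pairs with the smp_rmb() in __get_device_id()
 * after its wait_event(): a reader that sees dyn_id_set == 1 must
 * also see the fetched id contents.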
*/ 2485 smp_wmb();
2486 intf->bmc->dyn_id_set = 1;
2487 }
2488 out:
2489 wake_up(&intf->waitq);
2490 }
2491
2492 static int
2493 send_get_device_id_cmd(struct ipmi_smi *intf)
2494 {
2495 struct ipmi_system_interface_addr si;
2496 struct kernel_ipmi_msg msg;
2497
2498 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2499 si.channel = IPMI_BMC_CHANNEL;
2500 si.lun = 0;
2501
2502 msg.netfn = IPMI_NETFN_APP_REQUEST;
2503 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2504 msg.data = NULL;
2505 msg.data_len = 0;
2506
2507 return i_ipmi_request(NULL,
2508 intf,
2509 (struct ipmi_addr *) &si,
2510 0,
2511 &msg,
2512 intf,
2513 NULL,
2514 NULL,
2515 0,
2516 intf->addrinfo[0].address,
2517 intf->addrinfo[0].lun,
2518 -1, 0);
2519 }
2520
2521 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
2522 {
2523 int rv;
2524 unsigned int retry_count = 0;
2525
2526 intf->null_user_handler = bmc_device_id_handler;
2527
2528 retry:
2529 bmc->cc = 0;
2530 bmc->dyn_id_set = 2;
2531
2532 rv = send_get_device_id_cmd(intf);
2533 if (rv)
2534 goto out_reset_handler;
2535
2536 wait_event(intf->waitq, bmc->dyn_id_set != 2);
2537
2538 if (!bmc->dyn_id_set) {
2539 if (bmc->cc != IPMI_CC_NO_ERROR &&
2540 ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
2541 msleep(500);
2542 dev_warn(intf->si_dev,
2543 "BMC returned 0x%2.2x, retry get bmc device id\n",
2544 bmc->cc);
2545 goto retry;
2546 }
2547
2548 rv = -EIO; /* Something went wrong in the fetch. */
2549 }
2550
2551 /* dyn_id_set makes the id data available. */
2552 smp_rmb();
2553
2554 out_reset_handler:
2555 intf->null_user_handler = NULL;
2556
2557 return rv;
2558 }
2559
2560 /*
2561 * Fetch the device id for the bmc/interface. You must pass in either
2562 * bmc or intf; this code will get the other one. If the data has
2563 * been recently fetched, this will just use the cached data. Otherwise
2564 * it will run a new fetch.
2565 *
2566 * Except for the first time this is called (in ipmi_add_smi()),
2567 * this will always return good data.
2568 */
2569 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
2570 struct ipmi_device_id *id,
2571 bool *guid_set, guid_t *guid, int intf_num)
2572 {
2573 int rv = 0;
2574 int prev_dyn_id_set, prev_guid_set;
2575 bool intf_set = intf != NULL;
2576
2577 if (!intf) {
2578 mutex_lock(&bmc->dyn_mutex);
2579 retry_bmc_lock:
2580 if (list_empty(&bmc->intfs)) {
2581 mutex_unlock(&bmc->dyn_mutex);
2582 return -ENOENT;
2583 }
2584 intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
2585 bmc_link);
2586 kref_get(&intf->refcount);
2587 mutex_unlock(&bmc->dyn_mutex);
2588 mutex_lock(&intf->bmc_reg_mutex);
2589 mutex_lock(&bmc->dyn_mutex);
2590 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
2591 bmc_link)) {
2592 mutex_unlock(&intf->bmc_reg_mutex);
2593 kref_put(&intf->refcount, intf_free);
2594 goto retry_bmc_lock;
2595 }
2596 } else {
2597 mutex_lock(&intf->bmc_reg_mutex);
2598 bmc = intf->bmc;
2599 mutex_lock(&bmc->dyn_mutex);
2600 kref_get(&intf->refcount);
2601 }
2602
2603 /* If we have a valid and current ID, just return that. */
2604 if (intf->in_bmc_register ||
2605 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
2606 goto out_noprocessing;
2607
2608 prev_guid_set = bmc->dyn_guid_set;
2609 __get_guid(intf);
2610
2611 prev_dyn_id_set = bmc->dyn_id_set;
2612 rv = __get_device_id(intf, bmc);
2613 if (rv)
2614 goto out;
2615
2616 /*
2617 * The guid, device id, manufacturer id, and product id should
2618 * not change on a BMC. If they do, we have to do some dancing.
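 * The dancing, below: unregister the old bmc_device, register a
 * fresh one from fetch_id/fetch_guid, and rescan the channels.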
*/ 2620 if (!intf->bmc_registered
2621 || (!prev_guid_set && bmc->dyn_guid_set)
2622 || (!prev_dyn_id_set && bmc->dyn_id_set)
2623 || (prev_guid_set && bmc->dyn_guid_set
2624 && !guid_equal(&bmc->guid, &bmc->fetch_guid))
2625 || bmc->id.device_id != bmc->fetch_id.device_id
2626 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
2627 || bmc->id.product_id != bmc->fetch_id.product_id) {
2628 struct ipmi_device_id id = bmc->fetch_id;
2629 int guid_set = bmc->dyn_guid_set;
2630 guid_t guid;
2631
2632 guid = bmc->fetch_guid;
2633 mutex_unlock(&bmc->dyn_mutex);
2634
2635 __ipmi_bmc_unregister(intf);
2636 /* Fill in the temporary BMC for good measure. */
2637 intf->bmc->id = id;
2638 intf->bmc->dyn_guid_set = guid_set;
2639 intf->bmc->guid = guid;
2640 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
2641 need_waiter(intf); /* Retry later on an error. */
2642 else
2643 __scan_channels(intf, &id);
2644
2645
2646 if (!intf_set) {
2647 /*
2648 * We weren't given the interface on the
2649 * command line, so restart the operation on
2650 * the next interface for the BMC.
2651 */
2652 mutex_unlock(&intf->bmc_reg_mutex);
2653 mutex_lock(&bmc->dyn_mutex);
2654 goto retry_bmc_lock;
2655 }
2656
2657 /* We have a new BMC, set it up. */
2658 bmc = intf->bmc;
2659 mutex_lock(&bmc->dyn_mutex);
2660 goto out_noprocessing;
2661 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
2662 /* The version info changed, so scan the channels again. */
2663 __scan_channels(intf, &bmc->fetch_id);
2664
2665 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2666
2667 out:
2668 if (rv && prev_dyn_id_set) {
2669 rv = 0; /* Ignore failures if we have previous data. */
2670 bmc->dyn_id_set = prev_dyn_id_set;
2671 }
2672 if (!rv) {
2673 bmc->id = bmc->fetch_id;
2674 if (bmc->dyn_guid_set)
2675 bmc->guid = bmc->fetch_guid;
2676 else if (prev_guid_set)
2677 /*
2678 * The guid used to be valid but the fetch failed;
2679 * just use the cached value.
2680 */ 2681 bmc->dyn_guid_set = prev_guid_set; 2682 } 2683 out_noprocessing: 2684 if (!rv) { 2685 if (id) 2686 *id = bmc->id; 2687 2688 if (guid_set) 2689 *guid_set = bmc->dyn_guid_set; 2690 2691 if (guid && bmc->dyn_guid_set) 2692 *guid = bmc->guid; 2693 } 2694 2695 mutex_unlock(&bmc->dyn_mutex); 2696 mutex_unlock(&intf->bmc_reg_mutex); 2697 2698 kref_put(&intf->refcount, intf_free); 2699 return rv; 2700 } 2701 2702 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2703 struct ipmi_device_id *id, 2704 bool *guid_set, guid_t *guid) 2705 { 2706 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); 2707 } 2708 2709 static ssize_t device_id_show(struct device *dev, 2710 struct device_attribute *attr, 2711 char *buf) 2712 { 2713 struct bmc_device *bmc = to_bmc_device(dev); 2714 struct ipmi_device_id id; 2715 int rv; 2716 2717 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2718 if (rv) 2719 return rv; 2720 2721 return sysfs_emit(buf, "%u\n", id.device_id); 2722 } 2723 static DEVICE_ATTR_RO(device_id); 2724 2725 static ssize_t provides_device_sdrs_show(struct device *dev, 2726 struct device_attribute *attr, 2727 char *buf) 2728 { 2729 struct bmc_device *bmc = to_bmc_device(dev); 2730 struct ipmi_device_id id; 2731 int rv; 2732 2733 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2734 if (rv) 2735 return rv; 2736 2737 return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7); 2738 } 2739 static DEVICE_ATTR_RO(provides_device_sdrs); 2740 2741 static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2742 char *buf) 2743 { 2744 struct bmc_device *bmc = to_bmc_device(dev); 2745 struct ipmi_device_id id; 2746 int rv; 2747 2748 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2749 if (rv) 2750 return rv; 2751 2752 return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F); 2753 } 2754 static DEVICE_ATTR_RO(revision); 2755 2756 static ssize_t firmware_revision_show(struct device *dev, 2757 struct device_attribute *attr, 2758 char *buf) 2759 { 2760 struct bmc_device *bmc = to_bmc_device(dev); 2761 struct ipmi_device_id id; 2762 int rv; 2763 2764 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2765 if (rv) 2766 return rv; 2767 2768 return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1, 2769 id.firmware_revision_2); 2770 } 2771 static DEVICE_ATTR_RO(firmware_revision); 2772 2773 static ssize_t ipmi_version_show(struct device *dev, 2774 struct device_attribute *attr, 2775 char *buf) 2776 { 2777 struct bmc_device *bmc = to_bmc_device(dev); 2778 struct ipmi_device_id id; 2779 int rv; 2780 2781 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2782 if (rv) 2783 return rv; 2784 2785 return sysfs_emit(buf, "%u.%u\n", 2786 ipmi_version_major(&id), 2787 ipmi_version_minor(&id)); 2788 } 2789 static DEVICE_ATTR_RO(ipmi_version); 2790 2791 static ssize_t add_dev_support_show(struct device *dev, 2792 struct device_attribute *attr, 2793 char *buf) 2794 { 2795 struct bmc_device *bmc = to_bmc_device(dev); 2796 struct ipmi_device_id id; 2797 int rv; 2798 2799 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2800 if (rv) 2801 return rv; 2802 2803 return sysfs_emit(buf, "0x%02x\n", id.additional_device_support); 2804 } 2805 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, 2806 NULL); 2807 2808 static ssize_t manufacturer_id_show(struct device *dev, 2809 struct device_attribute *attr, 2810 char *buf) 2811 { 2812 struct bmc_device *bmc = to_bmc_device(dev); 2813 struct ipmi_device_id id; 2814 int rv; 2815 2816 
rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2817 if (rv) 2818 return rv; 2819 2820 return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id); 2821 } 2822 static DEVICE_ATTR_RO(manufacturer_id); 2823 2824 static ssize_t product_id_show(struct device *dev, 2825 struct device_attribute *attr, 2826 char *buf) 2827 { 2828 struct bmc_device *bmc = to_bmc_device(dev); 2829 struct ipmi_device_id id; 2830 int rv; 2831 2832 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2833 if (rv) 2834 return rv; 2835 2836 return sysfs_emit(buf, "0x%4.4x\n", id.product_id); 2837 } 2838 static DEVICE_ATTR_RO(product_id); 2839 2840 static ssize_t aux_firmware_rev_show(struct device *dev, 2841 struct device_attribute *attr, 2842 char *buf) 2843 { 2844 struct bmc_device *bmc = to_bmc_device(dev); 2845 struct ipmi_device_id id; 2846 int rv; 2847 2848 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2849 if (rv) 2850 return rv; 2851 2852 return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2853 id.aux_firmware_revision[3], 2854 id.aux_firmware_revision[2], 2855 id.aux_firmware_revision[1], 2856 id.aux_firmware_revision[0]); 2857 } 2858 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2859 2860 static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2861 char *buf) 2862 { 2863 struct bmc_device *bmc = to_bmc_device(dev); 2864 bool guid_set; 2865 guid_t guid; 2866 int rv; 2867 2868 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid); 2869 if (rv) 2870 return rv; 2871 if (!guid_set) 2872 return -ENOENT; 2873 2874 return sysfs_emit(buf, "%pUl\n", &guid); 2875 } 2876 static DEVICE_ATTR_RO(guid); 2877 2878 static struct attribute *bmc_dev_attrs[] = { 2879 &dev_attr_device_id.attr, 2880 &dev_attr_provides_device_sdrs.attr, 2881 &dev_attr_revision.attr, 2882 &dev_attr_firmware_revision.attr, 2883 &dev_attr_ipmi_version.attr, 2884 &dev_attr_additional_device_support.attr, 2885 &dev_attr_manufacturer_id.attr, 2886 &dev_attr_product_id.attr, 2887 &dev_attr_aux_firmware_revision.attr, 2888 &dev_attr_guid.attr, 2889 NULL 2890 }; 2891 2892 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, 2893 struct attribute *attr, int idx) 2894 { 2895 struct device *dev = kobj_to_dev(kobj); 2896 struct bmc_device *bmc = to_bmc_device(dev); 2897 umode_t mode = attr->mode; 2898 int rv; 2899 2900 if (attr == &dev_attr_aux_firmware_revision.attr) { 2901 struct ipmi_device_id id; 2902 2903 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2904 return (!rv && id.aux_firmware_revision_set) ? mode : 0; 2905 } 2906 if (attr == &dev_attr_guid.attr) { 2907 bool guid_set; 2908 2909 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); 2910 return (!rv && guid_set) ? 
mode : 0; 2911 } 2912 return mode; 2913 } 2914 2915 static const struct attribute_group bmc_dev_attr_group = { 2916 .attrs = bmc_dev_attrs, 2917 .is_visible = bmc_dev_attr_is_visible, 2918 }; 2919 2920 static const struct attribute_group *bmc_dev_attr_groups[] = { 2921 &bmc_dev_attr_group, 2922 NULL 2923 }; 2924 2925 static const struct device_type bmc_device_type = { 2926 .groups = bmc_dev_attr_groups, 2927 }; 2928 2929 static int __find_bmc_guid(struct device *dev, const void *data) 2930 { 2931 const guid_t *guid = data; 2932 struct bmc_device *bmc; 2933 int rv; 2934 2935 if (dev->type != &bmc_device_type) 2936 return 0; 2937 2938 bmc = to_bmc_device(dev); 2939 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid); 2940 if (rv) 2941 rv = kref_get_unless_zero(&bmc->usecount); 2942 return rv; 2943 } 2944 2945 /* 2946 * Returns with the bmc's usecount incremented, if it is non-NULL. 2947 */ 2948 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, 2949 guid_t *guid) 2950 { 2951 struct device *dev; 2952 struct bmc_device *bmc = NULL; 2953 2954 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 2955 if (dev) { 2956 bmc = to_bmc_device(dev); 2957 put_device(dev); 2958 } 2959 return bmc; 2960 } 2961 2962 struct prod_dev_id { 2963 unsigned int product_id; 2964 unsigned char device_id; 2965 }; 2966 2967 static int __find_bmc_prod_dev_id(struct device *dev, const void *data) 2968 { 2969 const struct prod_dev_id *cid = data; 2970 struct bmc_device *bmc; 2971 int rv; 2972 2973 if (dev->type != &bmc_device_type) 2974 return 0; 2975 2976 bmc = to_bmc_device(dev); 2977 rv = (bmc->id.product_id == cid->product_id 2978 && bmc->id.device_id == cid->device_id); 2979 if (rv) 2980 rv = kref_get_unless_zero(&bmc->usecount); 2981 return rv; 2982 } 2983 2984 /* 2985 * Returns with the bmc's usecount incremented, if it is non-NULL. 2986 */ 2987 static struct bmc_device *ipmi_find_bmc_prod_dev_id( 2988 struct device_driver *drv, 2989 unsigned int product_id, unsigned char device_id) 2990 { 2991 struct prod_dev_id id = { 2992 .product_id = product_id, 2993 .device_id = device_id, 2994 }; 2995 struct device *dev; 2996 struct bmc_device *bmc = NULL; 2997 2998 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 2999 if (dev) { 3000 bmc = to_bmc_device(dev); 3001 put_device(dev); 3002 } 3003 return bmc; 3004 } 3005 3006 static DEFINE_IDA(ipmi_bmc_ida); 3007 3008 static void 3009 release_bmc_device(struct device *dev) 3010 { 3011 kfree(to_bmc_device(dev)); 3012 } 3013 3014 static void cleanup_bmc_work(struct work_struct *work) 3015 { 3016 struct bmc_device *bmc = container_of(work, struct bmc_device, 3017 remove_work); 3018 int id = bmc->pdev.id; /* Unregister overwrites id */ 3019 3020 platform_device_unregister(&bmc->pdev); 3021 ida_simple_remove(&ipmi_bmc_ida, id); 3022 } 3023 3024 static void 3025 cleanup_bmc_device(struct kref *ref) 3026 { 3027 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 3028 3029 /* 3030 * Remove the platform device in a work queue to avoid issues 3031 * with removing the device attributes while reading a device 3032 * attribute. 3033 */ 3034 queue_work(remove_work_wq, &bmc->remove_work); 3035 } 3036 3037 /* 3038 * Must be called with intf->bmc_reg_mutex held. 
3039 */
3040 static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
3041 {
3042 struct bmc_device *bmc = intf->bmc;
3043
3044 if (!intf->bmc_registered)
3045 return;
3046
3047 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3048 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
3049 kfree(intf->my_dev_name);
3050 intf->my_dev_name = NULL;
3051
3052 mutex_lock(&bmc->dyn_mutex);
3053 list_del(&intf->bmc_link);
3054 mutex_unlock(&bmc->dyn_mutex);
3055 intf->bmc = &intf->tmp_bmc;
3056 kref_put(&bmc->usecount, cleanup_bmc_device);
3057 intf->bmc_registered = false;
3058 }
3059
3060 static void ipmi_bmc_unregister(struct ipmi_smi *intf)
3061 {
3062 mutex_lock(&intf->bmc_reg_mutex);
3063 __ipmi_bmc_unregister(intf);
3064 mutex_unlock(&intf->bmc_reg_mutex);
3065 }
3066
3067 /*
3068 * Must be called with intf->bmc_reg_mutex held.
3069 */
3070 static int __ipmi_bmc_register(struct ipmi_smi *intf,
3071 struct ipmi_device_id *id,
3072 bool guid_set, guid_t *guid, int intf_num)
3073 {
3074 int rv;
3075 struct bmc_device *bmc;
3076 struct bmc_device *old_bmc;
3077
3078 /*
3079 * platform_device_register() can cause bmc_reg_mutex to
3080 * be claimed because of the is_visible functions of
3081 * the attributes. Eliminate possible recursion and
3082 * release the lock.
3083 */
3084 intf->in_bmc_register = true;
3085 mutex_unlock(&intf->bmc_reg_mutex);
3086
3087 /*
3088 * Try to find if there is a bmc_device struct
3089 * representing the interfaced BMC already.
3090 */
3091 mutex_lock(&ipmidriver_mutex);
3092 if (guid_set)
3093 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
3094 else
3095 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
3096 id->product_id,
3097 id->device_id);
3098
3099 /*
3100 * If there is already a bmc_device, free the new one;
3101 * otherwise register the new BMC device.
3102 */
3103 if (old_bmc) {
3104 bmc = old_bmc;
3105 /*
3106 * Note: old_bmc already has usecount incremented by
3107 * the BMC find functions.
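 * That reference keeps the shared bmc_device alive while this
 * interface uses it; the matching kref_put() is done in
 * __ipmi_bmc_unregister() or in the error paths below.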
3108 */ 3109 intf->bmc = old_bmc; 3110 mutex_lock(&bmc->dyn_mutex); 3111 list_add_tail(&intf->bmc_link, &bmc->intfs); 3112 mutex_unlock(&bmc->dyn_mutex); 3113 3114 dev_info(intf->si_dev, 3115 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3116 bmc->id.manufacturer_id, 3117 bmc->id.product_id, 3118 bmc->id.device_id); 3119 } else { 3120 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL); 3121 if (!bmc) { 3122 rv = -ENOMEM; 3123 goto out; 3124 } 3125 INIT_LIST_HEAD(&bmc->intfs); 3126 mutex_init(&bmc->dyn_mutex); 3127 INIT_WORK(&bmc->remove_work, cleanup_bmc_work); 3128 3129 bmc->id = *id; 3130 bmc->dyn_id_set = 1; 3131 bmc->dyn_guid_set = guid_set; 3132 bmc->guid = *guid; 3133 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 3134 3135 bmc->pdev.name = "ipmi_bmc"; 3136 3137 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL); 3138 if (rv < 0) { 3139 kfree(bmc); 3140 goto out; 3141 } 3142 3143 bmc->pdev.dev.driver = &ipmidriver.driver; 3144 bmc->pdev.id = rv; 3145 bmc->pdev.dev.release = release_bmc_device; 3146 bmc->pdev.dev.type = &bmc_device_type; 3147 kref_init(&bmc->usecount); 3148 3149 intf->bmc = bmc; 3150 mutex_lock(&bmc->dyn_mutex); 3151 list_add_tail(&intf->bmc_link, &bmc->intfs); 3152 mutex_unlock(&bmc->dyn_mutex); 3153 3154 rv = platform_device_register(&bmc->pdev); 3155 if (rv) { 3156 dev_err(intf->si_dev, 3157 "Unable to register bmc device: %d\n", 3158 rv); 3159 goto out_list_del; 3160 } 3161 3162 dev_info(intf->si_dev, 3163 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3164 bmc->id.manufacturer_id, 3165 bmc->id.product_id, 3166 bmc->id.device_id); 3167 } 3168 3169 /* 3170 * create symlink from system interface device to bmc device 3171 * and back. 3172 */ 3173 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); 3174 if (rv) { 3175 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); 3176 goto out_put_bmc; 3177 } 3178 3179 if (intf_num == -1) 3180 intf_num = intf->intf_num; 3181 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); 3182 if (!intf->my_dev_name) { 3183 rv = -ENOMEM; 3184 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", 3185 rv); 3186 goto out_unlink1; 3187 } 3188 3189 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, 3190 intf->my_dev_name); 3191 if (rv) { 3192 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", 3193 rv); 3194 goto out_free_my_dev_name; 3195 } 3196 3197 intf->bmc_registered = true; 3198 3199 out: 3200 mutex_unlock(&ipmidriver_mutex); 3201 mutex_lock(&intf->bmc_reg_mutex); 3202 intf->in_bmc_register = false; 3203 return rv; 3204 3205 3206 out_free_my_dev_name: 3207 kfree(intf->my_dev_name); 3208 intf->my_dev_name = NULL; 3209 3210 out_unlink1: 3211 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3212 3213 out_put_bmc: 3214 mutex_lock(&bmc->dyn_mutex); 3215 list_del(&intf->bmc_link); 3216 mutex_unlock(&bmc->dyn_mutex); 3217 intf->bmc = &intf->tmp_bmc; 3218 kref_put(&bmc->usecount, cleanup_bmc_device); 3219 goto out; 3220 3221 out_list_del: 3222 mutex_lock(&bmc->dyn_mutex); 3223 list_del(&intf->bmc_link); 3224 mutex_unlock(&bmc->dyn_mutex); 3225 intf->bmc = &intf->tmp_bmc; 3226 put_device(&bmc->pdev.dev); 3227 goto out; 3228 } 3229 3230 static int 3231 send_guid_cmd(struct ipmi_smi *intf, int chan) 3232 { 3233 struct kernel_ipmi_msg msg; 3234 struct ipmi_system_interface_addr si; 3235 3236 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3237 si.channel = IPMI_BMC_CHANNEL; 3238 si.lun = 0; 3239 3240 msg.netfn = 
IPMI_NETFN_APP_REQUEST; 3241 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 3242 msg.data = NULL; 3243 msg.data_len = 0; 3244 return i_ipmi_request(NULL, 3245 intf, 3246 (struct ipmi_addr *) &si, 3247 0, 3248 &msg, 3249 intf, 3250 NULL, 3251 NULL, 3252 0, 3253 intf->addrinfo[0].address, 3254 intf->addrinfo[0].lun, 3255 -1, 0); 3256 } 3257 3258 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3259 { 3260 struct bmc_device *bmc = intf->bmc; 3261 3262 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3263 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 3264 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 3265 /* Not for me */ 3266 return; 3267 3268 if (msg->msg.data[0] != 0) { 3269 /* Error from getting the GUID, the BMC doesn't have one. */ 3270 bmc->dyn_guid_set = 0; 3271 goto out; 3272 } 3273 3274 if (msg->msg.data_len < UUID_SIZE + 1) { 3275 bmc->dyn_guid_set = 0; 3276 dev_warn(intf->si_dev, 3277 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", 3278 msg->msg.data_len, UUID_SIZE + 1); 3279 goto out; 3280 } 3281 3282 import_guid(&bmc->fetch_guid, msg->msg.data + 1); 3283 /* 3284 * Make sure the guid data is available before setting 3285 * dyn_guid_set. 3286 */ 3287 smp_wmb(); 3288 bmc->dyn_guid_set = 1; 3289 out: 3290 wake_up(&intf->waitq); 3291 } 3292 3293 static void __get_guid(struct ipmi_smi *intf) 3294 { 3295 int rv; 3296 struct bmc_device *bmc = intf->bmc; 3297 3298 bmc->dyn_guid_set = 2; 3299 intf->null_user_handler = guid_handler; 3300 rv = send_guid_cmd(intf, 0); 3301 if (rv) 3302 /* Send failed, no GUID available. */ 3303 bmc->dyn_guid_set = 0; 3304 else 3305 wait_event(intf->waitq, bmc->dyn_guid_set != 2); 3306 3307 /* dyn_guid_set makes the guid data available. */ 3308 smp_rmb(); 3309 3310 intf->null_user_handler = NULL; 3311 } 3312 3313 static int 3314 send_channel_info_cmd(struct ipmi_smi *intf, int chan) 3315 { 3316 struct kernel_ipmi_msg msg; 3317 unsigned char data[1]; 3318 struct ipmi_system_interface_addr si; 3319 3320 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3321 si.channel = IPMI_BMC_CHANNEL; 3322 si.lun = 0; 3323 3324 msg.netfn = IPMI_NETFN_APP_REQUEST; 3325 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 3326 msg.data = data; 3327 msg.data_len = 1; 3328 data[0] = chan; 3329 return i_ipmi_request(NULL, 3330 intf, 3331 (struct ipmi_addr *) &si, 3332 0, 3333 &msg, 3334 intf, 3335 NULL, 3336 NULL, 3337 0, 3338 intf->addrinfo[0].address, 3339 intf->addrinfo[0].lun, 3340 -1, 0); 3341 } 3342 3343 static void 3344 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3345 { 3346 int rv = 0; 3347 int ch; 3348 unsigned int set = intf->curr_working_cset; 3349 struct ipmi_channel *chans; 3350 3351 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3352 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3353 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { 3354 /* It's the one we want */ 3355 if (msg->msg.data[0] != 0) { 3356 /* Got an error from the channel, just go on. */ 3357 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 3358 /* 3359 * If the MC does not support this 3360 * command, that is legal. We just 3361 * assume it has one IPMB at channel 3362 * zero. 
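 * (This is the same single-channel fallback that __scan_channels()
 * applies to pre-IPMI-1.5 controllers below.)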
3363 */ 3364 intf->wchannels[set].c[0].medium 3365 = IPMI_CHANNEL_MEDIUM_IPMB; 3366 intf->wchannels[set].c[0].protocol 3367 = IPMI_CHANNEL_PROTOCOL_IPMB; 3368 3369 intf->channel_list = intf->wchannels + set; 3370 intf->channels_ready = true; 3371 wake_up(&intf->waitq); 3372 goto out; 3373 } 3374 goto next_channel; 3375 } 3376 if (msg->msg.data_len < 4) { 3377 /* Message not big enough, just go on. */ 3378 goto next_channel; 3379 } 3380 ch = intf->curr_channel; 3381 chans = intf->wchannels[set].c; 3382 chans[ch].medium = msg->msg.data[2] & 0x7f; 3383 chans[ch].protocol = msg->msg.data[3] & 0x1f; 3384 3385 next_channel: 3386 intf->curr_channel++; 3387 if (intf->curr_channel >= IPMI_MAX_CHANNELS) { 3388 intf->channel_list = intf->wchannels + set; 3389 intf->channels_ready = true; 3390 wake_up(&intf->waitq); 3391 } else { 3392 intf->channel_list = intf->wchannels + set; 3393 intf->channels_ready = true; 3394 rv = send_channel_info_cmd(intf, intf->curr_channel); 3395 } 3396 3397 if (rv) { 3398 /* Got an error somehow, just give up. */ 3399 dev_warn(intf->si_dev, 3400 "Error sending channel information for channel %d: %d\n", 3401 intf->curr_channel, rv); 3402 3403 intf->channel_list = intf->wchannels + set; 3404 intf->channels_ready = true; 3405 wake_up(&intf->waitq); 3406 } 3407 } 3408 out: 3409 return; 3410 } 3411 3412 /* 3413 * Must be holding intf->bmc_reg_mutex to call this. 3414 */ 3415 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) 3416 { 3417 int rv; 3418 3419 if (ipmi_version_major(id) > 1 3420 || (ipmi_version_major(id) == 1 3421 && ipmi_version_minor(id) >= 5)) { 3422 unsigned int set; 3423 3424 /* 3425 * Start scanning the channels to see what is 3426 * available. 3427 */ 3428 set = !intf->curr_working_cset; 3429 intf->curr_working_cset = set; 3430 memset(&intf->wchannels[set], 0, 3431 sizeof(struct ipmi_channel_set)); 3432 3433 intf->null_user_handler = channel_handler; 3434 intf->curr_channel = 0; 3435 rv = send_channel_info_cmd(intf, 0); 3436 if (rv) { 3437 dev_warn(intf->si_dev, 3438 "Error sending channel information for channel 0, %d\n", 3439 rv); 3440 intf->null_user_handler = NULL; 3441 return -EIO; 3442 } 3443 3444 /* Wait for the channel info to be read. */ 3445 wait_event(intf->waitq, intf->channels_ready); 3446 intf->null_user_handler = NULL; 3447 } else { 3448 unsigned int set = intf->curr_working_cset; 3449 3450 /* Assume a single IPMB channel at zero. 
*/ 3451 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 3452 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 3453 intf->channel_list = intf->wchannels + set; 3454 intf->channels_ready = true; 3455 } 3456 3457 return 0; 3458 } 3459 3460 static void ipmi_poll(struct ipmi_smi *intf) 3461 { 3462 if (intf->handlers->poll) 3463 intf->handlers->poll(intf->send_info); 3464 /* In case something came in */ 3465 handle_new_recv_msgs(intf); 3466 } 3467 3468 void ipmi_poll_interface(struct ipmi_user *user) 3469 { 3470 ipmi_poll(user->intf); 3471 } 3472 EXPORT_SYMBOL(ipmi_poll_interface); 3473 3474 static void redo_bmc_reg(struct work_struct *work) 3475 { 3476 struct ipmi_smi *intf = container_of(work, struct ipmi_smi, 3477 bmc_reg_work); 3478 3479 if (!intf->in_shutdown) 3480 bmc_get_device_id(intf, NULL, NULL, NULL, NULL); 3481 3482 kref_put(&intf->refcount, intf_free); 3483 } 3484 3485 int ipmi_add_smi(struct module *owner, 3486 const struct ipmi_smi_handlers *handlers, 3487 void *send_info, 3488 struct device *si_dev, 3489 unsigned char slave_addr) 3490 { 3491 int i, j; 3492 int rv; 3493 struct ipmi_smi *intf, *tintf; 3494 struct list_head *link; 3495 struct ipmi_device_id id; 3496 3497 /* 3498 * Make sure the driver is actually initialized, this handles 3499 * problems with initialization order. 3500 */ 3501 rv = ipmi_init_msghandler(); 3502 if (rv) 3503 return rv; 3504 3505 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 3506 if (!intf) 3507 return -ENOMEM; 3508 3509 rv = init_srcu_struct(&intf->users_srcu); 3510 if (rv) { 3511 kfree(intf); 3512 return rv; 3513 } 3514 3515 intf->owner = owner; 3516 intf->bmc = &intf->tmp_bmc; 3517 INIT_LIST_HEAD(&intf->bmc->intfs); 3518 mutex_init(&intf->bmc->dyn_mutex); 3519 INIT_LIST_HEAD(&intf->bmc_link); 3520 mutex_init(&intf->bmc_reg_mutex); 3521 intf->intf_num = -1; /* Mark it invalid for now. */ 3522 kref_init(&intf->refcount); 3523 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg); 3524 intf->si_dev = si_dev; 3525 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 3526 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR; 3527 intf->addrinfo[j].lun = 2; 3528 } 3529 if (slave_addr != 0) 3530 intf->addrinfo[0].address = slave_addr; 3531 INIT_LIST_HEAD(&intf->users); 3532 intf->handlers = handlers; 3533 intf->send_info = send_info; 3534 spin_lock_init(&intf->seq_lock); 3535 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 3536 intf->seq_table[j].inuse = 0; 3537 intf->seq_table[j].seqid = 0; 3538 } 3539 intf->curr_seq = 0; 3540 spin_lock_init(&intf->waiting_rcv_msgs_lock); 3541 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 3542 tasklet_setup(&intf->recv_tasklet, 3543 smi_recv_tasklet); 3544 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 3545 spin_lock_init(&intf->xmit_msgs_lock); 3546 INIT_LIST_HEAD(&intf->xmit_msgs); 3547 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 3548 spin_lock_init(&intf->events_lock); 3549 spin_lock_init(&intf->watch_lock); 3550 atomic_set(&intf->event_waiters, 0); 3551 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3552 INIT_LIST_HEAD(&intf->waiting_events); 3553 intf->waiting_events_count = 0; 3554 mutex_init(&intf->cmd_rcvrs_mutex); 3555 spin_lock_init(&intf->maintenance_mode_lock); 3556 INIT_LIST_HEAD(&intf->cmd_rcvrs); 3557 init_waitqueue_head(&intf->waitq); 3558 for (i = 0; i < IPMI_NUM_STATS; i++) 3559 atomic_set(&intf->stats[i], 0); 3560 3561 mutex_lock(&ipmi_interfaces_mutex); 3562 /* Look for a hole in the numbers. 
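 * For example, if interfaces 0, 1 and 3 already exist, the loop
 * below stops at the gap and the new interface becomes number 2.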
*/ 3563 i = 0; 3564 link = &ipmi_interfaces; 3565 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link, 3566 ipmi_interfaces_mutex_held()) { 3567 if (tintf->intf_num != i) { 3568 link = &tintf->link; 3569 break; 3570 } 3571 i++; 3572 } 3573 /* Add the new interface in numeric order. */ 3574 if (i == 0) 3575 list_add_rcu(&intf->link, &ipmi_interfaces); 3576 else 3577 list_add_tail_rcu(&intf->link, link); 3578 3579 rv = handlers->start_processing(send_info, intf); 3580 if (rv) 3581 goto out_err; 3582 3583 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3584 if (rv) { 3585 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3586 goto out_err_started; 3587 } 3588 3589 mutex_lock(&intf->bmc_reg_mutex); 3590 rv = __scan_channels(intf, &id); 3591 mutex_unlock(&intf->bmc_reg_mutex); 3592 if (rv) 3593 goto out_err_bmc_reg; 3594 3595 /* 3596 * Keep memory order straight for RCU readers. Make 3597 * sure everything else is committed to memory before 3598 * setting intf_num to mark the interface valid. 3599 */ 3600 smp_wmb(); 3601 intf->intf_num = i; 3602 mutex_unlock(&ipmi_interfaces_mutex); 3603 3604 /* After this point the interface is legal to use. */ 3605 call_smi_watchers(i, intf->si_dev); 3606 3607 return 0; 3608 3609 out_err_bmc_reg: 3610 ipmi_bmc_unregister(intf); 3611 out_err_started: 3612 if (intf->handlers->shutdown) 3613 intf->handlers->shutdown(intf->send_info); 3614 out_err: 3615 list_del_rcu(&intf->link); 3616 mutex_unlock(&ipmi_interfaces_mutex); 3617 synchronize_srcu(&ipmi_interfaces_srcu); 3618 cleanup_srcu_struct(&intf->users_srcu); 3619 kref_put(&intf->refcount, intf_free); 3620 3621 return rv; 3622 } 3623 EXPORT_SYMBOL(ipmi_add_smi); 3624 3625 static void deliver_smi_err_response(struct ipmi_smi *intf, 3626 struct ipmi_smi_msg *msg, 3627 unsigned char err) 3628 { 3629 msg->rsp[0] = msg->data[0] | 4; 3630 msg->rsp[1] = msg->data[1]; 3631 msg->rsp[2] = err; 3632 msg->rsp_size = 3; 3633 /* It's an error, so it will never requeue, no need to check return. */ 3634 handle_one_recv_msg(intf, msg); 3635 } 3636 3637 static void cleanup_smi_msgs(struct ipmi_smi *intf) 3638 { 3639 int i; 3640 struct seq_table *ent; 3641 struct ipmi_smi_msg *msg; 3642 struct list_head *entry; 3643 struct list_head tmplist; 3644 3645 /* Clear out our transmit queues and hold the messages. */ 3646 INIT_LIST_HEAD(&tmplist); 3647 list_splice_tail(&intf->hp_xmit_msgs, &tmplist); 3648 list_splice_tail(&intf->xmit_msgs, &tmplist); 3649 3650 /* Current message first, to preserve order */ 3651 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { 3652 /* Wait for the message to clear out. */ 3653 schedule_timeout(1); 3654 } 3655 3656 /* No need for locks, the interface is down. */ 3657 3658 /* 3659 * Return errors for all pending messages in queue and in the 3660 * tables waiting for remote responses. 
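 * Each queued transmit gets a synthesized IPMI_ERR_UNSPECIFIED
 * completion via deliver_smi_err_response(), and each pending
 * sequence-table entry via deliver_err_response(), so no caller is
 * left waiting forever.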
3661 */ 3662 while (!list_empty(&tmplist)) { 3663 entry = tmplist.next; 3664 list_del(entry); 3665 msg = list_entry(entry, struct ipmi_smi_msg, link); 3666 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); 3667 } 3668 3669 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 3670 ent = &intf->seq_table[i]; 3671 if (!ent->inuse) 3672 continue; 3673 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); 3674 } 3675 } 3676 3677 void ipmi_unregister_smi(struct ipmi_smi *intf) 3678 { 3679 struct ipmi_smi_watcher *w; 3680 int intf_num, index; 3681 3682 if (!intf) 3683 return; 3684 intf_num = intf->intf_num; 3685 mutex_lock(&ipmi_interfaces_mutex); 3686 intf->intf_num = -1; 3687 intf->in_shutdown = true; 3688 list_del_rcu(&intf->link); 3689 mutex_unlock(&ipmi_interfaces_mutex); 3690 synchronize_srcu(&ipmi_interfaces_srcu); 3691 3692 /* At this point no users can be added to the interface. */ 3693 3694 /* 3695 * Call all the watcher interfaces to tell them that 3696 * an interface is going away. 3697 */ 3698 mutex_lock(&smi_watchers_mutex); 3699 list_for_each_entry(w, &smi_watchers, link) 3700 w->smi_gone(intf_num); 3701 mutex_unlock(&smi_watchers_mutex); 3702 3703 index = srcu_read_lock(&intf->users_srcu); 3704 while (!list_empty(&intf->users)) { 3705 struct ipmi_user *user = 3706 container_of(list_next_rcu(&intf->users), 3707 struct ipmi_user, link); 3708 3709 _ipmi_destroy_user(user); 3710 } 3711 srcu_read_unlock(&intf->users_srcu, index); 3712 3713 if (intf->handlers->shutdown) 3714 intf->handlers->shutdown(intf->send_info); 3715 3716 cleanup_smi_msgs(intf); 3717 3718 ipmi_bmc_unregister(intf); 3719 3720 cleanup_srcu_struct(&intf->users_srcu); 3721 kref_put(&intf->refcount, intf_free); 3722 } 3723 EXPORT_SYMBOL(ipmi_unregister_smi); 3724 3725 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, 3726 struct ipmi_smi_msg *msg) 3727 { 3728 struct ipmi_ipmb_addr ipmb_addr; 3729 struct ipmi_recv_msg *recv_msg; 3730 3731 /* 3732 * This is 11, not 10, because the response must contain a 3733 * completion code. 3734 */ 3735 if (msg->rsp_size < 11) { 3736 /* Message not big enough, just ignore it. */ 3737 ipmi_inc_stat(intf, invalid_ipmb_responses); 3738 return 0; 3739 } 3740 3741 if (msg->rsp[2] != 0) { 3742 /* An error getting the response, just ignore it. */ 3743 return 0; 3744 } 3745 3746 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 3747 ipmb_addr.slave_addr = msg->rsp[6]; 3748 ipmb_addr.channel = msg->rsp[3] & 0x0f; 3749 ipmb_addr.lun = msg->rsp[7] & 3; 3750 3751 /* 3752 * It's a response from a remote entity. Look up the sequence 3753 * number and handle the response. 3754 */ 3755 if (intf_find_seq(intf, 3756 msg->rsp[7] >> 2, 3757 msg->rsp[3] & 0x0f, 3758 msg->rsp[8], 3759 (msg->rsp[4] >> 2) & (~1), 3760 (struct ipmi_addr *) &ipmb_addr, 3761 &recv_msg)) { 3762 /* 3763 * We were unable to find the sequence number, 3764 * so just nuke the message. 3765 */ 3766 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3767 return 0; 3768 } 3769 3770 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); 3771 /* 3772 * The other fields matched, so no need to set them, except 3773 * for netfn, which needs to be the response that was 3774 * returned, not the request value. 
3775 */ 3776 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3777 recv_msg->msg.data = recv_msg->msg_data; 3778 recv_msg->msg.data_len = msg->rsp_size - 10; 3779 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3780 if (deliver_response(intf, recv_msg)) 3781 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3782 else 3783 ipmi_inc_stat(intf, handled_ipmb_responses); 3784 3785 return 0; 3786 } 3787 3788 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, 3789 struct ipmi_smi_msg *msg) 3790 { 3791 struct cmd_rcvr *rcvr; 3792 int rv = 0; 3793 unsigned char netfn; 3794 unsigned char cmd; 3795 unsigned char chan; 3796 struct ipmi_user *user = NULL; 3797 struct ipmi_ipmb_addr *ipmb_addr; 3798 struct ipmi_recv_msg *recv_msg; 3799 3800 if (msg->rsp_size < 10) { 3801 /* Message not big enough, just ignore it. */ 3802 ipmi_inc_stat(intf, invalid_commands); 3803 return 0; 3804 } 3805 3806 if (msg->rsp[2] != 0) { 3807 /* An error getting the response, just ignore it. */ 3808 return 0; 3809 } 3810 3811 netfn = msg->rsp[4] >> 2; 3812 cmd = msg->rsp[8]; 3813 chan = msg->rsp[3] & 0xf; 3814 3815 rcu_read_lock(); 3816 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3817 if (rcvr) { 3818 user = rcvr->user; 3819 kref_get(&user->refcount); 3820 } else 3821 user = NULL; 3822 rcu_read_unlock(); 3823 3824 if (user == NULL) { 3825 /* We didn't find a user, deliver an error response. */ 3826 ipmi_inc_stat(intf, unhandled_commands); 3827 3828 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3829 msg->data[1] = IPMI_SEND_MSG_CMD; 3830 msg->data[2] = msg->rsp[3]; 3831 msg->data[3] = msg->rsp[6]; 3832 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3833 msg->data[5] = ipmb_checksum(&msg->data[3], 2); 3834 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; 3835 /* rqseq/lun */ 3836 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3837 msg->data[8] = msg->rsp[8]; /* cmd */ 3838 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3839 msg->data[10] = ipmb_checksum(&msg->data[6], 4); 3840 msg->data_size = 11; 3841 3842 pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data); 3843 3844 rcu_read_lock(); 3845 if (!intf->in_shutdown) { 3846 smi_send(intf, intf->handlers, msg, 0); 3847 /* 3848 * We used the message, so return the value 3849 * that causes it to not be freed or 3850 * queued. 3851 */ 3852 rv = -1; 3853 } 3854 rcu_read_unlock(); 3855 } else { 3856 recv_msg = ipmi_alloc_recv_msg(); 3857 if (!recv_msg) { 3858 /* 3859 * We couldn't allocate memory for the 3860 * message, so requeue it for handling 3861 * later. 3862 */ 3863 rv = 1; 3864 kref_put(&user->refcount, free_user); 3865 } else { 3866 /* Extract the source address from the data. */ 3867 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 3868 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 3869 ipmb_addr->slave_addr = msg->rsp[6]; 3870 ipmb_addr->lun = msg->rsp[7] & 3; 3871 ipmb_addr->channel = msg->rsp[3] & 0xf; 3872 3873 /* 3874 * Extract the rest of the message information 3875 * from the IPMB header. 3876 */ 3877 recv_msg->user = user; 3878 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3879 recv_msg->msgid = msg->rsp[7] >> 2; 3880 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3881 recv_msg->msg.cmd = msg->rsp[8]; 3882 recv_msg->msg.data = recv_msg->msg_data; 3883 3884 /* 3885 * We chop off 10, not 9 bytes because the checksum 3886 * at the end also needs to be removed. 
3887 */ 3888 recv_msg->msg.data_len = msg->rsp_size - 10; 3889 memcpy(recv_msg->msg_data, &msg->rsp[9], 3890 msg->rsp_size - 10); 3891 if (deliver_response(intf, recv_msg)) 3892 ipmi_inc_stat(intf, unhandled_commands); 3893 else 3894 ipmi_inc_stat(intf, handled_commands); 3895 } 3896 } 3897 3898 return rv; 3899 } 3900 3901 static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, 3902 struct ipmi_smi_msg *msg) 3903 { 3904 struct cmd_rcvr *rcvr; 3905 int rv = 0; 3906 struct ipmi_user *user = NULL; 3907 struct ipmi_ipmb_direct_addr *daddr; 3908 struct ipmi_recv_msg *recv_msg; 3909 unsigned char netfn = msg->rsp[0] >> 2; 3910 unsigned char cmd = msg->rsp[3]; 3911 3912 rcu_read_lock(); 3913 /* We always use channel 0 for direct messages. */ 3914 rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); 3915 if (rcvr) { 3916 user = rcvr->user; 3917 kref_get(&user->refcount); 3918 } else 3919 user = NULL; 3920 rcu_read_unlock(); 3921 3922 if (user == NULL) { 3923 /* We didn't find a user, deliver an error response. */ 3924 ipmi_inc_stat(intf, unhandled_commands); 3925 3926 msg->data[0] = (netfn + 1) << 2; 3927 msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */ 3928 msg->data[1] = msg->rsp[1]; /* Addr */ 3929 msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */ 3930 msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */ 3931 msg->data[3] = cmd; 3932 msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE; 3933 msg->data_size = 5; 3934 3935 rcu_read_lock(); 3936 if (!intf->in_shutdown) { 3937 smi_send(intf, intf->handlers, msg, 0); 3938 /* 3939 * We used the message, so return the value 3940 * that causes it to not be freed or 3941 * queued. 3942 */ 3943 rv = -1; 3944 } 3945 rcu_read_unlock(); 3946 } else { 3947 recv_msg = ipmi_alloc_recv_msg(); 3948 if (!recv_msg) { 3949 /* 3950 * We couldn't allocate memory for the 3951 * message, so requeue it for handling 3952 * later. 3953 */ 3954 rv = 1; 3955 kref_put(&user->refcount, free_user); 3956 } else { 3957 /* Extract the source address from the data. */ 3958 daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; 3959 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 3960 daddr->channel = 0; 3961 daddr->slave_addr = msg->rsp[1]; 3962 daddr->rs_lun = msg->rsp[0] & 3; 3963 daddr->rq_lun = msg->rsp[2] & 3; 3964 3965 /* 3966 * Extract the rest of the message information 3967 * from the IPMB header. 3968 */ 3969 recv_msg->user = user; 3970 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3971 recv_msg->msgid = (msg->rsp[2] >> 2); 3972 recv_msg->msg.netfn = msg->rsp[0] >> 2; 3973 recv_msg->msg.cmd = msg->rsp[3]; 3974 recv_msg->msg.data = recv_msg->msg_data; 3975 3976 recv_msg->msg.data_len = msg->rsp_size - 4; 3977 memcpy(recv_msg->msg_data, msg->rsp + 4, 3978 msg->rsp_size - 4); 3979 if (deliver_response(intf, recv_msg)) 3980 ipmi_inc_stat(intf, unhandled_commands); 3981 else 3982 ipmi_inc_stat(intf, handled_commands); 3983 } 3984 } 3985 3986 return rv; 3987 } 3988 3989 static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf, 3990 struct ipmi_smi_msg *msg) 3991 { 3992 struct ipmi_recv_msg *recv_msg; 3993 struct ipmi_ipmb_direct_addr *daddr; 3994 3995 recv_msg = (struct ipmi_recv_msg *) msg->user_data; 3996 if (recv_msg == NULL) { 3997 dev_warn(intf->si_dev, 3998 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. 
Contact your hardware vendor for assistance.\n"); 3999 return 0; 4000 } 4001 4002 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4003 recv_msg->msgid = msg->msgid; 4004 daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr; 4005 daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; 4006 daddr->channel = 0; 4007 daddr->slave_addr = msg->rsp[1]; 4008 daddr->rq_lun = msg->rsp[0] & 3; 4009 daddr->rs_lun = msg->rsp[2] & 3; 4010 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4011 recv_msg->msg.cmd = msg->rsp[3]; 4012 memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4); 4013 recv_msg->msg.data = recv_msg->msg_data; 4014 recv_msg->msg.data_len = msg->rsp_size - 4; 4015 deliver_local_response(intf, recv_msg); 4016 4017 return 0; 4018 } 4019 4020 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, 4021 struct ipmi_smi_msg *msg) 4022 { 4023 struct ipmi_lan_addr lan_addr; 4024 struct ipmi_recv_msg *recv_msg; 4025 4026 4027 /* 4028 * This is 13, not 12, because the response must contain a 4029 * completion code. 4030 */ 4031 if (msg->rsp_size < 13) { 4032 /* Message not big enough, just ignore it. */ 4033 ipmi_inc_stat(intf, invalid_lan_responses); 4034 return 0; 4035 } 4036 4037 if (msg->rsp[2] != 0) { 4038 /* An error getting the response, just ignore it. */ 4039 return 0; 4040 } 4041 4042 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 4043 lan_addr.session_handle = msg->rsp[4]; 4044 lan_addr.remote_SWID = msg->rsp[8]; 4045 lan_addr.local_SWID = msg->rsp[5]; 4046 lan_addr.channel = msg->rsp[3] & 0x0f; 4047 lan_addr.privilege = msg->rsp[3] >> 4; 4048 lan_addr.lun = msg->rsp[9] & 3; 4049 4050 /* 4051 * It's a response from a remote entity. Look up the sequence 4052 * number and handle the response. 4053 */ 4054 if (intf_find_seq(intf, 4055 msg->rsp[9] >> 2, 4056 msg->rsp[3] & 0x0f, 4057 msg->rsp[10], 4058 (msg->rsp[6] >> 2) & (~1), 4059 (struct ipmi_addr *) &lan_addr, 4060 &recv_msg)) { 4061 /* 4062 * We were unable to find the sequence number, 4063 * so just nuke the message. 4064 */ 4065 ipmi_inc_stat(intf, unhandled_lan_responses); 4066 return 0; 4067 } 4068 4069 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11); 4070 /* 4071 * The other fields matched, so no need to set them, except 4072 * for netfn, which needs to be the response that was 4073 * returned, not the request value. 4074 */ 4075 recv_msg->msg.netfn = msg->rsp[6] >> 2; 4076 recv_msg->msg.data = recv_msg->msg_data; 4077 recv_msg->msg.data_len = msg->rsp_size - 12; 4078 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4079 if (deliver_response(intf, recv_msg)) 4080 ipmi_inc_stat(intf, unhandled_lan_responses); 4081 else 4082 ipmi_inc_stat(intf, handled_lan_responses); 4083 4084 return 0; 4085 } 4086 4087 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, 4088 struct ipmi_smi_msg *msg) 4089 { 4090 struct cmd_rcvr *rcvr; 4091 int rv = 0; 4092 unsigned char netfn; 4093 unsigned char cmd; 4094 unsigned char chan; 4095 struct ipmi_user *user = NULL; 4096 struct ipmi_lan_addr *lan_addr; 4097 struct ipmi_recv_msg *recv_msg; 4098 4099 if (msg->rsp_size < 12) { 4100 /* Message not big enough, just ignore it. */ 4101 ipmi_inc_stat(intf, invalid_commands); 4102 return 0; 4103 } 4104 4105 if (msg->rsp[2] != 0) { 4106 /* An error getting the response, just ignore it. 
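 * (rsp[2] is the completion code of the Get Message command itself,
 * not part of the encapsulated LAN message.)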
*/ 4107 return 0; 4108 } 4109 4110 netfn = msg->rsp[6] >> 2; 4111 cmd = msg->rsp[10]; 4112 chan = msg->rsp[3] & 0xf; 4113 4114 rcu_read_lock(); 4115 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 4116 if (rcvr) { 4117 user = rcvr->user; 4118 kref_get(&user->refcount); 4119 } else 4120 user = NULL; 4121 rcu_read_unlock(); 4122 4123 if (user == NULL) { 4124 /* We didn't find a user, just give up. */ 4125 ipmi_inc_stat(intf, unhandled_commands); 4126 4127 /* 4128 * Don't do anything with these messages, just allow 4129 * them to be freed. 4130 */ 4131 rv = 0; 4132 } else { 4133 recv_msg = ipmi_alloc_recv_msg(); 4134 if (!recv_msg) { 4135 /* 4136 * We couldn't allocate memory for the 4137 * message, so requeue it for handling later. 4138 */ 4139 rv = 1; 4140 kref_put(&user->refcount, free_user); 4141 } else { 4142 /* Extract the source address from the data. */ 4143 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; 4144 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; 4145 lan_addr->session_handle = msg->rsp[4]; 4146 lan_addr->remote_SWID = msg->rsp[8]; 4147 lan_addr->local_SWID = msg->rsp[5]; 4148 lan_addr->lun = msg->rsp[9] & 3; 4149 lan_addr->channel = msg->rsp[3] & 0xf; 4150 lan_addr->privilege = msg->rsp[3] >> 4; 4151 4152 /* 4153 * Extract the rest of the message information 4154 * from the IPMB header. 4155 */ 4156 recv_msg->user = user; 4157 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 4158 recv_msg->msgid = msg->rsp[9] >> 2; 4159 recv_msg->msg.netfn = msg->rsp[6] >> 2; 4160 recv_msg->msg.cmd = msg->rsp[10]; 4161 recv_msg->msg.data = recv_msg->msg_data; 4162 4163 /* 4164 * We chop off 12, not 11, bytes because the checksum 4165 * at the end also needs to be removed. 4166 */ 4167 recv_msg->msg.data_len = msg->rsp_size - 12; 4168 memcpy(recv_msg->msg_data, &msg->rsp[11], 4169 msg->rsp_size - 12); 4170 if (deliver_response(intf, recv_msg)) 4171 ipmi_inc_stat(intf, unhandled_commands); 4172 else 4173 ipmi_inc_stat(intf, handled_commands); 4174 } 4175 } 4176 4177 return rv; 4178 } 4179 4180 /* 4181 * This routine will handle "Get Message" command responses with 4182 * channels that use an OEM Medium. The message format belongs to 4183 * the OEM. See IPMI 2.0 specification, Chapter 6 and 4184 * Chapter 22, sections 22.6 and 22.24 for more details. 4185 */ 4186 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, 4187 struct ipmi_smi_msg *msg) 4188 { 4189 struct cmd_rcvr *rcvr; 4190 int rv = 0; 4191 unsigned char netfn; 4192 unsigned char cmd; 4193 unsigned char chan; 4194 struct ipmi_user *user = NULL; 4195 struct ipmi_system_interface_addr *smi_addr; 4196 struct ipmi_recv_msg *recv_msg; 4197 4198 /* 4199 * We expect the OEM SW to perform error checking, 4200 * so we just do some basic sanity checks. 4201 */ 4202 if (msg->rsp_size < 4) { 4203 /* Message not big enough, just ignore it. */ 4204 ipmi_inc_stat(intf, invalid_commands); 4205 return 0; 4206 } 4207 4208 if (msg->rsp[2] != 0) { 4209 /* An error getting the response, just ignore it. */ 4210 return 0; 4211 } 4212 4213 /* 4214 * This is an OEM Message, so the OEM needs to know how to 4215 * handle the message. We do no interpretation. 4216 */ 4217 netfn = msg->rsp[0] >> 2; 4218 cmd = msg->rsp[1]; 4219 chan = msg->rsp[3] & 0xf; 4220 4221 rcu_read_lock(); 4222 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 4223 if (rcvr) { 4224 user = rcvr->user; 4225 kref_get(&user->refcount); 4226 } else 4227 user = NULL; 4228 rcu_read_unlock(); 4229 4230 if (user == NULL) { 4231 /* We didn't find a user, just give up.
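 * Commands are only delivered to a user that has claimed the
 * (netfn, cmd) pair in advance.  A minimal sketch of such a claim,
 * with a hypothetical user and the all-channels mask, using the
 * registration API this file exports:
 *
 *	rv = ipmi_register_for_cmd(user, netfn, cmd, IPMI_CHAN_ALL);
 *
 * Unclaimed commands land here and are only counted as unhandled.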
*/ 4232 ipmi_inc_stat(intf, unhandled_commands); 4233 4234 /* 4235 * Don't do anything with these messages, just allow 4236 * them to be freed. 4237 */ 4238 4239 rv = 0; 4240 } else { 4241 recv_msg = ipmi_alloc_recv_msg(); 4242 if (!recv_msg) { 4243 /* 4244 * We couldn't allocate memory for the 4245 * message, so requeue it for handling 4246 * later. 4247 */ 4248 rv = 1; 4249 kref_put(&user->refcount, free_user); 4250 } else { 4251 /* 4252 * OEM Messages are expected to be delivered via 4253 * the system interface to SMS software. We might 4254 * need to visit this again depending on OEM 4255 * requirements. 4256 */ 4257 smi_addr = ((struct ipmi_system_interface_addr *) 4258 &recv_msg->addr); 4259 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4260 smi_addr->channel = IPMI_BMC_CHANNEL; 4261 smi_addr->lun = msg->rsp[0] & 3; 4262 4263 recv_msg->user = user; 4264 recv_msg->user_msg_data = NULL; 4265 recv_msg->recv_type = IPMI_OEM_RECV_TYPE; 4266 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4267 recv_msg->msg.cmd = msg->rsp[1]; 4268 recv_msg->msg.data = recv_msg->msg_data; 4269 4270 /* 4271 * The message starts at byte 4, which follows the 4272 * Channel Byte in the "GET MESSAGE" command. 4273 */ 4274 recv_msg->msg.data_len = msg->rsp_size - 4; 4275 memcpy(recv_msg->msg_data, &msg->rsp[4], 4276 msg->rsp_size - 4); 4277 if (deliver_response(intf, recv_msg)) 4278 ipmi_inc_stat(intf, unhandled_commands); 4279 else 4280 ipmi_inc_stat(intf, handled_commands); 4281 } 4282 } 4283 4284 return rv; 4285 } 4286 4287 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, 4288 struct ipmi_smi_msg *msg) 4289 { 4290 struct ipmi_system_interface_addr *smi_addr; 4291 4292 recv_msg->msgid = 0; 4293 smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr; 4294 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4295 smi_addr->channel = IPMI_BMC_CHANNEL; 4296 smi_addr->lun = msg->rsp[0] & 3; 4297 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE; 4298 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4299 recv_msg->msg.cmd = msg->rsp[1]; 4300 memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3); 4301 recv_msg->msg.data = recv_msg->msg_data; 4302 recv_msg->msg.data_len = msg->rsp_size - 3; 4303 } 4304 4305 static int handle_read_event_rsp(struct ipmi_smi *intf, 4306 struct ipmi_smi_msg *msg) 4307 { 4308 struct ipmi_recv_msg *recv_msg, *recv_msg2; 4309 struct list_head msgs; 4310 struct ipmi_user *user; 4311 int rv = 0, deliver_count = 0, index; 4312 unsigned long flags; 4313 4314 if (msg->rsp_size < 19) { 4315 /* Message is too small to be an IPMB event. */ 4316 ipmi_inc_stat(intf, invalid_events); 4317 return 0; 4318 } 4319 4320 if (msg->rsp[2] != 0) { 4321 /* An error getting the event, just ignore it. */ 4322 return 0; 4323 } 4324 4325 INIT_LIST_HEAD(&msgs); 4326 4327 spin_lock_irqsave(&intf->events_lock, flags); 4328 4329 ipmi_inc_stat(intf, events); 4330 4331 /* 4332 * Allocate and fill in one message for every user that is 4333 * getting events. 4334 */ 4335 index = srcu_read_lock(&intf->users_srcu); 4336 list_for_each_entry_rcu(user, &intf->users, link) { 4337 if (!user->gets_events) 4338 continue; 4339 4340 recv_msg = ipmi_alloc_recv_msg(); 4341 if (!recv_msg) { 4342 srcu_read_unlock(&intf->users_srcu, index); 4343 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, 4344 link) { 4345 list_del(&recv_msg->link); 4346 ipmi_free_recv_msg(recv_msg); 4347 } 4348 /* 4349 * We couldn't allocate memory for the 4350 * message, so requeue it for handling 4351 * later.
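 * (Returning 1 makes handle_new_recv_msgs() put the SMI message
 * back on the head of waiting_rcv_msgs and retry it on a later
 * tasklet run, preserving message order.)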
4352 */ 4353 rv = 1; 4354 goto out; 4355 } 4356 4357 deliver_count++; 4358 4359 copy_event_into_recv_msg(recv_msg, msg); 4360 recv_msg->user = user; 4361 kref_get(&user->refcount); 4362 list_add_tail(&recv_msg->link, &msgs); 4363 } 4364 srcu_read_unlock(&intf->users_srcu, index); 4365 4366 if (deliver_count) { 4367 /* Now deliver all the messages. */ 4368 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { 4369 list_del(&recv_msg->link); 4370 deliver_local_response(intf, recv_msg); 4371 } 4372 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { 4373 /* 4374 * No one is waiting to receive the message, so put it in the 4375 * queue if there are not already too many things in it. 4376 */ 4377 recv_msg = ipmi_alloc_recv_msg(); 4378 if (!recv_msg) { 4379 /* 4380 * We couldn't allocate memory for the 4381 * message, so requeue it for handling 4382 * later. 4383 */ 4384 rv = 1; 4385 goto out; 4386 } 4387 4388 copy_event_into_recv_msg(recv_msg, msg); 4389 list_add_tail(&recv_msg->link, &intf->waiting_events); 4390 intf->waiting_events_count++; 4391 } else if (!intf->event_msg_printed) { 4392 /* 4393 * There are too many things in the queue; discard this 4394 * message. 4395 */ 4396 dev_warn(intf->si_dev, 4397 "Event queue full, discarding incoming events\n"); 4398 intf->event_msg_printed = 1; 4399 } 4400 4401 out: 4402 spin_unlock_irqrestore(&intf->events_lock, flags); 4403 4404 return rv; 4405 } 4406 4407 static int handle_bmc_rsp(struct ipmi_smi *intf, 4408 struct ipmi_smi_msg *msg) 4409 { 4410 struct ipmi_recv_msg *recv_msg; 4411 struct ipmi_system_interface_addr *smi_addr; 4412 4413 recv_msg = (struct ipmi_recv_msg *) msg->user_data; 4414 if (recv_msg == NULL) { 4415 dev_warn(intf->si_dev, 4416 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); 4417 return 0; 4418 } 4419 4420 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4421 recv_msg->msgid = msg->msgid; 4422 smi_addr = ((struct ipmi_system_interface_addr *) 4423 &recv_msg->addr); 4424 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4425 smi_addr->channel = IPMI_BMC_CHANNEL; 4426 smi_addr->lun = msg->rsp[0] & 3; 4427 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4428 recv_msg->msg.cmd = msg->rsp[1]; 4429 memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2); 4430 recv_msg->msg.data = recv_msg->msg_data; 4431 recv_msg->msg.data_len = msg->rsp_size - 2; 4432 deliver_local_response(intf, recv_msg); 4433 4434 return 0; 4435 } 4436 4437 /* 4438 * Handle a received message. Return 1 if the message should be requeued, 4439 * 0 if the message should be freed, or -1 if the message should not 4440 * be freed or requeued. 4441 */ 4442 static int handle_one_recv_msg(struct ipmi_smi *intf, 4443 struct ipmi_smi_msg *msg) 4444 { 4445 int requeue = 0; 4446 int chan; 4447 unsigned char cc; 4448 bool is_cmd = !((msg->rsp[0] >> 2) & 1); 4449 4450 pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp); 4451 4452 if (msg->rsp_size < 2) { 4453 /* Message is too small to be correct. */ 4454 dev_warn(intf->si_dev, 4455 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n", 4456 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size); 4457 4458 return_unspecified: 4459 /* Generate an error response for the message.
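 * The fabricated response mirrors the request: rsp[0] is the
 * request's netfn byte with the response bit (bit 2 of the encoded
 * byte) set, rsp[1] echoes the command, and rsp[2] carries
 * IPMI_ERR_UNSPECIFIED as the completion code.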
*/ 4460 msg->rsp[0] = msg->data[0] | (1 << 2); 4461 msg->rsp[1] = msg->data[1]; 4462 msg->rsp[2] = IPMI_ERR_UNSPECIFIED; 4463 msg->rsp_size = 3; 4464 } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { 4465 /* Commands must have at least 4 bytes, responses 5. */ 4466 if (is_cmd && (msg->rsp_size < 4)) { 4467 ipmi_inc_stat(intf, invalid_commands); 4468 goto out; 4469 } 4470 if (!is_cmd && (msg->rsp_size < 5)) { 4471 ipmi_inc_stat(intf, invalid_ipmb_responses); 4472 /* Construct a valid error response. */ 4473 msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */ 4474 msg->rsp[0] |= (1 << 2); /* Make it a response */ 4475 msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */ 4476 msg->rsp[1] = msg->data[1]; /* Addr */ 4477 msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */ 4478 msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */ 4479 msg->rsp[3] = msg->data[3]; /* Cmd */ 4480 msg->rsp[4] = IPMI_ERR_UNSPECIFIED; 4481 msg->rsp_size = 5; 4482 } 4483 } else if ((msg->data_size >= 2) 4484 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) 4485 && (msg->data[1] == IPMI_SEND_MSG_CMD) 4486 && (msg->user_data == NULL)) { 4487 4488 if (intf->in_shutdown) 4489 goto out; 4490 4491 /* 4492 * This is the local response to a command send; start 4493 * the timer for these. The user_data will not be 4494 * NULL if this is a response send, and we will let 4495 * response sends just go through. 4496 */ 4497 4498 /* 4499 * Check for errors; if we get certain errors (ones 4500 * that basically mean we can try again later), we 4501 * ignore them and start the timer. Otherwise we 4502 * report the error immediately. 4503 */ 4504 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) 4505 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) 4506 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) 4507 && (msg->rsp[2] != IPMI_BUS_ERR) 4508 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { 4509 int ch = msg->rsp[3] & 0xf; 4510 struct ipmi_channel *chans; 4511 4512 /* Got an error sending the message, handle it. */ 4513 4514 chans = READ_ONCE(intf->channel_list)->c; 4515 if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN) 4516 || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC)) 4517 ipmi_inc_stat(intf, sent_lan_command_errs); 4518 else 4519 ipmi_inc_stat(intf, sent_ipmb_command_errs); 4520 intf_err_seq(intf, msg->msgid, msg->rsp[2]); 4521 } else 4522 /* The message was sent, start the timer. */ 4523 intf_start_seq_timer(intf, msg->msgid); 4524 requeue = 0; 4525 goto out; 4526 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) 4527 || (msg->rsp[1] != msg->data[1])) { 4528 /* 4529 * The NetFN and Command in the response are not even 4530 * marginally correct. 4531 */ 4532 dev_warn(intf->si_dev, 4533 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n", 4534 (msg->data[0] >> 2) | 1, msg->data[1], 4535 msg->rsp[0] >> 2, msg->rsp[1]); 4536 4537 goto return_unspecified; 4538 } 4539 4540 if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) { 4541 if ((msg->data[0] >> 2) & 1) { 4542 /* It's a response to a sent response. */ 4543 chan = 0; 4544 cc = msg->rsp[4]; 4545 goto process_response_response; 4546 } 4547 if (is_cmd) 4548 requeue = handle_ipmb_direct_rcv_cmd(intf, msg); 4549 else 4550 requeue = handle_ipmb_direct_rcv_rsp(intf, msg); 4551 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4552 && (msg->rsp[1] == IPMI_SEND_MSG_CMD) 4553 && (msg->user_data != NULL)) { 4554 /* 4555 * It's a response to a response we sent. For this we 4556 * deliver a send message response to the user.
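 * The user sees an IPMI_RESPONSE_RESPONSE_TYPE message whose only
 * data byte is the completion code of the Send Message command, so
 * it can tell whether its outgoing response made it onto the bus.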
4557 */ 4558 struct ipmi_recv_msg *recv_msg; 4559 4560 chan = msg->data[2] & 0x0f; 4561 if (chan >= IPMI_MAX_CHANNELS) 4562 /* Invalid channel number */ 4563 goto out; 4564 cc = msg->rsp[2]; 4565 4566 process_response_response: 4567 recv_msg = msg->user_data; 4568 4569 requeue = 0; 4570 if (!recv_msg) 4571 goto out; 4572 4573 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; 4574 recv_msg->msg.data = recv_msg->msg_data; 4575 recv_msg->msg_data[0] = cc; 4576 recv_msg->msg.data_len = 1; 4577 deliver_local_response(intf, recv_msg); 4578 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4579 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { 4580 struct ipmi_channel *chans; 4581 4582 /* It's from the receive queue. */ 4583 chan = msg->rsp[3] & 0xf; 4584 if (chan >= IPMI_MAX_CHANNELS) { 4585 /* Invalid channel number */ 4586 requeue = 0; 4587 goto out; 4588 } 4589 4590 /* 4591 * We need to make sure the channels have been initialized. 4592 * The channel_handler routine will set the "curr_channel" 4593 * equal to or greater than IPMI_MAX_CHANNELS when all the 4594 * channels for this interface have been initialized. 4595 */ 4596 if (!intf->channels_ready) { 4597 requeue = 0; /* Throw the message away */ 4598 goto out; 4599 } 4600 4601 chans = READ_ONCE(intf->channel_list)->c; 4602 4603 switch (chans[chan].medium) { 4604 case IPMI_CHANNEL_MEDIUM_IPMB: 4605 if (msg->rsp[4] & 0x04) { 4606 /* 4607 * It's a response, so find the 4608 * requesting message and send it up. 4609 */ 4610 requeue = handle_ipmb_get_msg_rsp(intf, msg); 4611 } else { 4612 /* 4613 * It's a command to the SMS from some other 4614 * entity. Handle that. 4615 */ 4616 requeue = handle_ipmb_get_msg_cmd(intf, msg); 4617 } 4618 break; 4619 4620 case IPMI_CHANNEL_MEDIUM_8023LAN: 4621 case IPMI_CHANNEL_MEDIUM_ASYNC: 4622 if (msg->rsp[6] & 0x04) { 4623 /* 4624 * It's a response, so find the 4625 * requesting message and send it up. 4626 */ 4627 requeue = handle_lan_get_msg_rsp(intf, msg); 4628 } else { 4629 /* 4630 * It's a command to the SMS from some other 4631 * entity. Handle that. 4632 */ 4633 requeue = handle_lan_get_msg_cmd(intf, msg); 4634 } 4635 break; 4636 4637 default: 4638 /* Check for OEM Channels. Clients had better 4639 register for these commands. */ 4640 if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN) 4641 && (chans[chan].medium 4642 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) { 4643 requeue = handle_oem_get_msg_cmd(intf, msg); 4644 } else { 4645 /* 4646 * We don't handle the channel type, so just 4647 * free the message. 4648 */ 4649 requeue = 0; 4650 } 4651 } 4652 4653 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 4654 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { 4655 /* It's an asynchronous event. */ 4656 requeue = handle_read_event_rsp(intf, msg); 4657 } else { 4658 /* It's a response from the local BMC. */ 4659 requeue = handle_bmc_rsp(intf, msg); 4660 } 4661 4662 out: 4663 return requeue; 4664 } 4665 4666 /* 4667 * If there are messages in the queue or pretimeouts, handle them. 4668 */ 4669 static void handle_new_recv_msgs(struct ipmi_smi *intf) 4670 { 4671 struct ipmi_smi_msg *smi_msg; 4672 unsigned long flags = 0; 4673 int rv; 4674 int run_to_completion = intf->run_to_completion; 4675 4676 /* See if any waiting messages need to be processed. 
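 * The lock is dropped around handle_one_recv_msg() so user
 * callbacks never run under waiting_rcv_msgs_lock; that is safe
 * because this routine is the only consumer of the list.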
*/ 4677 if (!run_to_completion) 4678 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4679 while (!list_empty(&intf->waiting_rcv_msgs)) { 4680 smi_msg = list_entry(intf->waiting_rcv_msgs.next, 4681 struct ipmi_smi_msg, link); 4682 list_del(&smi_msg->link); 4683 if (!run_to_completion) 4684 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4685 flags); 4686 rv = handle_one_recv_msg(intf, smi_msg); 4687 if (!run_to_completion) 4688 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4689 if (rv > 0) { 4690 /* 4691 * To preserve message order, quit if we 4692 * can't handle a message. Add the message 4693 * back at the head; this is safe because this 4694 * tasklet is the only thing that pulls the 4695 * messages. 4696 */ 4697 list_add(&smi_msg->link, &intf->waiting_rcv_msgs); 4698 break; 4699 } else { 4700 if (rv == 0) 4701 /* Message handled */ 4702 ipmi_free_smi_msg(smi_msg); 4703 /* If rv < 0, fatal error, del but don't free. */ 4704 } 4705 } 4706 if (!run_to_completion) 4707 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); 4708 4709 /* 4710 * If the pretimeout count is non-zero, decrement it by one and 4711 * deliver pretimeouts to all the users. 4712 */ 4713 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) { 4714 struct ipmi_user *user; 4715 int index; 4716 4717 index = srcu_read_lock(&intf->users_srcu); 4718 list_for_each_entry_rcu(user, &intf->users, link) { 4719 if (user->handler->ipmi_watchdog_pretimeout) 4720 user->handler->ipmi_watchdog_pretimeout( 4721 user->handler_data); 4722 } 4723 srcu_read_unlock(&intf->users_srcu, index); 4724 } 4725 } 4726 4727 static void smi_recv_tasklet(struct tasklet_struct *t) 4728 { 4729 unsigned long flags = 0; /* keep us warning-free. */ 4730 struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet); 4731 int run_to_completion = intf->run_to_completion; 4732 struct ipmi_smi_msg *newmsg = NULL; 4733 4734 /* 4735 * Start the next message if available. 4736 * 4737 * Do this here, not in the actual receiver, because we may 4738 * deadlock: the lower layer is allowed to hold locks while calling 4739 * message delivery. 4740 */ 4741 4742 rcu_read_lock(); 4743 4744 if (!run_to_completion) 4745 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4746 if (intf->curr_msg == NULL && !intf->in_shutdown) { 4747 struct list_head *entry = NULL; 4748 4749 /* Pick the high priority queue first. */ 4750 if (!list_empty(&intf->hp_xmit_msgs)) 4751 entry = intf->hp_xmit_msgs.next; 4752 else if (!list_empty(&intf->xmit_msgs)) 4753 entry = intf->xmit_msgs.next; 4754 4755 if (entry) { 4756 list_del(entry); 4757 newmsg = list_entry(entry, struct ipmi_smi_msg, link); 4758 intf->curr_msg = newmsg; 4759 } 4760 } 4761 4762 if (!run_to_completion) 4763 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4764 if (newmsg) 4765 intf->handlers->sender(intf->send_info, newmsg); 4766 4767 rcu_read_unlock(); 4768 4769 handle_new_recv_msgs(intf); 4770 } 4771 4772 /* Handle a new message from the lower layer. */ 4773 void ipmi_smi_msg_received(struct ipmi_smi *intf, 4774 struct ipmi_smi_msg *msg) 4775 { 4776 unsigned long flags = 0; /* keep us warning-free. */ 4777 int run_to_completion = intf->run_to_completion; 4778 4779 /* 4780 * To preserve message order, we keep a queue and deliver from 4781 * a tasklet.
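 * A lower-layer driver typically hands a finished message up like
 * this (illustrative sketch; buf and len are hypothetical):
 *
 *	memcpy(msg->rsp, buf, len);
 *	msg->rsp_size = len;
 *	ipmi_smi_msg_received(intf, msg);
 *
 * after which this function queues it and kicks the tasklet.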
4782 */ 4783 if (!run_to_completion) 4784 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 4785 list_add_tail(&msg->link, &intf->waiting_rcv_msgs); 4786 if (!run_to_completion) 4787 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 4788 flags); 4789 4790 if (!run_to_completion) 4791 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 4792 /* 4793 * We can get an asynchronous event or receive message in addition 4794 * to commands we send. 4795 */ 4796 if (msg == intf->curr_msg) 4797 intf->curr_msg = NULL; 4798 if (!run_to_completion) 4799 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 4800 4801 if (run_to_completion) 4802 smi_recv_tasklet(&intf->recv_tasklet); 4803 else 4804 tasklet_schedule(&intf->recv_tasklet); 4805 } 4806 EXPORT_SYMBOL(ipmi_smi_msg_received); 4807 4808 void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf) 4809 { 4810 if (intf->in_shutdown) 4811 return; 4812 4813 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); 4814 tasklet_schedule(&intf->recv_tasklet); 4815 } 4816 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 4817 4818 static struct ipmi_smi_msg * 4819 smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, 4820 unsigned char seq, long seqid) 4821 { 4822 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); 4823 if (!smi_msg) 4824 /* 4825 * If we can't allocate the message, then just return, we 4826 * get 4 retries, so this should be ok. 4827 */ 4828 return NULL; 4829 4830 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); 4831 smi_msg->data_size = recv_msg->msg.data_len; 4832 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); 4833 4834 pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data); 4835 4836 return smi_msg; 4837 } 4838 4839 static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, 4840 struct list_head *timeouts, 4841 unsigned long timeout_period, 4842 int slot, unsigned long *flags, 4843 bool *need_timer) 4844 { 4845 struct ipmi_recv_msg *msg; 4846 4847 if (intf->in_shutdown) 4848 return; 4849 4850 if (!ent->inuse) 4851 return; 4852 4853 if (timeout_period < ent->timeout) { 4854 ent->timeout -= timeout_period; 4855 *need_timer = true; 4856 return; 4857 } 4858 4859 if (ent->retries_left == 0) { 4860 /* The message has used all its retries. */ 4861 ent->inuse = 0; 4862 smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); 4863 msg = ent->recv_msg; 4864 list_add_tail(&msg->link, timeouts); 4865 if (ent->broadcast) 4866 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); 4867 else if (is_lan_addr(&ent->recv_msg->addr)) 4868 ipmi_inc_stat(intf, timed_out_lan_commands); 4869 else 4870 ipmi_inc_stat(intf, timed_out_ipmb_commands); 4871 } else { 4872 struct ipmi_smi_msg *smi_msg; 4873 /* More retries, send again. */ 4874 4875 *need_timer = true; 4876 4877 /* 4878 * Start with the max timer, set to normal timer after 4879 * the message is sent. 4880 */ 4881 ent->timeout = MAX_MSG_TIMEOUT; 4882 ent->retries_left--; 4883 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 4884 ent->seqid); 4885 if (!smi_msg) { 4886 if (is_lan_addr(&ent->recv_msg->addr)) 4887 ipmi_inc_stat(intf, 4888 dropped_rexmit_lan_commands); 4889 else 4890 ipmi_inc_stat(intf, 4891 dropped_rexmit_ipmb_commands); 4892 return; 4893 } 4894 4895 spin_unlock_irqrestore(&intf->seq_lock, *flags); 4896 4897 /* 4898 * Send the new message. We send with a zero 4899 * priority. 
It timed out, I doubt time is that 4900 * critical now, and high priority messages are really 4901 * only for messages to the local MC, which don't get 4902 * resent. 4903 */ 4904 if (intf->handlers) { 4905 if (is_lan_addr(&ent->recv_msg->addr)) 4906 ipmi_inc_stat(intf, 4907 retransmitted_lan_commands); 4908 else 4909 ipmi_inc_stat(intf, 4910 retransmitted_ipmb_commands); 4911 4912 smi_send(intf, intf->handlers, smi_msg, 0); 4913 } else 4914 ipmi_free_smi_msg(smi_msg); 4915 4916 spin_lock_irqsave(&intf->seq_lock, *flags); 4917 } 4918 } 4919 4920 static bool ipmi_timeout_handler(struct ipmi_smi *intf, 4921 unsigned long timeout_period) 4922 { 4923 struct list_head timeouts; 4924 struct ipmi_recv_msg *msg, *msg2; 4925 unsigned long flags; 4926 int i; 4927 bool need_timer = false; 4928 4929 if (!intf->bmc_registered) { 4930 kref_get(&intf->refcount); 4931 if (!schedule_work(&intf->bmc_reg_work)) { 4932 kref_put(&intf->refcount, intf_free); 4933 need_timer = true; 4934 } 4935 } 4936 4937 /* 4938 * Go through the seq table and find any messages that 4939 * have timed out, putting them in the timeouts 4940 * list. 4941 */ 4942 INIT_LIST_HEAD(&timeouts); 4943 spin_lock_irqsave(&intf->seq_lock, flags); 4944 if (intf->ipmb_maintenance_mode_timeout) { 4945 if (intf->ipmb_maintenance_mode_timeout <= timeout_period) 4946 intf->ipmb_maintenance_mode_timeout = 0; 4947 else 4948 intf->ipmb_maintenance_mode_timeout -= timeout_period; 4949 } 4950 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) 4951 check_msg_timeout(intf, &intf->seq_table[i], 4952 &timeouts, timeout_period, i, 4953 &flags, &need_timer); 4954 spin_unlock_irqrestore(&intf->seq_lock, flags); 4955 4956 list_for_each_entry_safe(msg, msg2, &timeouts, link) 4957 deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE); 4958 4959 /* 4960 * Maintenance mode handling. Check the timeout 4961 * optimistically before we claim the lock. It may 4962 * mean a timeout gets missed occasionally, but that 4963 * only means the timeout gets extended by one period 4964 * in that case. No big deal, and it avoids the lock 4965 * most of the time. 4966 */ 4967 if (intf->auto_maintenance_timeout > 0) { 4968 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 4969 if (intf->auto_maintenance_timeout > 0) { 4970 intf->auto_maintenance_timeout 4971 -= timeout_period; 4972 if (!intf->maintenance_mode 4973 && (intf->auto_maintenance_timeout <= 0)) { 4974 intf->maintenance_mode_enable = false; 4975 maintenance_mode_update(intf); 4976 } 4977 } 4978 spin_unlock_irqrestore(&intf->maintenance_mode_lock, 4979 flags); 4980 } 4981 4982 tasklet_schedule(&intf->recv_tasklet); 4983 4984 return need_timer; 4985 } 4986 4987 static void ipmi_request_event(struct ipmi_smi *intf) 4988 { 4989 /* No event requests when in maintenance mode. 
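 * (The flag is set while maintenance commands are in flight and
 * cleared by the auto-maintenance timeout in ipmi_timeout_handler()
 * above; mixing Get Event traffic into a maintenance operation
 * could disturb it.)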
*/ 4990 if (intf->maintenance_mode_enable) 4991 return; 4992 4993 if (!intf->in_shutdown) 4994 intf->handlers->request_events(intf->send_info); 4995 } 4996 4997 static struct timer_list ipmi_timer; 4998 4999 static atomic_t stop_operation; 5000 5001 static void ipmi_timeout(struct timer_list *unused) 5002 { 5003 struct ipmi_smi *intf; 5004 bool need_timer = false; 5005 int index; 5006 5007 if (atomic_read(&stop_operation)) 5008 return; 5009 5010 index = srcu_read_lock(&ipmi_interfaces_srcu); 5011 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 5012 if (atomic_read(&intf->event_waiters)) { 5013 intf->ticks_to_req_ev--; 5014 if (intf->ticks_to_req_ev == 0) { 5015 ipmi_request_event(intf); 5016 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 5017 } 5018 need_timer = true; 5019 } 5020 5021 need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); 5022 } 5023 srcu_read_unlock(&ipmi_interfaces_srcu, index); 5024 5025 if (need_timer) 5026 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5027 } 5028 5029 static void need_waiter(struct ipmi_smi *intf) 5030 { 5031 /* Racy, but worst case we start the timer twice. */ 5032 if (!timer_pending(&ipmi_timer)) 5033 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5034 } 5035 5036 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); 5037 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); 5038 5039 static void free_smi_msg(struct ipmi_smi_msg *msg) 5040 { 5041 atomic_dec(&smi_msg_inuse_count); 5042 /* Try to keep as much stuff out of the panic path as possible. */ 5043 if (!oops_in_progress) 5044 kfree(msg); 5045 } 5046 5047 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) 5048 { 5049 struct ipmi_smi_msg *rv; 5050 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC); 5051 if (rv) { 5052 rv->done = free_smi_msg; 5053 rv->user_data = NULL; 5054 rv->type = IPMI_SMI_MSG_TYPE_NORMAL; 5055 atomic_inc(&smi_msg_inuse_count); 5056 } 5057 return rv; 5058 } 5059 EXPORT_SYMBOL(ipmi_alloc_smi_msg); 5060 5061 static void free_recv_msg(struct ipmi_recv_msg *msg) 5062 { 5063 atomic_dec(&recv_msg_inuse_count); 5064 /* Try to keep as much stuff out of the panic path as possible. */ 5065 if (!oops_in_progress) 5066 kfree(msg); 5067 } 5068 5069 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) 5070 { 5071 struct ipmi_recv_msg *rv; 5072 5073 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); 5074 if (rv) { 5075 rv->user = NULL; 5076 rv->done = free_recv_msg; 5077 atomic_inc(&recv_msg_inuse_count); 5078 } 5079 return rv; 5080 } 5081 5082 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) 5083 { 5084 if (msg->user && !oops_in_progress) 5085 kref_put(&msg->user->refcount, free_user); 5086 msg->done(msg); 5087 } 5088 EXPORT_SYMBOL(ipmi_free_recv_msg); 5089 5090 static atomic_t panic_done_count = ATOMIC_INIT(0); 5091 5092 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) 5093 { 5094 atomic_dec(&panic_done_count); 5095 } 5096 5097 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) 5098 { 5099 atomic_dec(&panic_done_count); 5100 } 5101 5102 /* 5103 * Inside a panic, send a message and wait for a response. 
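 * The dummy done handlers above only decrement panic_done_count;
 * we bump it by two (one for the SMI message, one for the receive
 * message), fire the request, and then spin in ipmi_poll() until
 * both halves have completed.  Nothing on this path may sleep or
 * allocate.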
5104 */ 5105 static void ipmi_panic_request_and_wait(struct ipmi_smi *intf, 5106 struct ipmi_addr *addr, 5107 struct kernel_ipmi_msg *msg) 5108 { 5109 struct ipmi_smi_msg smi_msg; 5110 struct ipmi_recv_msg recv_msg; 5111 int rv; 5112 5113 smi_msg.done = dummy_smi_done_handler; 5114 recv_msg.done = dummy_recv_done_handler; 5115 atomic_add(2, &panic_done_count); 5116 rv = i_ipmi_request(NULL, 5117 intf, 5118 addr, 5119 0, 5120 msg, 5121 intf, 5122 &smi_msg, 5123 &recv_msg, 5124 0, 5125 intf->addrinfo[0].address, 5126 intf->addrinfo[0].lun, 5127 0, 1); /* Don't retry, and don't wait. */ 5128 if (rv) 5129 atomic_sub(2, &panic_done_count); 5130 else if (intf->handlers->flush_messages) 5131 intf->handlers->flush_messages(intf->send_info); 5132 5133 while (atomic_read(&panic_done_count) != 0) 5134 ipmi_poll(intf); 5135 } 5136 5137 static void event_receiver_fetcher(struct ipmi_smi *intf, 5138 struct ipmi_recv_msg *msg) 5139 { 5140 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 5141 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) 5142 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) 5143 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { 5144 /* A get event receiver command, save it. */ 5145 intf->event_receiver = msg->msg.data[1]; 5146 intf->event_receiver_lun = msg->msg.data[2] & 0x3; 5147 } 5148 } 5149 5150 static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 5151 { 5152 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 5153 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 5154 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) 5155 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { 5156 /* 5157 * A get device id command, save if we are an event 5158 * receiver or generator. 5159 */ 5160 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; 5161 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; 5162 } 5163 } 5164 5165 static void send_panic_events(struct ipmi_smi *intf, char *str) 5166 { 5167 struct kernel_ipmi_msg msg; 5168 unsigned char data[16]; 5169 struct ipmi_system_interface_addr *si; 5170 struct ipmi_addr addr; 5171 char *p = str; 5172 struct ipmi_ipmb_addr *ipmb; 5173 int j; 5174 5175 if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE) 5176 return; 5177 5178 si = (struct ipmi_system_interface_addr *) &addr; 5179 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 5180 si->channel = IPMI_BMC_CHANNEL; 5181 si->lun = 0; 5182 5183 /* Fill in an event telling that we have failed. */ 5184 msg.netfn = 0x04; /* Sensor or Event. */ 5185 msg.cmd = 2; /* Platform event command. */ 5186 msg.data = data; 5187 msg.data_len = 8; 5188 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */ 5189 data[1] = 0x03; /* This is for IPMI 1.0. */ 5190 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */ 5191 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ 5192 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ 5193 5194 /* 5195 * Put a few breadcrumbs in. Hopefully later we can add more things 5196 * to make the panic events more useful. 5197 */ 5198 if (str) { 5199 data[3] = str[0]; 5200 data[6] = str[1]; 5201 data[7] = str[2]; 5202 } 5203 5204 /* Send the event announcing the panic. */ 5205 ipmi_panic_request_and_wait(intf, &addr, &msg); 5206 5207 /* 5208 * On every interface, dump a bunch of OEM events holding the 5209 * string. 5210 */ 5211 if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str) 5212 return; 5213 5214 /* 5215 * intf_num is used as a marker to tell if the 5216 * interface is valid.
Thus we need a read barrier to 5217 * make sure data fetched before checking intf_num 5218 * won't be used. 5219 */ 5220 smp_rmb(); 5221 5222 /* 5223 * First job here is to figure out where to send the 5224 * OEM events. There's no way in IPMI to send OEM 5225 * events using an event send command, so we have to 5226 * find the SEL to put them in and stick them in 5227 * there. 5228 */ 5229 5230 /* Get capabilities from the get device id. */ 5231 intf->local_sel_device = 0; 5232 intf->local_event_generator = 0; 5233 intf->event_receiver = 0; 5234 5235 /* Request the device info from the local MC. */ 5236 msg.netfn = IPMI_NETFN_APP_REQUEST; 5237 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 5238 msg.data = NULL; 5239 msg.data_len = 0; 5240 intf->null_user_handler = device_id_fetcher; 5241 ipmi_panic_request_and_wait(intf, &addr, &msg); 5242 5243 if (intf->local_event_generator) { 5244 /* Request the event receiver from the local MC. */ 5245 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; 5246 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; 5247 msg.data = NULL; 5248 msg.data_len = 0; 5249 intf->null_user_handler = event_receiver_fetcher; 5250 ipmi_panic_request_and_wait(intf, &addr, &msg); 5251 } 5252 intf->null_user_handler = NULL; 5253 5254 /* 5255 * Validate the event receiver. The low bit must not 5256 * be 1 (it must be a valid IPMB address), it cannot 5257 * be zero, and it must not be my address. 5258 */ 5259 if (((intf->event_receiver & 1) == 0) 5260 && (intf->event_receiver != 0) 5261 && (intf->event_receiver != intf->addrinfo[0].address)) { 5262 /* 5263 * The event receiver is valid, send an IPMB 5264 * message. 5265 */ 5266 ipmb = (struct ipmi_ipmb_addr *) &addr; 5267 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; 5268 ipmb->channel = 0; /* FIXME - is this right? */ 5269 ipmb->lun = intf->event_receiver_lun; 5270 ipmb->slave_addr = intf->event_receiver; 5271 } else if (intf->local_sel_device) { 5272 /* 5273 * The event receiver was not valid (or was 5274 * me), but I am an SEL device; just dump it 5275 * in my SEL. 5276 */ 5277 si = (struct ipmi_system_interface_addr *) &addr; 5278 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 5279 si->channel = IPMI_BMC_CHANNEL; 5280 si->lun = 0; 5281 } else 5282 return; /* Nowhere to send the event. */ 5283 5284 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ 5285 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; 5286 msg.data = data; 5287 msg.data_len = 16; 5288 5289 j = 0; 5290 while (*p) { 5291 int size = strlen(p); 5292 5293 if (size > 11) 5294 size = 11; 5295 data[0] = 0; 5296 data[1] = 0; 5297 data[2] = 0xf0; /* OEM event without timestamp. */ 5298 data[3] = intf->addrinfo[0].address; 5299 data[4] = j++; /* sequence # */ 5300 /* 5301 * Always give 11 bytes, so strncpy will fill 5302 * it with zeroes for me. 5303 */ 5304 strncpy(data+5, p, 11); 5305 p += size; 5306 5307 ipmi_panic_request_and_wait(intf, &addr, &msg); 5308 } 5309 } 5310 5311 static int has_panicked; 5312 5313 static int panic_event(struct notifier_block *this, 5314 unsigned long event, 5315 void *ptr) 5316 { 5317 struct ipmi_smi *intf; 5318 struct ipmi_user *user; 5319 5320 if (has_panicked) 5321 return NOTIFY_DONE; 5322 has_panicked = 1; 5323 5324 /* For every registered interface, set it to run to completion. */ 5325 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 5326 if (!intf->handlers || intf->intf_num == -1) 5327 /* Interface is not ready.
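 * (intf_num stays -1 until interface registration has finished,
 * so a half-initialized interface is skipped here.)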
*/ 5328 continue; 5329 5330 if (!intf->handlers->poll) 5331 continue; 5332 5333 /* 5334 * If we were interrupted while locking xmit_msgs_lock or 5335 * waiting_rcv_msgs_lock, the corresponding list may be 5336 * corrupted. In this case, drop items on the list for 5337 * safety. 5338 */ 5339 if (!spin_trylock(&intf->xmit_msgs_lock)) { 5340 INIT_LIST_HEAD(&intf->xmit_msgs); 5341 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 5342 } else 5343 spin_unlock(&intf->xmit_msgs_lock); 5344 5345 if (!spin_trylock(&intf->waiting_rcv_msgs_lock)) 5346 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 5347 else 5348 spin_unlock(&intf->waiting_rcv_msgs_lock); 5349 5350 intf->run_to_completion = 1; 5351 if (intf->handlers->set_run_to_completion) 5352 intf->handlers->set_run_to_completion(intf->send_info, 5353 1); 5354 5355 list_for_each_entry_rcu(user, &intf->users, link) { 5356 if (user->handler->ipmi_panic_handler) 5357 user->handler->ipmi_panic_handler( 5358 user->handler_data); 5359 } 5360 5361 send_panic_events(intf, ptr); 5362 } 5363 5364 return NOTIFY_DONE; 5365 } 5366 5367 /* Must be called with ipmi_interfaces_mutex held. */ 5368 static int ipmi_register_driver(void) 5369 { 5370 int rv; 5371 5372 if (drvregistered) 5373 return 0; 5374 5375 rv = driver_register(&ipmidriver.driver); 5376 if (rv) 5377 pr_err("Could not register IPMI driver\n"); 5378 else 5379 drvregistered = true; 5380 return rv; 5381 } 5382 5383 static struct notifier_block panic_block = { 5384 .notifier_call = panic_event, 5385 .next = NULL, 5386 .priority = 200 /* priority: INT_MAX >= x >= 0 */ 5387 }; 5388 5389 static int ipmi_init_msghandler(void) 5390 { 5391 int rv; 5392 5393 mutex_lock(&ipmi_interfaces_mutex); 5394 rv = ipmi_register_driver(); 5395 if (rv) 5396 goto out; 5397 if (initialized) 5398 goto out; 5399 5400 rv = init_srcu_struct(&ipmi_interfaces_srcu); 5401 if (rv) 5402 goto out; 5403 5404 remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq"); 5405 if (!remove_work_wq) { 5406 pr_err("unable to create ipmi-msghandler-remove-wq workqueue\n"); 5407 rv = -ENOMEM; 5408 goto out_wq; 5409 } 5410 5411 timer_setup(&ipmi_timer, ipmi_timeout, 0); 5412 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5413 5414 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 5415 5416 initialized = true; 5417 5418 out_wq: 5419 if (rv) 5420 cleanup_srcu_struct(&ipmi_interfaces_srcu); 5421 out: 5422 mutex_unlock(&ipmi_interfaces_mutex); 5423 return rv; 5424 } 5425 5426 static int __init ipmi_init_msghandler_mod(void) 5427 { 5428 int rv; 5429 5430 pr_info("version " IPMI_DRIVER_VERSION "\n"); 5431 5432 mutex_lock(&ipmi_interfaces_mutex); 5433 rv = ipmi_register_driver(); 5434 mutex_unlock(&ipmi_interfaces_mutex); 5435 5436 return rv; 5437 } 5438 5439 static void __exit cleanup_ipmi(void) 5440 { 5441 int count; 5442 5443 if (initialized) { 5444 destroy_workqueue(remove_work_wq); 5445 5446 atomic_notifier_chain_unregister(&panic_notifier_list, 5447 &panic_block); 5448 5449 /* 5450 * This can't be called if any interfaces exist, so no worry 5451 * about shutting down the interfaces. 5452 */ 5453 5454 /* 5455 * Tell the timer to stop, then wait for it to stop. This 5456 * avoids problems with race conditions removing the timer 5457 * here. 5458 */ 5459 atomic_set(&stop_operation, 1); 5460 del_timer_sync(&ipmi_timer); 5461 5462 initialized = false; 5463 5464 /* Check for buffer leaks.
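 * (Every ipmi_alloc_smi_msg()/ipmi_alloc_recv_msg() increments one
 * of these counters and the matching free decrements it, so a
 * non-zero value here means some path leaked a message.)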
*/ 5465 count = atomic_read(&smi_msg_inuse_count); 5466 if (count != 0) 5467 pr_warn("SMI message count %d at exit\n", count); 5468 count = atomic_read(&recv_msg_inuse_count); 5469 if (count != 0) 5470 pr_warn("recv message count %d at exit\n", count); 5471 5472 cleanup_srcu_struct(&ipmi_interfaces_srcu); 5473 } 5474 if (drvregistered) 5475 driver_unregister(&ipmidriver.driver); 5476 } 5477 module_exit(cleanup_ipmi); 5478 5479 module_init(ipmi_init_msghandler_mod); 5480 MODULE_LICENSE("GPL"); 5481 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 5482 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface."); 5483 MODULE_VERSION(IPMI_DRIVER_VERSION); 5484 MODULE_SOFTDEP("post: ipmi_devintf"); 5485
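/*
 * Usage note (illustrative only, not part of the driver): a minimal
 * sketch of an in-kernel client of this message handler.  The names
 * below are hypothetical and error handling is trimmed.
 *
 *	static void example_recv(struct ipmi_recv_msg *msg,
 *				 void *user_msg_data)
 *	{
 *		pr_info("IPMI reply: netfn %x cmd %x\n",
 *			msg->msg.netfn, msg->msg.cmd);
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl example_hndl = {
 *		.ipmi_recv_hndl = example_recv,
 *	};
 *
 *	struct ipmi_user *user;
 *	int rv = ipmi_create_user(0, &example_hndl, NULL, &user);
 *
 * After a successful ipmi_create_user(), requests go out through
 * ipmi_request_settime() and replies arrive via the handler above;
 * ipmi_destroy_user() releases the binding.
 */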