/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>

#define PFX "IPMI message handler: "

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(unsigned long);
static void handle_new_recv_msgs(ipmi_smi_t intf);
static void need_waiter(ipmi_smi_t intf);
static int handle_one_recv_msg(ipmi_smi_t intf,
			       struct ipmi_smi_msg *msg);

static int initialized;

#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_ipmi_root;
#endif /* CONFIG_PROC_FS */

/* Remain in auto-maintenance mode for this amount of time (in ms). */
#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000

#define MAX_EVENTS_IN_QUEUE	25

/*
 * Don't let a message sit in a queue forever, always time it with at
 * least the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	/* Set to false when the user is destroyed. */
	bool valid;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	ipmi_smi_t intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;
};

struct cmd_rcvr {
	struct list_head link;

	ipmi_user_t   user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int  chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int  retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.  The sequence number is 6
 * bits (IPMI_IPMB_NUM_SEQ is 64) and the sequence id is 26 bits, so
 * the masks here must stay consistent with each other.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do {								\
		seq = (((msgid) >> 26) & 0x3f);				\
		seqid = ((msgid) & 0x3ffffff);				\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
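/*
 * Worked example of the msgid encoding above (illustrative only, not
 * code from this driver): for sequence table slot seq = 5 and
 * seqid = 0x123, STORE_SEQ_IN_MSGID(5, 0x123) yields
 * (5 << 26) | 0x123 = 0x14000123, and GET_SEQ_FROM_MSGID(0x14000123,
 * s, i) recovers s = 5 and i = 0x123.  The seqid wraps at 26 bits via
 * NEXT_SEQID(), so a stale response whose seqid no longer matches the
 * table entry is rejected.
 */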
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;

	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

#ifdef CONFIG_PROC_FS
struct ipmi_proc_entry {
	char                   *name;
	struct ipmi_proc_entry *next;
};
#endif

struct bmc_device {
	struct platform_device pdev;
	struct ipmi_device_id  id;
	unsigned char          guid[16];
	int                    guid_set;
	char                   name[16];
	struct kref            usecount;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out to the LAN interface. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent to the LAN interface. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};


#define IPMI_IPMB_NUM_SEQ	64
#define IPMI_MAX_CHANNELS	16
struct ipmi_smi {
	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.  seq_lock
	 * protects this.
	 */
	struct list_head users;

	/* Information to supply to users. */
	unsigned char ipmi_version_major;
	unsigned char ipmi_version_minor;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	struct bmc_device *bmc;
	char *my_dev_name;

	/*
	 * This is the lower-layer's sender routine.  Note that you
	 * must either be holding the ipmi_interfaces_mutex or be in
	 * a non-preemptible region to use this.  You must fetch the
	 * value into a local variable and make sure it is not NULL.
	 */
	const struct ipmi_smi_handlers *handlers;
	void                           *send_info;

#ifdef CONFIG_PROC_FS
	/* A list of proc entries for this interface. */
	struct mutex           proc_entry_lock;
	struct ipmi_proc_entry *proc_entries;
#endif

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t       seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery.  If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
	 * periodic timer interrupt.  The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t       waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t         watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	spinlock_t          xmit_msgs_lock;
	struct list_head    xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head    hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex     cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t       events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */
	char             delivering_events;
	char             event_msg_printed;
	atomic_t         event_waiters;
	unsigned int     ticks_to_req_ev;
	int              last_needs_timer;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 */
	void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel channels[IPMI_MAX_CHANNELS];

	/* Proc FS stuff. */
	struct proc_dir_entry *proc_dir;
	char                  proc_dir_name[10];

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * run_to_completion duplicate of smb_info, smi_info
	 * and ipmi_serial_info structures.  Used to decrease numbers of
	 * parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);

/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void clean_up_interface_data(ipmi_smi_t intf)
{
	int              i;
	struct cmd_rcvr  *rcvr, *rcvr2;
	struct list_head list;

	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}

static void intf_free(struct kref *ref)
{
	ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);

	clean_up_interface_data(intf);
	kfree(intf);
}

struct watcher_entry {
	int              intf_num;
	ipmi_smi_t       intf;
	struct list_head link;
};

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	ipmi_smi_t intf;
	LIST_HEAD(to_deliver);
	struct watcher_entry *e, *e2;

	mutex_lock(&smi_watchers_mutex);

	mutex_lock(&ipmi_interfaces_mutex);

	/* Build a list of things to deliver. */
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == -1)
			continue;
		e = kmalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			goto out_err;
		kref_get(&intf->refcount);
		e->intf = intf;
		e->intf_num = intf->intf_num;
		list_add_tail(&e->link, &to_deliver);
	}

	/* We will succeed, so add it to the list. */
	list_add(&watcher->link, &smi_watchers);

	mutex_unlock(&ipmi_interfaces_mutex);

	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		list_del(&e->link);
		watcher->new_smi(e->intf_num, e->intf->si_dev);
		kref_put(&e->intf->refcount, intf_free);
		kfree(e);
	}

	mutex_unlock(&smi_watchers_mutex);

	return 0;

 out_err:
	mutex_unlock(&ipmi_interfaces_mutex);
	mutex_unlock(&smi_watchers_mutex);
	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		list_del(&e->link);
		kref_put(&e->intf->refcount, intf_free);
		kfree(e);
	}
	return -ENOMEM;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&(watcher->link));
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);

/*
 * Must be called with smi_watchers_mutex held.
 */
static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
		    = (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
		    = (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
		    = (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
		    = (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
		    = (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);
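/*
 * Illustrative sketch (not code from this driver): a caller talking
 * to the local BMC would typically fill out a system-interface
 * address like this before handing it, with its length, to the
 * request functions later in this file; ipmi_validate_addr() above
 * is what checks it:
 *
 *	struct ipmi_system_interface_addr addr;
 *
 *	addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
 *	addr.channel   = IPMI_BMC_CHANNEL;
 *	addr.lun       = 0;
 *
 * IPMB and LAN destinations use struct ipmi_ipmb_addr and struct
 * ipmi_lan_addr the same way, with their matching addr_type values.
 */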
unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static void deliver_response(struct ipmi_recv_msg *msg)
{
	if (!msg->user) {
		ipmi_smi_t intf = msg->user_msg_data;

		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
			ipmi_inc_stat(intf, handled_local_responses);
		} else {
			/* No handler, so give up. */
			ipmi_inc_stat(intf, unhandled_local_responses);
		}
		ipmi_free_recv_msg(msg);
	} else if (!oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk.  At this moment, simply skip it in
		 * that case.
		 */

		ipmi_user_t user = msg->user;
		user->handler->ipmi_recv_hndl(msg, user->handler_data);
	}
}

static void
deliver_err_response(struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_response(msg);
}

/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(ipmi_smi_t           intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long        timeout,
			 int                  retries,
			 int                  broadcast,
			 unsigned char        *seq,
			 long                 *seqid)
{
	int          rv = 0;
	unsigned int i;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}
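/*
 * Descriptive note on the sequence-table lifecycle (a summary of the
 * helpers in this file, not new behavior): intf_next_seq() above
 * claims a free slot when a command goes out; intf_start_seq_timer()
 * arms the real timeout once the BMC acknowledges the send;
 * intf_find_seq() releases the slot when the matching response
 * arrives; and intf_err_seq() releases it and delivers an error
 * response if the send failed.  Even netfn values are requests and
 * odd values are responses, which is why deliver_err_response()
 * above sets the low netfn bit.
 */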
823 */ 824 static int intf_find_seq(ipmi_smi_t intf, 825 unsigned char seq, 826 short channel, 827 unsigned char cmd, 828 unsigned char netfn, 829 struct ipmi_addr *addr, 830 struct ipmi_recv_msg **recv_msg) 831 { 832 int rv = -ENODEV; 833 unsigned long flags; 834 835 if (seq >= IPMI_IPMB_NUM_SEQ) 836 return -EINVAL; 837 838 spin_lock_irqsave(&(intf->seq_lock), flags); 839 if (intf->seq_table[seq].inuse) { 840 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; 841 842 if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd) 843 && (msg->msg.netfn == netfn) 844 && (ipmi_addr_equal(addr, &(msg->addr)))) { 845 *recv_msg = msg; 846 intf->seq_table[seq].inuse = 0; 847 rv = 0; 848 } 849 } 850 spin_unlock_irqrestore(&(intf->seq_lock), flags); 851 852 return rv; 853 } 854 855 856 /* Start the timer for a specific sequence table entry. */ 857 static int intf_start_seq_timer(ipmi_smi_t intf, 858 long msgid) 859 { 860 int rv = -ENODEV; 861 unsigned long flags; 862 unsigned char seq; 863 unsigned long seqid; 864 865 866 GET_SEQ_FROM_MSGID(msgid, seq, seqid); 867 868 spin_lock_irqsave(&(intf->seq_lock), flags); 869 /* 870 * We do this verification because the user can be deleted 871 * while a message is outstanding. 872 */ 873 if ((intf->seq_table[seq].inuse) 874 && (intf->seq_table[seq].seqid == seqid)) { 875 struct seq_table *ent = &(intf->seq_table[seq]); 876 ent->timeout = ent->orig_timeout; 877 rv = 0; 878 } 879 spin_unlock_irqrestore(&(intf->seq_lock), flags); 880 881 return rv; 882 } 883 884 /* Got an error for the send message for a specific sequence number. */ 885 static int intf_err_seq(ipmi_smi_t intf, 886 long msgid, 887 unsigned int err) 888 { 889 int rv = -ENODEV; 890 unsigned long flags; 891 unsigned char seq; 892 unsigned long seqid; 893 struct ipmi_recv_msg *msg = NULL; 894 895 896 GET_SEQ_FROM_MSGID(msgid, seq, seqid); 897 898 spin_lock_irqsave(&(intf->seq_lock), flags); 899 /* 900 * We do this verification because the user can be deleted 901 * while a message is outstanding. 902 */ 903 if ((intf->seq_table[seq].inuse) 904 && (intf->seq_table[seq].seqid == seqid)) { 905 struct seq_table *ent = &(intf->seq_table[seq]); 906 907 ent->inuse = 0; 908 msg = ent->recv_msg; 909 rv = 0; 910 } 911 spin_unlock_irqrestore(&(intf->seq_lock), flags); 912 913 if (msg) 914 deliver_err_response(msg, err); 915 916 return rv; 917 } 918 919 920 int ipmi_create_user(unsigned int if_num, 921 struct ipmi_user_hndl *handler, 922 void *handler_data, 923 ipmi_user_t *user) 924 { 925 unsigned long flags; 926 ipmi_user_t new_user; 927 int rv = 0; 928 ipmi_smi_t intf; 929 930 /* 931 * There is no module usecount here, because it's not 932 * required. Since this can only be used by and called from 933 * other modules, they will implicitly use this module, and 934 * thus this can't be removed unless the other modules are 935 * removed. 936 */ 937 938 if (handler == NULL) 939 return -EINVAL; 940 941 /* 942 * Make sure the driver is actually initialized, this handles 943 * problems with initialization order. 944 */ 945 if (!initialized) { 946 rv = ipmi_init_msghandler(); 947 if (rv) 948 return rv; 949 950 /* 951 * The init code doesn't return an error if it was turned 952 * off, but it won't initialize. Check that. 
953 */ 954 if (!initialized) 955 return -ENODEV; 956 } 957 958 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); 959 if (!new_user) 960 return -ENOMEM; 961 962 mutex_lock(&ipmi_interfaces_mutex); 963 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 964 if (intf->intf_num == if_num) 965 goto found; 966 } 967 /* Not found, return an error */ 968 rv = -EINVAL; 969 goto out_kfree; 970 971 found: 972 /* Note that each existing user holds a refcount to the interface. */ 973 kref_get(&intf->refcount); 974 975 kref_init(&new_user->refcount); 976 new_user->handler = handler; 977 new_user->handler_data = handler_data; 978 new_user->intf = intf; 979 new_user->gets_events = false; 980 981 if (!try_module_get(intf->handlers->owner)) { 982 rv = -ENODEV; 983 goto out_kref; 984 } 985 986 if (intf->handlers->inc_usecount) { 987 rv = intf->handlers->inc_usecount(intf->send_info); 988 if (rv) { 989 module_put(intf->handlers->owner); 990 goto out_kref; 991 } 992 } 993 994 /* 995 * Hold the lock so intf->handlers is guaranteed to be good 996 * until now 997 */ 998 mutex_unlock(&ipmi_interfaces_mutex); 999 1000 new_user->valid = true; 1001 spin_lock_irqsave(&intf->seq_lock, flags); 1002 list_add_rcu(&new_user->link, &intf->users); 1003 spin_unlock_irqrestore(&intf->seq_lock, flags); 1004 if (handler->ipmi_watchdog_pretimeout) { 1005 /* User wants pretimeouts, so make sure to watch for them. */ 1006 if (atomic_inc_return(&intf->event_waiters) == 1) 1007 need_waiter(intf); 1008 } 1009 *user = new_user; 1010 return 0; 1011 1012 out_kref: 1013 kref_put(&intf->refcount, intf_free); 1014 out_kfree: 1015 mutex_unlock(&ipmi_interfaces_mutex); 1016 kfree(new_user); 1017 return rv; 1018 } 1019 EXPORT_SYMBOL(ipmi_create_user); 1020 1021 int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) 1022 { 1023 int rv = 0; 1024 ipmi_smi_t intf; 1025 const struct ipmi_smi_handlers *handlers; 1026 1027 mutex_lock(&ipmi_interfaces_mutex); 1028 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 1029 if (intf->intf_num == if_num) 1030 goto found; 1031 } 1032 /* Not found, return an error */ 1033 rv = -EINVAL; 1034 mutex_unlock(&ipmi_interfaces_mutex); 1035 return rv; 1036 1037 found: 1038 handlers = intf->handlers; 1039 rv = -ENOSYS; 1040 if (handlers->get_smi_info) 1041 rv = handlers->get_smi_info(intf->send_info, data); 1042 mutex_unlock(&ipmi_interfaces_mutex); 1043 1044 return rv; 1045 } 1046 EXPORT_SYMBOL(ipmi_get_smi_info); 1047 1048 static void free_user(struct kref *ref) 1049 { 1050 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount); 1051 kfree(user); 1052 } 1053 1054 int ipmi_destroy_user(ipmi_user_t user) 1055 { 1056 ipmi_smi_t intf = user->intf; 1057 int i; 1058 unsigned long flags; 1059 struct cmd_rcvr *rcvr; 1060 struct cmd_rcvr *rcvrs = NULL; 1061 1062 user->valid = false; 1063 1064 if (user->handler->ipmi_watchdog_pretimeout) 1065 atomic_dec(&intf->event_waiters); 1066 1067 if (user->gets_events) 1068 atomic_dec(&intf->event_waiters); 1069 1070 /* Remove the user from the interface's sequence table. */ 1071 spin_lock_irqsave(&intf->seq_lock, flags); 1072 list_del_rcu(&user->link); 1073 1074 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 1075 if (intf->seq_table[i].inuse 1076 && (intf->seq_table[i].recv_msg->user == user)) { 1077 intf->seq_table[i].inuse = 0; 1078 ipmi_free_recv_msg(intf->seq_table[i].recv_msg); 1079 } 1080 } 1081 spin_unlock_irqrestore(&intf->seq_lock, flags); 1082 1083 /* 1084 * Remove the user from the command receiver's table. 
int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int        rv = 0;
	ipmi_smi_t intf;
	const struct ipmi_smi_handlers *handlers;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;

 found:
	handlers = intf->handlers;
	rv = -ENOSYS;
	if (handlers->get_smi_info)
		rv = handlers->get_smi_info(intf->send_info, data);
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

static void free_user(struct kref *ref)
{
	ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
	kfree(user);
}

int ipmi_destroy_user(ipmi_user_t user)
{
	ipmi_smi_t      intf = user->intf;
	int             i;
	unsigned long   flags;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;

	user->valid = false;

	if (user->handler->ipmi_watchdog_pretimeout)
		atomic_dec(&intf->event_waiters);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	mutex_lock(&ipmi_interfaces_mutex);
	if (intf->handlers) {
		module_put(intf->handlers->owner);
		if (intf->handlers->dec_usecount)
			intf->handlers->dec_usecount(intf->send_info);
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	kref_put(&intf->refcount, intf_free);

	kref_put(&user->refcount, free_user);

	return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);

void ipmi_get_version(ipmi_user_t   user,
		      unsigned char *major,
		      unsigned char *minor)
{
	*major = user->intf->ipmi_version_major;
	*minor = user->intf->ipmi_version_minor;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(ipmi_user_t   user,
			unsigned int  channel,
			unsigned char address)
{
	if (channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	user->intf->channels[channel].address = address;
	return 0;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(ipmi_user_t   user,
			unsigned int  channel,
			unsigned char *address)
{
	if (channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	*address = user->intf->channels[channel].address;
	return 0;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(ipmi_user_t   user,
		    unsigned int  channel,
		    unsigned char LUN)
{
	if (channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	user->intf->channels[channel].lun = LUN & 0x3;
	return 0;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(ipmi_user_t   user,
		    unsigned int  channel,
		    unsigned char *address)
{
	if (channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	*address = user->intf->channels[channel].lun;
	return 0;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);

int ipmi_get_maintenance_mode(ipmi_user_t user)
{
	int           mode;
	unsigned long flags;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);
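/*
 * Usage note (illustrative, not code from this file): a client about
 * to drive a firmware update or reset sequence can pin the interface
 * in maintenance mode so the automatic handling below does not
 * toggle it mid-operation:
 *
 *	rv = ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_ON);
 *	...
 *	rv = ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_AUTO);
 *
 * IPMI_MAINTENANCE_MODE_AUTO is the default; in that mode
 * i_ipmi_request() below enables maintenance mode for
 * IPMI_MAINTENANCE_MODE_TIMEOUT ms whenever it sees a cold/warm
 * reset command or a firmware-netfn request.
 */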
static void maintenance_mode_update(ipmi_smi_t intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}

int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
{
	int           rv = 0;
	unsigned long flags;
	ipmi_smi_t    intf = user->intf;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_enable = false;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_enable = true;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
 out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);

int ipmi_set_gets_events(ipmi_user_t user, bool val)
{
	unsigned long        flags;
	ipmi_smi_t           intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head     msgs;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			printk(KERN_WARNING PFX "Event queue no longer"
			       " full\n");
			intf->event_msg_printed = 0;
		}

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_response(msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);

static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t    intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(ipmi_smi_t    intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int  chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & chans))
			return 0;
	}
	return 1;
}

int ipmi_register_for_cmd(ipmi_user_t   user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int  chans)
{
	ipmi_smi_t      intf = user->intf;
	struct cmd_rcvr *rcvr;
	int             rv = 0;


	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr)
		return -ENOMEM;
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	if (atomic_inc_return(&intf->event_waiters) == 1)
		need_waiter(intf);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

 out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);

int ipmi_unregister_for_cmd(ipmi_user_t   user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int  chans)
{
	ipmi_smi_t      intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		atomic_dec(&intf->event_waiters);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
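/*
 * Worked example of the 2's-complement checksum above (the numbers
 * are illustrative): for data = { 0x20, 0x18 }, the byte sum is
 * 0x38, so ipmb_checksum() returns -0x38 = 0xc8.  A receiver summing
 * the covered bytes plus the checksum gets 0x20 + 0x18 + 0xc8 = 0x00
 * (mod 256), which is how IPMB frames are verified.
 */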
static inline void format_ipmb_msg(struct ipmi_smi_msg    *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr  *ipmb_addr,
				   long                   msgid,
				   unsigned char          ipmb_seq,
				   int                    broadcast,
				   unsigned char          source_address,
				   unsigned char          source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[i+9]), msg->data,
		       msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[i+6]),
				smi_msg->data_size-6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}

static inline void format_lan_msg(struct ipmi_smi_msg    *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr   *lan_addr,
				  long                   msgid,
				  unsigned char          ipmb_seq,
				  unsigned char          source_lun)
{
	/* Format the LAN header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[10]), msg->data,
		       msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[7]),
				smi_msg->data_size-7);

	/* Add on the checksum size. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}

static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}


static void smi_send(ipmi_smi_t intf, const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = intf->run_to_completion;

	if (run_to_completion) {
		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
	} else {
		unsigned long flags;

		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
	}

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}
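/*
 * Descriptive note on the transmit path above (a summary, not new
 * behavior): smi_send() either hands the message straight to the
 * lower layer's sender routine when the interface is idle
 * (intf->curr_msg was NULL), or queues it on xmit_msgs (priority 0)
 * or hp_xmit_msgs (priority > 0) to be picked up when the current
 * message completes.  In run_to_completion mode (used on the
 * panic-time paths) the same bookkeeping is done without taking the
 * spinlock, since nothing else can be running then.
 */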
1535 */ 1536 static int i_ipmi_request(ipmi_user_t user, 1537 ipmi_smi_t intf, 1538 struct ipmi_addr *addr, 1539 long msgid, 1540 struct kernel_ipmi_msg *msg, 1541 void *user_msg_data, 1542 void *supplied_smi, 1543 struct ipmi_recv_msg *supplied_recv, 1544 int priority, 1545 unsigned char source_address, 1546 unsigned char source_lun, 1547 int retries, 1548 unsigned int retry_time_ms) 1549 { 1550 int rv = 0; 1551 struct ipmi_smi_msg *smi_msg; 1552 struct ipmi_recv_msg *recv_msg; 1553 unsigned long flags; 1554 1555 1556 if (supplied_recv) 1557 recv_msg = supplied_recv; 1558 else { 1559 recv_msg = ipmi_alloc_recv_msg(); 1560 if (recv_msg == NULL) 1561 return -ENOMEM; 1562 } 1563 recv_msg->user_msg_data = user_msg_data; 1564 1565 if (supplied_smi) 1566 smi_msg = (struct ipmi_smi_msg *) supplied_smi; 1567 else { 1568 smi_msg = ipmi_alloc_smi_msg(); 1569 if (smi_msg == NULL) { 1570 ipmi_free_recv_msg(recv_msg); 1571 return -ENOMEM; 1572 } 1573 } 1574 1575 rcu_read_lock(); 1576 if (intf->in_shutdown) { 1577 rv = -ENODEV; 1578 goto out_err; 1579 } 1580 1581 recv_msg->user = user; 1582 if (user) 1583 kref_get(&user->refcount); 1584 recv_msg->msgid = msgid; 1585 /* 1586 * Store the message to send in the receive message so timeout 1587 * responses can get the proper response data. 1588 */ 1589 recv_msg->msg = *msg; 1590 1591 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 1592 struct ipmi_system_interface_addr *smi_addr; 1593 1594 if (msg->netfn & 1) { 1595 /* Responses are not allowed to the SMI. */ 1596 rv = -EINVAL; 1597 goto out_err; 1598 } 1599 1600 smi_addr = (struct ipmi_system_interface_addr *) addr; 1601 if (smi_addr->lun > 3) { 1602 ipmi_inc_stat(intf, sent_invalid_commands); 1603 rv = -EINVAL; 1604 goto out_err; 1605 } 1606 1607 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr)); 1608 1609 if ((msg->netfn == IPMI_NETFN_APP_REQUEST) 1610 && ((msg->cmd == IPMI_SEND_MSG_CMD) 1611 || (msg->cmd == IPMI_GET_MSG_CMD) 1612 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) { 1613 /* 1614 * We don't let the user do these, since we manage 1615 * the sequence numbers. 
1616 */ 1617 ipmi_inc_stat(intf, sent_invalid_commands); 1618 rv = -EINVAL; 1619 goto out_err; 1620 } 1621 1622 if (((msg->netfn == IPMI_NETFN_APP_REQUEST) 1623 && ((msg->cmd == IPMI_COLD_RESET_CMD) 1624 || (msg->cmd == IPMI_WARM_RESET_CMD))) 1625 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) { 1626 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 1627 intf->auto_maintenance_timeout 1628 = IPMI_MAINTENANCE_MODE_TIMEOUT; 1629 if (!intf->maintenance_mode 1630 && !intf->maintenance_mode_enable) { 1631 intf->maintenance_mode_enable = true; 1632 maintenance_mode_update(intf); 1633 } 1634 spin_unlock_irqrestore(&intf->maintenance_mode_lock, 1635 flags); 1636 } 1637 1638 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { 1639 ipmi_inc_stat(intf, sent_invalid_commands); 1640 rv = -EMSGSIZE; 1641 goto out_err; 1642 } 1643 1644 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3); 1645 smi_msg->data[1] = msg->cmd; 1646 smi_msg->msgid = msgid; 1647 smi_msg->user_data = recv_msg; 1648 if (msg->data_len > 0) 1649 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len); 1650 smi_msg->data_size = msg->data_len + 2; 1651 ipmi_inc_stat(intf, sent_local_commands); 1652 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 1653 struct ipmi_ipmb_addr *ipmb_addr; 1654 unsigned char ipmb_seq; 1655 long seqid; 1656 int broadcast = 0; 1657 1658 if (addr->channel >= IPMI_MAX_CHANNELS) { 1659 ipmi_inc_stat(intf, sent_invalid_commands); 1660 rv = -EINVAL; 1661 goto out_err; 1662 } 1663 1664 if (intf->channels[addr->channel].medium 1665 != IPMI_CHANNEL_MEDIUM_IPMB) { 1666 ipmi_inc_stat(intf, sent_invalid_commands); 1667 rv = -EINVAL; 1668 goto out_err; 1669 } 1670 1671 if (retries < 0) { 1672 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) 1673 retries = 0; /* Don't retry broadcasts. */ 1674 else 1675 retries = 4; 1676 } 1677 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { 1678 /* 1679 * Broadcasts add a zero at the beginning of the 1680 * message, but otherwise is the same as an IPMB 1681 * address. 1682 */ 1683 addr->addr_type = IPMI_IPMB_ADDR_TYPE; 1684 broadcast = 1; 1685 } 1686 1687 1688 /* Default to 1 second retries. */ 1689 if (retry_time_ms == 0) 1690 retry_time_ms = 1000; 1691 1692 /* 1693 * 9 for the header and 1 for the checksum, plus 1694 * possibly one for the broadcast. 1695 */ 1696 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { 1697 ipmi_inc_stat(intf, sent_invalid_commands); 1698 rv = -EMSGSIZE; 1699 goto out_err; 1700 } 1701 1702 ipmb_addr = (struct ipmi_ipmb_addr *) addr; 1703 if (ipmb_addr->lun > 3) { 1704 ipmi_inc_stat(intf, sent_invalid_commands); 1705 rv = -EINVAL; 1706 goto out_err; 1707 } 1708 1709 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); 1710 1711 if (recv_msg->msg.netfn & 0x1) { 1712 /* 1713 * It's a response, so use the user's sequence 1714 * from msgid. 1715 */ 1716 ipmi_inc_stat(intf, sent_ipmb_responses); 1717 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, 1718 msgid, broadcast, 1719 source_address, source_lun); 1720 1721 /* 1722 * Save the receive message so we can use it 1723 * to deliver the response. 1724 */ 1725 smi_msg->user_data = recv_msg; 1726 } else { 1727 /* It's a command, so get a sequence for it. */ 1728 1729 spin_lock_irqsave(&(intf->seq_lock), flags); 1730 1731 /* 1732 * Create a sequence number with a 1 second 1733 * timeout and 4 retries. 
1734 */ 1735 rv = intf_next_seq(intf, 1736 recv_msg, 1737 retry_time_ms, 1738 retries, 1739 broadcast, 1740 &ipmb_seq, 1741 &seqid); 1742 if (rv) { 1743 /* 1744 * We have used up all the sequence numbers, 1745 * probably, so abort. 1746 */ 1747 spin_unlock_irqrestore(&(intf->seq_lock), 1748 flags); 1749 goto out_err; 1750 } 1751 1752 ipmi_inc_stat(intf, sent_ipmb_commands); 1753 1754 /* 1755 * Store the sequence number in the message, 1756 * so that when the send message response 1757 * comes back we can start the timer. 1758 */ 1759 format_ipmb_msg(smi_msg, msg, ipmb_addr, 1760 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 1761 ipmb_seq, broadcast, 1762 source_address, source_lun); 1763 1764 /* 1765 * Copy the message into the recv message data, so we 1766 * can retransmit it later if necessary. 1767 */ 1768 memcpy(recv_msg->msg_data, smi_msg->data, 1769 smi_msg->data_size); 1770 recv_msg->msg.data = recv_msg->msg_data; 1771 recv_msg->msg.data_len = smi_msg->data_size; 1772 1773 /* 1774 * We don't unlock until here, because we need 1775 * to copy the completed message into the 1776 * recv_msg before we release the lock. 1777 * Otherwise, race conditions may bite us. I 1778 * know that's pretty paranoid, but I prefer 1779 * to be correct. 1780 */ 1781 spin_unlock_irqrestore(&(intf->seq_lock), flags); 1782 } 1783 } else if (is_lan_addr(addr)) { 1784 struct ipmi_lan_addr *lan_addr; 1785 unsigned char ipmb_seq; 1786 long seqid; 1787 1788 if (addr->channel >= IPMI_MAX_CHANNELS) { 1789 ipmi_inc_stat(intf, sent_invalid_commands); 1790 rv = -EINVAL; 1791 goto out_err; 1792 } 1793 1794 if ((intf->channels[addr->channel].medium 1795 != IPMI_CHANNEL_MEDIUM_8023LAN) 1796 && (intf->channels[addr->channel].medium 1797 != IPMI_CHANNEL_MEDIUM_ASYNC)) { 1798 ipmi_inc_stat(intf, sent_invalid_commands); 1799 rv = -EINVAL; 1800 goto out_err; 1801 } 1802 1803 retries = 4; 1804 1805 /* Default to 1 second retries. */ 1806 if (retry_time_ms == 0) 1807 retry_time_ms = 1000; 1808 1809 /* 11 for the header and 1 for the checksum. */ 1810 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { 1811 ipmi_inc_stat(intf, sent_invalid_commands); 1812 rv = -EMSGSIZE; 1813 goto out_err; 1814 } 1815 1816 lan_addr = (struct ipmi_lan_addr *) addr; 1817 if (lan_addr->lun > 3) { 1818 ipmi_inc_stat(intf, sent_invalid_commands); 1819 rv = -EINVAL; 1820 goto out_err; 1821 } 1822 1823 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); 1824 1825 if (recv_msg->msg.netfn & 0x1) { 1826 /* 1827 * It's a response, so use the user's sequence 1828 * from msgid. 1829 */ 1830 ipmi_inc_stat(intf, sent_lan_responses); 1831 format_lan_msg(smi_msg, msg, lan_addr, msgid, 1832 msgid, source_lun); 1833 1834 /* 1835 * Save the receive message so we can use it 1836 * to deliver the response. 1837 */ 1838 smi_msg->user_data = recv_msg; 1839 } else { 1840 /* It's a command, so get a sequence for it. */ 1841 1842 spin_lock_irqsave(&(intf->seq_lock), flags); 1843 1844 /* 1845 * Create a sequence number with a 1 second 1846 * timeout and 4 retries. 1847 */ 1848 rv = intf_next_seq(intf, 1849 recv_msg, 1850 retry_time_ms, 1851 retries, 1852 0, 1853 &ipmb_seq, 1854 &seqid); 1855 if (rv) { 1856 /* 1857 * We have used up all the sequence numbers, 1858 * probably, so abort. 1859 */ 1860 spin_unlock_irqrestore(&(intf->seq_lock), 1861 flags); 1862 goto out_err; 1863 } 1864 1865 ipmi_inc_stat(intf, sent_lan_commands); 1866 1867 /* 1868 * Store the sequence number in the message, 1869 * so that when the send message response 1870 * comes back we can start the timer. 
1871 */ 1872 format_lan_msg(smi_msg, msg, lan_addr, 1873 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 1874 ipmb_seq, source_lun); 1875 1876 /* 1877 * Copy the message into the recv message data, so we 1878 * can retransmit it later if necessary. 1879 */ 1880 memcpy(recv_msg->msg_data, smi_msg->data, 1881 smi_msg->data_size); 1882 recv_msg->msg.data = recv_msg->msg_data; 1883 recv_msg->msg.data_len = smi_msg->data_size; 1884 1885 /* 1886 * We don't unlock until here, because we need 1887 * to copy the completed message into the 1888 * recv_msg before we release the lock. 1889 * Otherwise, race conditions may bite us. I 1890 * know that's pretty paranoid, but I prefer 1891 * to be correct. 1892 */ 1893 spin_unlock_irqrestore(&(intf->seq_lock), flags); 1894 } 1895 } else { 1896 /* Unknown address type. */ 1897 ipmi_inc_stat(intf, sent_invalid_commands); 1898 rv = -EINVAL; 1899 goto out_err; 1900 } 1901 1902 #ifdef DEBUG_MSGING 1903 { 1904 int m; 1905 for (m = 0; m < smi_msg->data_size; m++) 1906 printk(" %2.2x", smi_msg->data[m]); 1907 printk("\n"); 1908 } 1909 #endif 1910 1911 smi_send(intf, intf->handlers, smi_msg, priority); 1912 rcu_read_unlock(); 1913 1914 return 0; 1915 1916 out_err: 1917 rcu_read_unlock(); 1918 ipmi_free_smi_msg(smi_msg); 1919 ipmi_free_recv_msg(recv_msg); 1920 return rv; 1921 } 1922 1923 static int check_addr(ipmi_smi_t intf, 1924 struct ipmi_addr *addr, 1925 unsigned char *saddr, 1926 unsigned char *lun) 1927 { 1928 if (addr->channel >= IPMI_MAX_CHANNELS) 1929 return -EINVAL; 1930 *lun = intf->channels[addr->channel].lun; 1931 *saddr = intf->channels[addr->channel].address; 1932 return 0; 1933 } 1934 1935 int ipmi_request_settime(ipmi_user_t user, 1936 struct ipmi_addr *addr, 1937 long msgid, 1938 struct kernel_ipmi_msg *msg, 1939 void *user_msg_data, 1940 int priority, 1941 int retries, 1942 unsigned int retry_time_ms) 1943 { 1944 unsigned char saddr = 0, lun = 0; 1945 int rv; 1946 1947 if (!user) 1948 return -EINVAL; 1949 rv = check_addr(user->intf, addr, &saddr, &lun); 1950 if (rv) 1951 return rv; 1952 return i_ipmi_request(user, 1953 user->intf, 1954 addr, 1955 msgid, 1956 msg, 1957 user_msg_data, 1958 NULL, NULL, 1959 priority, 1960 saddr, 1961 lun, 1962 retries, 1963 retry_time_ms); 1964 } 1965 EXPORT_SYMBOL(ipmi_request_settime); 1966 1967 int ipmi_request_supply_msgs(ipmi_user_t user, 1968 struct ipmi_addr *addr, 1969 long msgid, 1970 struct kernel_ipmi_msg *msg, 1971 void *user_msg_data, 1972 void *supplied_smi, 1973 struct ipmi_recv_msg *supplied_recv, 1974 int priority) 1975 { 1976 unsigned char saddr = 0, lun = 0; 1977 int rv; 1978 1979 if (!user) 1980 return -EINVAL; 1981 rv = check_addr(user->intf, addr, &saddr, &lun); 1982 if (rv) 1983 return rv; 1984 return i_ipmi_request(user, 1985 user->intf, 1986 addr, 1987 msgid, 1988 msg, 1989 user_msg_data, 1990 supplied_smi, 1991 supplied_recv, 1992 priority, 1993 saddr, 1994 lun, 1995 -1, 0); 1996 } 1997 EXPORT_SYMBOL(ipmi_request_supply_msgs); 1998 1999 #ifdef CONFIG_PROC_FS 2000 static int smi_ipmb_proc_show(struct seq_file *m, void *v) 2001 { 2002 ipmi_smi_t intf = m->private; 2003 int i; 2004 2005 seq_printf(m, "%x", intf->channels[0].address); 2006 for (i = 1; i < IPMI_MAX_CHANNELS; i++) 2007 seq_printf(m, " %x", intf->channels[i].address); 2008 seq_putc(m, '\n'); 2009 2010 return 0; 2011 } 2012 2013 static int smi_ipmb_proc_open(struct inode *inode, struct file *file) 2014 { 2015 return single_open(file, smi_ipmb_proc_show, PDE_DATA(inode)); 2016 } 2017 2018 static const struct file_operations smi_ipmb_proc_ops 

#ifdef CONFIG_PROC_FS
static int smi_ipmb_proc_show(struct seq_file *m, void *v)
{
	ipmi_smi_t intf = m->private;
	int        i;

	seq_printf(m, "%x", intf->channels[0].address);
	for (i = 1; i < IPMI_MAX_CHANNELS; i++)
		seq_printf(m, " %x", intf->channels[i].address);
	seq_putc(m, '\n');

	return 0;
}

static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_ipmb_proc_show, PDE_DATA(inode));
}

static const struct file_operations smi_ipmb_proc_ops = {
	.open		= smi_ipmb_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int smi_version_proc_show(struct seq_file *m, void *v)
{
	ipmi_smi_t intf = m->private;

	seq_printf(m, "%u.%u\n",
		   ipmi_version_major(&intf->bmc->id),
		   ipmi_version_minor(&intf->bmc->id));

	return 0;
}

static int smi_version_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_version_proc_show, PDE_DATA(inode));
}

static const struct file_operations smi_version_proc_ops = {
	.open		= smi_version_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
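/*
 * Layout note (assuming proc_ipmi_root is the "ipmi" directory that
 * the init code creates under /proc): add_proc_entries() below
 * publishes one directory per interface, so interface 0 would be
 * visible as /proc/ipmi/0/stats, /proc/ipmi/0/ipmb and
 * /proc/ipmi/0/version, backed by the seq_file implementations in
 * this block.
 */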
seq_printf(m, "failed rexmit IPMB msgs: %u\n", 2107 ipmi_get_stat(intf, dropped_rexmit_ipmb_commands)); 2108 return 0; 2109 } 2110 2111 static int smi_stats_proc_open(struct inode *inode, struct file *file) 2112 { 2113 return single_open(file, smi_stats_proc_show, PDE_DATA(inode)); 2114 } 2115 2116 static const struct file_operations smi_stats_proc_ops = { 2117 .open = smi_stats_proc_open, 2118 .read = seq_read, 2119 .llseek = seq_lseek, 2120 .release = single_release, 2121 }; 2122 #endif /* CONFIG_PROC_FS */ 2123 2124 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, 2125 const struct file_operations *proc_ops, 2126 void *data) 2127 { 2128 int rv = 0; 2129 #ifdef CONFIG_PROC_FS 2130 struct proc_dir_entry *file; 2131 struct ipmi_proc_entry *entry; 2132 2133 /* Create a list element. */ 2134 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 2135 if (!entry) 2136 return -ENOMEM; 2137 entry->name = kstrdup(name, GFP_KERNEL); 2138 if (!entry->name) { 2139 kfree(entry); 2140 return -ENOMEM; 2141 } 2142 2143 file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data); 2144 if (!file) { 2145 kfree(entry->name); 2146 kfree(entry); 2147 rv = -ENOMEM; 2148 } else { 2149 mutex_lock(&smi->proc_entry_lock); 2150 /* Stick it on the list. */ 2151 entry->next = smi->proc_entries; 2152 smi->proc_entries = entry; 2153 mutex_unlock(&smi->proc_entry_lock); 2154 } 2155 #endif /* CONFIG_PROC_FS */ 2156 2157 return rv; 2158 } 2159 EXPORT_SYMBOL(ipmi_smi_add_proc_entry); 2160 2161 static int add_proc_entries(ipmi_smi_t smi, int num) 2162 { 2163 int rv = 0; 2164 2165 #ifdef CONFIG_PROC_FS 2166 sprintf(smi->proc_dir_name, "%d", num); 2167 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root); 2168 if (!smi->proc_dir) 2169 rv = -ENOMEM; 2170 2171 if (rv == 0) 2172 rv = ipmi_smi_add_proc_entry(smi, "stats", 2173 &smi_stats_proc_ops, 2174 smi); 2175 2176 if (rv == 0) 2177 rv = ipmi_smi_add_proc_entry(smi, "ipmb", 2178 &smi_ipmb_proc_ops, 2179 smi); 2180 2181 if (rv == 0) 2182 rv = ipmi_smi_add_proc_entry(smi, "version", 2183 &smi_version_proc_ops, 2184 smi); 2185 #endif /* CONFIG_PROC_FS */ 2186 2187 return rv; 2188 } 2189 2190 static void remove_proc_entries(ipmi_smi_t smi) 2191 { 2192 #ifdef CONFIG_PROC_FS 2193 struct ipmi_proc_entry *entry; 2194 2195 mutex_lock(&smi->proc_entry_lock); 2196 while (smi->proc_entries) { 2197 entry = smi->proc_entries; 2198 smi->proc_entries = entry->next; 2199 2200 remove_proc_entry(entry->name, smi->proc_dir); 2201 kfree(entry->name); 2202 kfree(entry); 2203 } 2204 mutex_unlock(&smi->proc_entry_lock); 2205 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root); 2206 #endif /* CONFIG_PROC_FS */ 2207 } 2208 2209 static int __find_bmc_guid(struct device *dev, void *data) 2210 { 2211 unsigned char *id = data; 2212 struct bmc_device *bmc = to_bmc_device(dev); 2213 return memcmp(bmc->guid, id, 16) == 0; 2214 } 2215 2216 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, 2217 unsigned char *guid) 2218 { 2219 struct device *dev; 2220 2221 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 2222 if (dev) 2223 return to_bmc_device(dev); 2224 else 2225 return NULL; 2226 } 2227 2228 struct prod_dev_id { 2229 unsigned int product_id; 2230 unsigned char device_id; 2231 }; 2232 2233 static int __find_bmc_prod_dev_id(struct device *dev, void *data) 2234 { 2235 struct prod_dev_id *id = data; 2236 struct bmc_device *bmc = to_bmc_device(dev); 2237 2238 return (bmc->id.product_id == id->product_id 2239 && bmc->id.device_id == id->device_id); 2240 } 2241 2242 
static struct bmc_device *ipmi_find_bmc_prod_dev_id( 2243 struct device_driver *drv, 2244 unsigned int product_id, unsigned char device_id) 2245 { 2246 struct prod_dev_id id = { 2247 .product_id = product_id, 2248 .device_id = device_id, 2249 }; 2250 struct device *dev; 2251 2252 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 2253 if (dev) 2254 return to_bmc_device(dev); 2255 else 2256 return NULL; 2257 } 2258 2259 static ssize_t device_id_show(struct device *dev, 2260 struct device_attribute *attr, 2261 char *buf) 2262 { 2263 struct bmc_device *bmc = to_bmc_device(dev); 2264 2265 return snprintf(buf, 10, "%u\n", bmc->id.device_id); 2266 } 2267 static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL); 2268 2269 static ssize_t provides_device_sdrs_show(struct device *dev, 2270 struct device_attribute *attr, 2271 char *buf) 2272 { 2273 struct bmc_device *bmc = to_bmc_device(dev); 2274 2275 return snprintf(buf, 10, "%u\n", 2276 (bmc->id.device_revision & 0x80) >> 7); 2277 } 2278 static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, 2279 NULL); 2280 2281 static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2282 char *buf) 2283 { 2284 struct bmc_device *bmc = to_bmc_device(dev); 2285 2286 return snprintf(buf, 20, "%u\n", 2287 bmc->id.device_revision & 0x0F); 2288 } 2289 static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL); 2290 2291 static ssize_t firmware_revision_show(struct device *dev, 2292 struct device_attribute *attr, 2293 char *buf) 2294 { 2295 struct bmc_device *bmc = to_bmc_device(dev); 2296 2297 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1, 2298 bmc->id.firmware_revision_2); 2299 } 2300 static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL); 2301 2302 static ssize_t ipmi_version_show(struct device *dev, 2303 struct device_attribute *attr, 2304 char *buf) 2305 { 2306 struct bmc_device *bmc = to_bmc_device(dev); 2307 2308 return snprintf(buf, 20, "%u.%u\n", 2309 ipmi_version_major(&bmc->id), 2310 ipmi_version_minor(&bmc->id)); 2311 } 2312 static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL); 2313 2314 static ssize_t add_dev_support_show(struct device *dev, 2315 struct device_attribute *attr, 2316 char *buf) 2317 { 2318 struct bmc_device *bmc = to_bmc_device(dev); 2319 2320 return snprintf(buf, 10, "0x%02x\n", 2321 bmc->id.additional_device_support); 2322 } 2323 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, 2324 NULL); 2325 2326 static ssize_t manufacturer_id_show(struct device *dev, 2327 struct device_attribute *attr, 2328 char *buf) 2329 { 2330 struct bmc_device *bmc = to_bmc_device(dev); 2331 2332 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id); 2333 } 2334 static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL); 2335 2336 static ssize_t product_id_show(struct device *dev, 2337 struct device_attribute *attr, 2338 char *buf) 2339 { 2340 struct bmc_device *bmc = to_bmc_device(dev); 2341 2342 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id); 2343 } 2344 static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL); 2345 2346 static ssize_t aux_firmware_rev_show(struct device *dev, 2347 struct device_attribute *attr, 2348 char *buf) 2349 { 2350 struct bmc_device *bmc = to_bmc_device(dev); 2351 2352 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2353 bmc->id.aux_firmware_revision[3], 2354 bmc->id.aux_firmware_revision[2], 2355 bmc->id.aux_firmware_revision[1], 2356 
			bmc->id.aux_firmware_revision[0]);
}
static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);

static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);

	return snprintf(buf, 100, "%Lx%Lx\n",
			(long long) bmc->guid[0],
			(long long) bmc->guid[8]);
}
static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);

static struct attribute *bmc_dev_attrs[] = {
	&dev_attr_device_id.attr,
	&dev_attr_provides_device_sdrs.attr,
	&dev_attr_revision.attr,
	&dev_attr_firmware_revision.attr,
	&dev_attr_ipmi_version.attr,
	&dev_attr_additional_device_support.attr,
	&dev_attr_manufacturer_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_aux_firmware_revision.attr,
	&dev_attr_guid.attr,
	NULL
};

static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct bmc_device *bmc = to_bmc_device(dev);
	umode_t mode = attr->mode;

	if (attr == &dev_attr_aux_firmware_revision.attr)
		return bmc->id.aux_firmware_revision_set ? mode : 0;
	if (attr == &dev_attr_guid.attr)
		return bmc->guid_set ? mode : 0;
	return mode;
}

static struct attribute_group bmc_dev_attr_group = {
	.attrs		= bmc_dev_attrs,
	.is_visible	= bmc_dev_attr_is_visible,
};

static const struct attribute_group *bmc_dev_attr_groups[] = {
	&bmc_dev_attr_group,
	NULL
};

static struct device_type bmc_device_type = {
	.groups		= bmc_dev_attr_groups,
};

static void
release_bmc_device(struct device *dev)
{
	kfree(to_bmc_device(dev));
}

static void
cleanup_bmc_device(struct kref *ref)
{
	struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);

	platform_device_unregister(&bmc->pdev);
}

static void ipmi_bmc_unregister(ipmi_smi_t intf)
{
	struct bmc_device *bmc = intf->bmc;

	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
	if (intf->my_dev_name) {
		sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
		kfree(intf->my_dev_name);
		intf->my_dev_name = NULL;
	}

	mutex_lock(&ipmidriver_mutex);
	kref_put(&bmc->usecount, cleanup_bmc_device);
	intf->bmc = NULL;
	mutex_unlock(&ipmidriver_mutex);
}

static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
{
	int rv;
	struct bmc_device *bmc = intf->bmc;
	struct bmc_device *old_bmc;

	mutex_lock(&ipmidriver_mutex);

	/*
	 * Try to find if there is a bmc_device struct
	 * representing the interfaced BMC already.
	 */
	if (bmc->guid_set)
		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, bmc->guid);
	else
		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
						    bmc->id.product_id,
						    bmc->id.device_id);

	/*
	 * If there is already a bmc_device, free the new one,
	 * otherwise register the new BMC device.
	 */
	if (old_bmc) {
		kfree(bmc);
		intf->bmc = old_bmc;
		bmc = old_bmc;

		kref_get(&bmc->usecount);
		mutex_unlock(&ipmidriver_mutex);

		printk(KERN_INFO
		       "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
		       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
		       bmc->id.manufacturer_id,
		       bmc->id.product_id,
		       bmc->id.device_id);
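
		/*
		 * Both interfaces now share the one bmc_device; the
		 * kref_get() above keeps it alive until the last user
		 * drops its reference in ipmi_bmc_unregister().
		 */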
	} else {
		unsigned char orig_dev_id = bmc->id.device_id;
		int warn_printed = 0;

		snprintf(bmc->name, sizeof(bmc->name),
			 "ipmi_bmc.%4.4x", bmc->id.product_id);
		bmc->pdev.name = bmc->name;

		while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
						 bmc->id.product_id,
						 bmc->id.device_id)) {
			if (!warn_printed) {
				printk(KERN_WARNING PFX
				       "This machine has two different BMCs"
				       " with the same product id and device"
				       " id.  This is an error in the"
				       " firmware; the device id is being"
				       " incremented to work around the"
				       " problem.  Prod ID = 0x%x, Dev ID"
				       " = 0x%x\n",
				       bmc->id.product_id, bmc->id.device_id);
				warn_printed = 1;
			}
			bmc->id.device_id++; /* Wraps at 255 */
			if (bmc->id.device_id == orig_dev_id) {
				printk(KERN_ERR PFX
				       "Out of device ids!\n");
				break;
			}
		}

		bmc->pdev.dev.driver = &ipmidriver.driver;
		bmc->pdev.id = bmc->id.device_id;
		bmc->pdev.dev.release = release_bmc_device;
		bmc->pdev.dev.type = &bmc_device_type;
		kref_init(&bmc->usecount);

		rv = platform_device_register(&bmc->pdev);
		mutex_unlock(&ipmidriver_mutex);
		if (rv) {
			put_device(&bmc->pdev.dev);
			printk(KERN_ERR
			       "ipmi_msghandler:"
			       " Unable to register bmc device: %d\n",
			       rv);
			/*
			 * Don't go to out_err, you can only do that if
			 * the device is registered already.
			 */
			return rv;
		}

		dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
			 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
			 bmc->id.manufacturer_id,
			 bmc->id.product_id,
			 bmc->id.device_id);
	}

	/*
	 * Create symlinks from the system interface device to the bmc
	 * device and back.
	 */
	rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
	if (rv) {
		printk(KERN_ERR
		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
		       rv);
		goto out_err;
	}

	intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", ifnum);
	if (!intf->my_dev_name) {
		rv = -ENOMEM;
		printk(KERN_ERR
		       "ipmi_msghandler: Unable to allocate link from BMC: %d\n",
		       rv);
		goto out_err;
	}

	rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
			       intf->my_dev_name);
	if (rv) {
		kfree(intf->my_dev_name);
		intf->my_dev_name = NULL;
		printk(KERN_ERR
		       "ipmi_msghandler:"
		       " Unable to create symlink to bmc: %d\n",
		       rv);
		goto out_err;
	}

	return 0;

 out_err:
	ipmi_bmc_unregister(intf);
	return rv;
}

static int
send_guid_cmd(ipmi_smi_t intf, int chan)
{
	struct kernel_ipmi_msg msg;
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->channels[0].address,
			      intf->channels[0].lun,
			      -1, 0);
}

static void
guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
		/* Not for me */
		return;

	if (msg->msg.data[0] != 0) {
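		/* The first data byte is the IPMI completion code. */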
2617 /* Error from getting the GUID, the BMC doesn't have one. */ 2618 intf->bmc->guid_set = 0; 2619 goto out; 2620 } 2621 2622 if (msg->msg.data_len < 17) { 2623 intf->bmc->guid_set = 0; 2624 printk(KERN_WARNING PFX 2625 "guid_handler: The GUID response from the BMC was too" 2626 " short, it was %d but should have been 17. Assuming" 2627 " GUID is not available.\n", 2628 msg->msg.data_len); 2629 goto out; 2630 } 2631 2632 memcpy(intf->bmc->guid, msg->msg.data, 16); 2633 intf->bmc->guid_set = 1; 2634 out: 2635 wake_up(&intf->waitq); 2636 } 2637 2638 static void 2639 get_guid(ipmi_smi_t intf) 2640 { 2641 int rv; 2642 2643 intf->bmc->guid_set = 0x2; 2644 intf->null_user_handler = guid_handler; 2645 rv = send_guid_cmd(intf, 0); 2646 if (rv) 2647 /* Send failed, no GUID available. */ 2648 intf->bmc->guid_set = 0; 2649 wait_event(intf->waitq, intf->bmc->guid_set != 2); 2650 intf->null_user_handler = NULL; 2651 } 2652 2653 static int 2654 send_channel_info_cmd(ipmi_smi_t intf, int chan) 2655 { 2656 struct kernel_ipmi_msg msg; 2657 unsigned char data[1]; 2658 struct ipmi_system_interface_addr si; 2659 2660 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2661 si.channel = IPMI_BMC_CHANNEL; 2662 si.lun = 0; 2663 2664 msg.netfn = IPMI_NETFN_APP_REQUEST; 2665 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 2666 msg.data = data; 2667 msg.data_len = 1; 2668 data[0] = chan; 2669 return i_ipmi_request(NULL, 2670 intf, 2671 (struct ipmi_addr *) &si, 2672 0, 2673 &msg, 2674 intf, 2675 NULL, 2676 NULL, 2677 0, 2678 intf->channels[0].address, 2679 intf->channels[0].lun, 2680 -1, 0); 2681 } 2682 2683 static void 2684 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) 2685 { 2686 int rv = 0; 2687 int chan; 2688 2689 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2690 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 2691 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { 2692 /* It's the one we want */ 2693 if (msg->msg.data[0] != 0) { 2694 /* Got an error from the channel, just go on. */ 2695 2696 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 2697 /* 2698 * If the MC does not support this 2699 * command, that is legal. We just 2700 * assume it has one IPMB at channel 2701 * zero. 2702 */ 2703 intf->channels[0].medium 2704 = IPMI_CHANNEL_MEDIUM_IPMB; 2705 intf->channels[0].protocol 2706 = IPMI_CHANNEL_PROTOCOL_IPMB; 2707 2708 intf->curr_channel = IPMI_MAX_CHANNELS; 2709 wake_up(&intf->waitq); 2710 goto out; 2711 } 2712 goto next_channel; 2713 } 2714 if (msg->msg.data_len < 4) { 2715 /* Message not big enough, just go on. */ 2716 goto next_channel; 2717 } 2718 chan = intf->curr_channel; 2719 intf->channels[chan].medium = msg->msg.data[2] & 0x7f; 2720 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f; 2721 2722 next_channel: 2723 intf->curr_channel++; 2724 if (intf->curr_channel >= IPMI_MAX_CHANNELS) 2725 wake_up(&intf->waitq); 2726 else 2727 rv = send_channel_info_cmd(intf, intf->curr_channel); 2728 2729 if (rv) { 2730 /* Got an error somehow, just give up. 
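			 * Push curr_channel past the last channel and
			 * wake up the waiter so ipmi_register_smi() is
			 * not left blocked by the failure.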
*/ 2731 printk(KERN_WARNING PFX 2732 "Error sending channel information for channel" 2733 " %d: %d\n", intf->curr_channel, rv); 2734 2735 intf->curr_channel = IPMI_MAX_CHANNELS; 2736 wake_up(&intf->waitq); 2737 } 2738 } 2739 out: 2740 return; 2741 } 2742 2743 static void ipmi_poll(ipmi_smi_t intf) 2744 { 2745 if (intf->handlers->poll) 2746 intf->handlers->poll(intf->send_info); 2747 /* In case something came in */ 2748 handle_new_recv_msgs(intf); 2749 } 2750 2751 void ipmi_poll_interface(ipmi_user_t user) 2752 { 2753 ipmi_poll(user->intf); 2754 } 2755 EXPORT_SYMBOL(ipmi_poll_interface); 2756 2757 int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, 2758 void *send_info, 2759 struct ipmi_device_id *device_id, 2760 struct device *si_dev, 2761 unsigned char slave_addr) 2762 { 2763 int i, j; 2764 int rv; 2765 ipmi_smi_t intf; 2766 ipmi_smi_t tintf; 2767 struct list_head *link; 2768 2769 /* 2770 * Make sure the driver is actually initialized, this handles 2771 * problems with initialization order. 2772 */ 2773 if (!initialized) { 2774 rv = ipmi_init_msghandler(); 2775 if (rv) 2776 return rv; 2777 /* 2778 * The init code doesn't return an error if it was turned 2779 * off, but it won't initialize. Check that. 2780 */ 2781 if (!initialized) 2782 return -ENODEV; 2783 } 2784 2785 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 2786 if (!intf) 2787 return -ENOMEM; 2788 2789 intf->ipmi_version_major = ipmi_version_major(device_id); 2790 intf->ipmi_version_minor = ipmi_version_minor(device_id); 2791 2792 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL); 2793 if (!intf->bmc) { 2794 kfree(intf); 2795 return -ENOMEM; 2796 } 2797 intf->intf_num = -1; /* Mark it invalid for now. */ 2798 kref_init(&intf->refcount); 2799 intf->bmc->id = *device_id; 2800 intf->si_dev = si_dev; 2801 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 2802 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR; 2803 intf->channels[j].lun = 2; 2804 } 2805 if (slave_addr != 0) 2806 intf->channels[0].address = slave_addr; 2807 INIT_LIST_HEAD(&intf->users); 2808 intf->handlers = handlers; 2809 intf->send_info = send_info; 2810 spin_lock_init(&intf->seq_lock); 2811 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 2812 intf->seq_table[j].inuse = 0; 2813 intf->seq_table[j].seqid = 0; 2814 } 2815 intf->curr_seq = 0; 2816 #ifdef CONFIG_PROC_FS 2817 mutex_init(&intf->proc_entry_lock); 2818 #endif 2819 spin_lock_init(&intf->waiting_rcv_msgs_lock); 2820 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 2821 tasklet_init(&intf->recv_tasklet, 2822 smi_recv_tasklet, 2823 (unsigned long) intf); 2824 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 2825 spin_lock_init(&intf->xmit_msgs_lock); 2826 INIT_LIST_HEAD(&intf->xmit_msgs); 2827 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 2828 spin_lock_init(&intf->events_lock); 2829 atomic_set(&intf->event_waiters, 0); 2830 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 2831 INIT_LIST_HEAD(&intf->waiting_events); 2832 intf->waiting_events_count = 0; 2833 mutex_init(&intf->cmd_rcvrs_mutex); 2834 spin_lock_init(&intf->maintenance_mode_lock); 2835 INIT_LIST_HEAD(&intf->cmd_rcvrs); 2836 init_waitqueue_head(&intf->waitq); 2837 for (i = 0; i < IPMI_NUM_STATS; i++) 2838 atomic_set(&intf->stats[i], 0); 2839 2840 intf->proc_dir = NULL; 2841 2842 mutex_lock(&smi_watchers_mutex); 2843 mutex_lock(&ipmi_interfaces_mutex); 2844 /* Look for a hole in the numbers. 
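	 * Interfaces are kept on ipmi_interfaces in numeric order, so
	 * the first index that does not match an existing intf_num is
	 * free for this interface.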
*/ 2845 i = 0; 2846 link = &ipmi_interfaces; 2847 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) { 2848 if (tintf->intf_num != i) { 2849 link = &tintf->link; 2850 break; 2851 } 2852 i++; 2853 } 2854 /* Add the new interface in numeric order. */ 2855 if (i == 0) 2856 list_add_rcu(&intf->link, &ipmi_interfaces); 2857 else 2858 list_add_tail_rcu(&intf->link, link); 2859 2860 rv = handlers->start_processing(send_info, intf); 2861 if (rv) 2862 goto out; 2863 2864 get_guid(intf); 2865 2866 if ((intf->ipmi_version_major > 1) 2867 || ((intf->ipmi_version_major == 1) 2868 && (intf->ipmi_version_minor >= 5))) { 2869 /* 2870 * Start scanning the channels to see what is 2871 * available. 2872 */ 2873 intf->null_user_handler = channel_handler; 2874 intf->curr_channel = 0; 2875 rv = send_channel_info_cmd(intf, 0); 2876 if (rv) { 2877 printk(KERN_WARNING PFX 2878 "Error sending channel information for channel" 2879 " 0, %d\n", rv); 2880 goto out; 2881 } 2882 2883 /* Wait for the channel info to be read. */ 2884 wait_event(intf->waitq, 2885 intf->curr_channel >= IPMI_MAX_CHANNELS); 2886 intf->null_user_handler = NULL; 2887 } else { 2888 /* Assume a single IPMB channel at zero. */ 2889 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 2890 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 2891 intf->curr_channel = IPMI_MAX_CHANNELS; 2892 } 2893 2894 if (rv == 0) 2895 rv = add_proc_entries(intf, i); 2896 2897 rv = ipmi_bmc_register(intf, i); 2898 2899 out: 2900 if (rv) { 2901 if (intf->proc_dir) 2902 remove_proc_entries(intf); 2903 intf->handlers = NULL; 2904 list_del_rcu(&intf->link); 2905 mutex_unlock(&ipmi_interfaces_mutex); 2906 mutex_unlock(&smi_watchers_mutex); 2907 synchronize_rcu(); 2908 kref_put(&intf->refcount, intf_free); 2909 } else { 2910 /* 2911 * Keep memory order straight for RCU readers. Make 2912 * sure everything else is committed to memory before 2913 * setting intf_num to mark the interface valid. 2914 */ 2915 smp_wmb(); 2916 intf->intf_num = i; 2917 mutex_unlock(&ipmi_interfaces_mutex); 2918 /* After this point the interface is legal to use. */ 2919 call_smi_watchers(i, intf->si_dev); 2920 mutex_unlock(&smi_watchers_mutex); 2921 } 2922 2923 return rv; 2924 } 2925 EXPORT_SYMBOL(ipmi_register_smi); 2926 2927 static void deliver_smi_err_response(ipmi_smi_t intf, 2928 struct ipmi_smi_msg *msg, 2929 unsigned char err) 2930 { 2931 msg->rsp[0] = msg->data[0] | 4; 2932 msg->rsp[1] = msg->data[1]; 2933 msg->rsp[2] = err; 2934 msg->rsp_size = 3; 2935 /* It's an error, so it will never requeue, no need to check return. */ 2936 handle_one_recv_msg(intf, msg); 2937 } 2938 2939 static void cleanup_smi_msgs(ipmi_smi_t intf) 2940 { 2941 int i; 2942 struct seq_table *ent; 2943 struct ipmi_smi_msg *msg; 2944 struct list_head *entry; 2945 struct list_head tmplist; 2946 2947 /* Clear out our transmit queues and hold the messages. */ 2948 INIT_LIST_HEAD(&tmplist); 2949 list_splice_tail(&intf->hp_xmit_msgs, &tmplist); 2950 list_splice_tail(&intf->xmit_msgs, &tmplist); 2951 2952 /* Current message first, to preserve order */ 2953 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { 2954 /* Wait for the message to clear out. */ 2955 schedule_timeout(1); 2956 } 2957 2958 /* No need for locks, the interface is down. */ 2959 2960 /* 2961 * Return errors for all pending messages in queue and in the 2962 * tables waiting for remote responses. 
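	 * Each one is completed with IPMI_ERR_UNSPECIFIED so that no
	 * sender is left waiting for a reply that will never arrive.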
2963 */ 2964 while (!list_empty(&tmplist)) { 2965 entry = tmplist.next; 2966 list_del(entry); 2967 msg = list_entry(entry, struct ipmi_smi_msg, link); 2968 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); 2969 } 2970 2971 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 2972 ent = &(intf->seq_table[i]); 2973 if (!ent->inuse) 2974 continue; 2975 deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED); 2976 } 2977 } 2978 2979 int ipmi_unregister_smi(ipmi_smi_t intf) 2980 { 2981 struct ipmi_smi_watcher *w; 2982 int intf_num = intf->intf_num; 2983 ipmi_user_t user; 2984 2985 ipmi_bmc_unregister(intf); 2986 2987 mutex_lock(&smi_watchers_mutex); 2988 mutex_lock(&ipmi_interfaces_mutex); 2989 intf->intf_num = -1; 2990 intf->in_shutdown = true; 2991 list_del_rcu(&intf->link); 2992 mutex_unlock(&ipmi_interfaces_mutex); 2993 synchronize_rcu(); 2994 2995 cleanup_smi_msgs(intf); 2996 2997 /* Clean up the effects of users on the lower-level software. */ 2998 mutex_lock(&ipmi_interfaces_mutex); 2999 rcu_read_lock(); 3000 list_for_each_entry_rcu(user, &intf->users, link) { 3001 module_put(intf->handlers->owner); 3002 if (intf->handlers->dec_usecount) 3003 intf->handlers->dec_usecount(intf->send_info); 3004 } 3005 rcu_read_unlock(); 3006 intf->handlers = NULL; 3007 mutex_unlock(&ipmi_interfaces_mutex); 3008 3009 remove_proc_entries(intf); 3010 3011 /* 3012 * Call all the watcher interfaces to tell them that 3013 * an interface is gone. 3014 */ 3015 list_for_each_entry(w, &smi_watchers, link) 3016 w->smi_gone(intf_num); 3017 mutex_unlock(&smi_watchers_mutex); 3018 3019 kref_put(&intf->refcount, intf_free); 3020 return 0; 3021 } 3022 EXPORT_SYMBOL(ipmi_unregister_smi); 3023 3024 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, 3025 struct ipmi_smi_msg *msg) 3026 { 3027 struct ipmi_ipmb_addr ipmb_addr; 3028 struct ipmi_recv_msg *recv_msg; 3029 3030 /* 3031 * This is 11, not 10, because the response must contain a 3032 * completion code. 3033 */ 3034 if (msg->rsp_size < 11) { 3035 /* Message not big enough, just ignore it. */ 3036 ipmi_inc_stat(intf, invalid_ipmb_responses); 3037 return 0; 3038 } 3039 3040 if (msg->rsp[2] != 0) { 3041 /* An error getting the response, just ignore it. */ 3042 return 0; 3043 } 3044 3045 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 3046 ipmb_addr.slave_addr = msg->rsp[6]; 3047 ipmb_addr.channel = msg->rsp[3] & 0x0f; 3048 ipmb_addr.lun = msg->rsp[7] & 3; 3049 3050 /* 3051 * It's a response from a remote entity. Look up the sequence 3052 * number and handle the response. 3053 */ 3054 if (intf_find_seq(intf, 3055 msg->rsp[7] >> 2, 3056 msg->rsp[3] & 0x0f, 3057 msg->rsp[8], 3058 (msg->rsp[4] >> 2) & (~1), 3059 (struct ipmi_addr *) &(ipmb_addr), 3060 &recv_msg)) { 3061 /* 3062 * We were unable to find the sequence number, 3063 * so just nuke the message. 3064 */ 3065 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3066 return 0; 3067 } 3068 3069 memcpy(recv_msg->msg_data, 3070 &(msg->rsp[9]), 3071 msg->rsp_size - 9); 3072 /* 3073 * The other fields matched, so no need to set them, except 3074 * for netfn, which needs to be the response that was 3075 * returned, not the request value. 
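	 * (A response netfn is the request netfn with the low bit set.)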
3076 */ 3077 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3078 recv_msg->msg.data = recv_msg->msg_data; 3079 recv_msg->msg.data_len = msg->rsp_size - 10; 3080 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3081 ipmi_inc_stat(intf, handled_ipmb_responses); 3082 deliver_response(recv_msg); 3083 3084 return 0; 3085 } 3086 3087 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, 3088 struct ipmi_smi_msg *msg) 3089 { 3090 struct cmd_rcvr *rcvr; 3091 int rv = 0; 3092 unsigned char netfn; 3093 unsigned char cmd; 3094 unsigned char chan; 3095 ipmi_user_t user = NULL; 3096 struct ipmi_ipmb_addr *ipmb_addr; 3097 struct ipmi_recv_msg *recv_msg; 3098 3099 if (msg->rsp_size < 10) { 3100 /* Message not big enough, just ignore it. */ 3101 ipmi_inc_stat(intf, invalid_commands); 3102 return 0; 3103 } 3104 3105 if (msg->rsp[2] != 0) { 3106 /* An error getting the response, just ignore it. */ 3107 return 0; 3108 } 3109 3110 netfn = msg->rsp[4] >> 2; 3111 cmd = msg->rsp[8]; 3112 chan = msg->rsp[3] & 0xf; 3113 3114 rcu_read_lock(); 3115 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3116 if (rcvr) { 3117 user = rcvr->user; 3118 kref_get(&user->refcount); 3119 } else 3120 user = NULL; 3121 rcu_read_unlock(); 3122 3123 if (user == NULL) { 3124 /* We didn't find a user, deliver an error response. */ 3125 ipmi_inc_stat(intf, unhandled_commands); 3126 3127 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3128 msg->data[1] = IPMI_SEND_MSG_CMD; 3129 msg->data[2] = msg->rsp[3]; 3130 msg->data[3] = msg->rsp[6]; 3131 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3132 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2); 3133 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address; 3134 /* rqseq/lun */ 3135 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3136 msg->data[8] = msg->rsp[8]; /* cmd */ 3137 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3138 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4); 3139 msg->data_size = 11; 3140 3141 #ifdef DEBUG_MSGING 3142 { 3143 int m; 3144 printk("Invalid command:"); 3145 for (m = 0; m < msg->data_size; m++) 3146 printk(" %2.2x", msg->data[m]); 3147 printk("\n"); 3148 } 3149 #endif 3150 rcu_read_lock(); 3151 if (!intf->in_shutdown) { 3152 smi_send(intf, intf->handlers, msg, 0); 3153 /* 3154 * We used the message, so return the value 3155 * that causes it to not be freed or 3156 * queued. 3157 */ 3158 rv = -1; 3159 } 3160 rcu_read_unlock(); 3161 } else { 3162 /* Deliver the message to the user. */ 3163 ipmi_inc_stat(intf, handled_commands); 3164 3165 recv_msg = ipmi_alloc_recv_msg(); 3166 if (!recv_msg) { 3167 /* 3168 * We couldn't allocate memory for the 3169 * message, so requeue it for handling 3170 * later. 3171 */ 3172 rv = 1; 3173 kref_put(&user->refcount, free_user); 3174 } else { 3175 /* Extract the source address from the data. */ 3176 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 3177 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 3178 ipmb_addr->slave_addr = msg->rsp[6]; 3179 ipmb_addr->lun = msg->rsp[7] & 3; 3180 ipmb_addr->channel = msg->rsp[3] & 0xf; 3181 3182 /* 3183 * Extract the rest of the message information 3184 * from the IPMB header. 3185 */ 3186 recv_msg->user = user; 3187 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3188 recv_msg->msgid = msg->rsp[7] >> 2; 3189 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3190 recv_msg->msg.cmd = msg->rsp[8]; 3191 recv_msg->msg.data = recv_msg->msg_data; 3192 3193 /* 3194 * We chop off 10, not 9 bytes because the checksum 3195 * at the end also needs to be removed. 
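			 * The data proper thus runs from rsp[9] through
			 * rsp[rsp_size - 2]; rsp[rsp_size - 1] is the
			 * trailing IPMB checksum.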
3196 */ 3197 recv_msg->msg.data_len = msg->rsp_size - 10; 3198 memcpy(recv_msg->msg_data, 3199 &(msg->rsp[9]), 3200 msg->rsp_size - 10); 3201 deliver_response(recv_msg); 3202 } 3203 } 3204 3205 return rv; 3206 } 3207 3208 static int handle_lan_get_msg_rsp(ipmi_smi_t intf, 3209 struct ipmi_smi_msg *msg) 3210 { 3211 struct ipmi_lan_addr lan_addr; 3212 struct ipmi_recv_msg *recv_msg; 3213 3214 3215 /* 3216 * This is 13, not 12, because the response must contain a 3217 * completion code. 3218 */ 3219 if (msg->rsp_size < 13) { 3220 /* Message not big enough, just ignore it. */ 3221 ipmi_inc_stat(intf, invalid_lan_responses); 3222 return 0; 3223 } 3224 3225 if (msg->rsp[2] != 0) { 3226 /* An error getting the response, just ignore it. */ 3227 return 0; 3228 } 3229 3230 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 3231 lan_addr.session_handle = msg->rsp[4]; 3232 lan_addr.remote_SWID = msg->rsp[8]; 3233 lan_addr.local_SWID = msg->rsp[5]; 3234 lan_addr.channel = msg->rsp[3] & 0x0f; 3235 lan_addr.privilege = msg->rsp[3] >> 4; 3236 lan_addr.lun = msg->rsp[9] & 3; 3237 3238 /* 3239 * It's a response from a remote entity. Look up the sequence 3240 * number and handle the response. 3241 */ 3242 if (intf_find_seq(intf, 3243 msg->rsp[9] >> 2, 3244 msg->rsp[3] & 0x0f, 3245 msg->rsp[10], 3246 (msg->rsp[6] >> 2) & (~1), 3247 (struct ipmi_addr *) &(lan_addr), 3248 &recv_msg)) { 3249 /* 3250 * We were unable to find the sequence number, 3251 * so just nuke the message. 3252 */ 3253 ipmi_inc_stat(intf, unhandled_lan_responses); 3254 return 0; 3255 } 3256 3257 memcpy(recv_msg->msg_data, 3258 &(msg->rsp[11]), 3259 msg->rsp_size - 11); 3260 /* 3261 * The other fields matched, so no need to set them, except 3262 * for netfn, which needs to be the response that was 3263 * returned, not the request value. 3264 */ 3265 recv_msg->msg.netfn = msg->rsp[6] >> 2; 3266 recv_msg->msg.data = recv_msg->msg_data; 3267 recv_msg->msg.data_len = msg->rsp_size - 12; 3268 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3269 ipmi_inc_stat(intf, handled_lan_responses); 3270 deliver_response(recv_msg); 3271 3272 return 0; 3273 } 3274 3275 static int handle_lan_get_msg_cmd(ipmi_smi_t intf, 3276 struct ipmi_smi_msg *msg) 3277 { 3278 struct cmd_rcvr *rcvr; 3279 int rv = 0; 3280 unsigned char netfn; 3281 unsigned char cmd; 3282 unsigned char chan; 3283 ipmi_user_t user = NULL; 3284 struct ipmi_lan_addr *lan_addr; 3285 struct ipmi_recv_msg *recv_msg; 3286 3287 if (msg->rsp_size < 12) { 3288 /* Message not big enough, just ignore it. */ 3289 ipmi_inc_stat(intf, invalid_commands); 3290 return 0; 3291 } 3292 3293 if (msg->rsp[2] != 0) { 3294 /* An error getting the response, just ignore it. */ 3295 return 0; 3296 } 3297 3298 netfn = msg->rsp[6] >> 2; 3299 cmd = msg->rsp[10]; 3300 chan = msg->rsp[3] & 0xf; 3301 3302 rcu_read_lock(); 3303 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3304 if (rcvr) { 3305 user = rcvr->user; 3306 kref_get(&user->refcount); 3307 } else 3308 user = NULL; 3309 rcu_read_unlock(); 3310 3311 if (user == NULL) { 3312 /* We didn't find a user, just give up. */ 3313 ipmi_inc_stat(intf, unhandled_commands); 3314 3315 /* 3316 * Don't do anything with these messages, just allow 3317 * them to be freed. 3318 */ 3319 rv = 0; 3320 } else { 3321 /* Deliver the message to the user. */ 3322 ipmi_inc_stat(intf, handled_commands); 3323 3324 recv_msg = ipmi_alloc_recv_msg(); 3325 if (!recv_msg) { 3326 /* 3327 * We couldn't allocate memory for the 3328 * message, so requeue it for handling later. 
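			 * Returning 1 makes the caller put the SMI
			 * message back at the head of the receive queue
			 * and retry it later.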
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
			lan_addr->session_handle = msg->rsp[4];
			lan_addr->remote_SWID = msg->rsp[8];
			lan_addr->local_SWID = msg->rsp[5];
			lan_addr->lun = msg->rsp[9] & 3;
			lan_addr->channel = msg->rsp[3] & 0xf;
			lan_addr->privilege = msg->rsp[3] >> 4;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[9] >> 2;
			recv_msg->msg.netfn = msg->rsp[6] >> 2;
			recv_msg->msg.cmd = msg->rsp[10];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 12, not 11, bytes because the
			 * checksum at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 12;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[11]),
			       msg->rsp_size - 12);
			deliver_response(recv_msg);
		}
	}

	return rv;
}

/*
 * This routine will handle "Get Message" command responses for
 * channels that use an OEM Medium.  The message format belongs to
 * the OEM.  See IPMI 2.0 specification, Chapter 6 and
 * Chapter 22, sections 22.6 and 22.24 for more details.
 */
static int handle_oem_get_msg_cmd(ipmi_smi_t intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	ipmi_user_t user = NULL;
	struct ipmi_system_interface_addr *smi_addr;
	struct ipmi_recv_msg *recv_msg;

	/*
	 * We expect the OEM SW to perform error checking, so we just
	 * do some basic sanity checks.
	 */
	if (msg->rsp_size < 4) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/*
	 * This is an OEM Message so the OEM needs to know how to
	 * handle the message.  We do no interpretation.
	 */
	netfn = msg->rsp[0] >> 2;
	cmd = msg->rsp[1];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */

		rv = 0;
	} else {
		/* Deliver the message to the user. */
		ipmi_inc_stat(intf, handled_commands);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/*
			 * OEM Messages are expected to be delivered via
			 * the system interface to SMS software.
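			 * The receive address below is therefore filled
			 * in as a system interface address on
			 * IPMI_BMC_CHANNEL.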
			 * We might need to visit this again depending on
			 * OEM requirements.
			 */
			smi_addr = ((struct ipmi_system_interface_addr *)
				    &(recv_msg->addr));
			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
			smi_addr->channel = IPMI_BMC_CHANNEL;
			smi_addr->lun = msg->rsp[0] & 3;

			recv_msg->user = user;
			recv_msg->user_msg_data = NULL;
			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
			recv_msg->msg.netfn = msg->rsp[0] >> 2;
			recv_msg->msg.cmd = msg->rsp[1];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * The message starts at byte 4, which follows the
			 * Channel Byte in the "GET MESSAGE" command.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 4;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[4]),
			       msg->rsp_size - 4);
			deliver_response(recv_msg);
		}
	}

	return rv;
}

static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
				     struct ipmi_smi_msg *msg)
{
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg->msgid = 0;
	smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 3;
}

static int handle_read_event_rsp(ipmi_smi_t intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head msgs;
	ipmi_user_t user;
	int rv = 0;
	int deliver_count = 0;
	unsigned long flags;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		ipmi_inc_stat(intf, invalid_events);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);

	ipmi_inc_stat(intf, events);

	/*
	 * Allocate and fill in one message for every user that is
	 * getting events.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			rcu_read_unlock();
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
			}
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		kref_get(&user->refcount);
		list_add_tail(&(recv_msg->link), &msgs);
	}
	rcu_read_unlock();

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_response(recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/*
		 * No one to receive the message, put it in the queue if
		 * there are not already too many things in the queue.
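		 * Queued events are handed out later when a user turns
		 * on event reception with ipmi_set_gets_events().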
		 */
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
		intf->waiting_events_count++;
	} else if (!intf->event_msg_printed) {
		/*
		 * There are too many things in the queue, discard this
		 * message.
		 */
		printk(KERN_WARNING PFX "Event queue full, discarding"
		       " incoming events\n");
		intf->event_msg_printed = 1;
	}

 out:
	spin_unlock_irqrestore(&(intf->events_lock), flags);

	return rv;
}

static int handle_bmc_rsp(ipmi_smi_t intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_user *user;

	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
	if (recv_msg == NULL) {
		printk(KERN_WARNING
		       "IPMI message received with no owner. This\n"
		       "could be because of a malformed message, or\n"
		       "because of a hardware error.  Contact your\n"
		       "hardware vendor for assistance\n");
		return 0;
	}

	user = recv_msg->user;
	/* Make sure the user still exists. */
	if (user && !user->valid) {
		/* The user for the message went away, so give up. */
		ipmi_inc_stat(intf, unhandled_local_responses);
		ipmi_free_recv_msg(recv_msg);
	} else {
		struct ipmi_system_interface_addr *smi_addr;

		ipmi_inc_stat(intf, handled_local_responses);
		recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv_msg->msgid = msg->msgid;
		smi_addr = ((struct ipmi_system_interface_addr *)
			    &(recv_msg->addr));
		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		smi_addr->channel = IPMI_BMC_CHANNEL;
		smi_addr->lun = msg->rsp[0] & 3;
		recv_msg->msg.netfn = msg->rsp[0] >> 2;
		recv_msg->msg.cmd = msg->rsp[1];
		memcpy(recv_msg->msg_data,
		       &(msg->rsp[2]),
		       msg->rsp_size - 2);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = msg->rsp_size - 2;
		deliver_response(recv_msg);
	}

	return 0;
}

/*
 * Handle a received message.  Return 1 if the message should be requeued,
 * 0 if the message should be freed, or -1 if the message should not
 * be freed or requeued.
 */
static int handle_one_recv_msg(ipmi_smi_t intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue;
	int chan;

#ifdef DEBUG_MSGING
	int m;
	printk("Recv:");
	for (m = 0; m < msg->rsp_size; m++)
		printk(" %2.2x", msg->rsp[m]);
	printk("\n");
#endif
	if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		printk(KERN_WARNING PFX "BMC returned too small a message"
		       " for netfn %x cmd %x, got %d bytes\n",
		       (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
		   || (msg->rsp[1] != msg->data[1])) {
		/*
		 * The NetFN and Command in the response are not even
		 * marginally correct.
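		 * Substitute an IPMI_ERR_UNSPECIFIED error response so
		 * the sender still sees a well-formed completion.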
3678 */ 3679 printk(KERN_WARNING PFX "BMC returned incorrect response," 3680 " expected netfn %x cmd %x, got netfn %x cmd %x\n", 3681 (msg->data[0] >> 2) | 1, msg->data[1], 3682 msg->rsp[0] >> 2, msg->rsp[1]); 3683 3684 /* Generate an error response for the message. */ 3685 msg->rsp[0] = msg->data[0] | (1 << 2); 3686 msg->rsp[1] = msg->data[1]; 3687 msg->rsp[2] = IPMI_ERR_UNSPECIFIED; 3688 msg->rsp_size = 3; 3689 } 3690 3691 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 3692 && (msg->rsp[1] == IPMI_SEND_MSG_CMD) 3693 && (msg->user_data != NULL)) { 3694 /* 3695 * It's a response to a response we sent. For this we 3696 * deliver a send message response to the user. 3697 */ 3698 struct ipmi_recv_msg *recv_msg = msg->user_data; 3699 3700 requeue = 0; 3701 if (msg->rsp_size < 2) 3702 /* Message is too small to be correct. */ 3703 goto out; 3704 3705 chan = msg->data[2] & 0x0f; 3706 if (chan >= IPMI_MAX_CHANNELS) 3707 /* Invalid channel number */ 3708 goto out; 3709 3710 if (!recv_msg) 3711 goto out; 3712 3713 /* Make sure the user still exists. */ 3714 if (!recv_msg->user || !recv_msg->user->valid) 3715 goto out; 3716 3717 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; 3718 recv_msg->msg.data = recv_msg->msg_data; 3719 recv_msg->msg.data_len = 1; 3720 recv_msg->msg_data[0] = msg->rsp[2]; 3721 deliver_response(recv_msg); 3722 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 3723 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { 3724 /* It's from the receive queue. */ 3725 chan = msg->rsp[3] & 0xf; 3726 if (chan >= IPMI_MAX_CHANNELS) { 3727 /* Invalid channel number */ 3728 requeue = 0; 3729 goto out; 3730 } 3731 3732 /* 3733 * We need to make sure the channels have been initialized. 3734 * The channel_handler routine will set the "curr_channel" 3735 * equal to or greater than IPMI_MAX_CHANNELS when all the 3736 * channels for this interface have been initialized. 3737 */ 3738 if (intf->curr_channel < IPMI_MAX_CHANNELS) { 3739 requeue = 0; /* Throw the message away */ 3740 goto out; 3741 } 3742 3743 switch (intf->channels[chan].medium) { 3744 case IPMI_CHANNEL_MEDIUM_IPMB: 3745 if (msg->rsp[4] & 0x04) { 3746 /* 3747 * It's a response, so find the 3748 * requesting message and send it up. 3749 */ 3750 requeue = handle_ipmb_get_msg_rsp(intf, msg); 3751 } else { 3752 /* 3753 * It's a command to the SMS from some other 3754 * entity. Handle that. 3755 */ 3756 requeue = handle_ipmb_get_msg_cmd(intf, msg); 3757 } 3758 break; 3759 3760 case IPMI_CHANNEL_MEDIUM_8023LAN: 3761 case IPMI_CHANNEL_MEDIUM_ASYNC: 3762 if (msg->rsp[6] & 0x04) { 3763 /* 3764 * It's a response, so find the 3765 * requesting message and send it up. 3766 */ 3767 requeue = handle_lan_get_msg_rsp(intf, msg); 3768 } else { 3769 /* 3770 * It's a command to the SMS from some other 3771 * entity. Handle that. 3772 */ 3773 requeue = handle_lan_get_msg_cmd(intf, msg); 3774 } 3775 break; 3776 3777 default: 3778 /* Check for OEM Channels. Clients had better 3779 register for these commands. */ 3780 if ((intf->channels[chan].medium 3781 >= IPMI_CHANNEL_MEDIUM_OEM_MIN) 3782 && (intf->channels[chan].medium 3783 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) { 3784 requeue = handle_oem_get_msg_cmd(intf, msg); 3785 } else { 3786 /* 3787 * We don't handle the channel type, so just 3788 * free the message. 3789 */ 3790 requeue = 0; 3791 } 3792 } 3793 3794 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 3795 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { 3796 /* It's an asynchronous event. 
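		 * Fan it out to every user that has asked for events.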
		 */
		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
	}

 out:
	return requeue;
}

/*
 * If there are messages in the queue or pretimeouts, handle them.
 */
static void handle_new_recv_msgs(ipmi_smi_t intf)
{
	struct ipmi_smi_msg *smi_msg;
	unsigned long flags = 0;
	int rv;
	int run_to_completion = intf->run_to_completion;

	/* See if any waiting messages need to be processed. */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	while (!list_empty(&intf->waiting_rcv_msgs)) {
		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
				     struct ipmi_smi_msg, link);
		list_del(&smi_msg->link);
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
					       flags);
		rv = handle_one_recv_msg(intf, smi_msg);
		if (!run_to_completion)
			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
		if (rv > 0) {
			/*
			 * To preserve message order, quit if we
			 * can't handle a message.  Add the message
			 * back at the head; this is safe because this
			 * tasklet is the only thing that pulls the
			 * messages.
			 */
			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
			break;
		} else {
			if (rv == 0)
				/* Message handled */
				ipmi_free_smi_msg(smi_msg);
			/* If rv < 0, fatal error, del but don't free. */
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);

	/*
	 * If the pretimeout count is non-zero, decrement one from it and
	 * deliver pretimeouts to all the users.
	 */
	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
		ipmi_user_t user;

		rcu_read_lock();
		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_watchdog_pretimeout)
				user->handler->ipmi_watchdog_pretimeout(
					user->handler_data);
		}
		rcu_read_unlock();
	}
}

static void smi_recv_tasklet(unsigned long val)
{
	unsigned long flags = 0; /* keep us warning-free. */
	ipmi_smi_t intf = (ipmi_smi_t) val;
	int run_to_completion = intf->run_to_completion;
	struct ipmi_smi_msg *newmsg = NULL;

	/*
	 * Start the next message if available.
	 *
	 * Do this here, not in the actual receiver, because the lower
	 * layer is allowed to hold locks while calling message
	 * delivery, and starting the next message there could
	 * deadlock.
	 */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	if (intf->curr_msg == NULL && !intf->in_shutdown) {
		struct list_head *entry = NULL;

		/* Pick the high priority queue first. */
		if (!list_empty(&intf->hp_xmit_msgs))
			entry = intf->hp_xmit_msgs.next;
		else if (!list_empty(&intf->xmit_msgs))
			entry = intf->xmit_msgs.next;

		if (entry) {
			list_del(entry);
			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
			intf->curr_msg = newmsg;
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
	if (newmsg)
		intf->handlers->sender(intf->send_info, newmsg);

	handle_new_recv_msgs(intf);
}

/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(ipmi_smi_t intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags = 0; /* keep us warning-free.
*/ 3911 int run_to_completion = intf->run_to_completion; 3912 3913 if ((msg->data_size >= 2) 3914 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) 3915 && (msg->data[1] == IPMI_SEND_MSG_CMD) 3916 && (msg->user_data == NULL)) { 3917 3918 if (intf->in_shutdown) 3919 goto free_msg; 3920 3921 /* 3922 * This is the local response to a command send, start 3923 * the timer for these. The user_data will not be 3924 * NULL if this is a response send, and we will let 3925 * response sends just go through. 3926 */ 3927 3928 /* 3929 * Check for errors, if we get certain errors (ones 3930 * that mean basically we can try again later), we 3931 * ignore them and start the timer. Otherwise we 3932 * report the error immediately. 3933 */ 3934 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) 3935 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) 3936 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) 3937 && (msg->rsp[2] != IPMI_BUS_ERR) 3938 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { 3939 int chan = msg->rsp[3] & 0xf; 3940 3941 /* Got an error sending the message, handle it. */ 3942 if (chan >= IPMI_MAX_CHANNELS) 3943 ; /* This shouldn't happen */ 3944 else if ((intf->channels[chan].medium 3945 == IPMI_CHANNEL_MEDIUM_8023LAN) 3946 || (intf->channels[chan].medium 3947 == IPMI_CHANNEL_MEDIUM_ASYNC)) 3948 ipmi_inc_stat(intf, sent_lan_command_errs); 3949 else 3950 ipmi_inc_stat(intf, sent_ipmb_command_errs); 3951 intf_err_seq(intf, msg->msgid, msg->rsp[2]); 3952 } else 3953 /* The message was sent, start the timer. */ 3954 intf_start_seq_timer(intf, msg->msgid); 3955 3956 free_msg: 3957 ipmi_free_smi_msg(msg); 3958 } else { 3959 /* 3960 * To preserve message order, we keep a queue and deliver from 3961 * a tasklet. 3962 */ 3963 if (!run_to_completion) 3964 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 3965 list_add_tail(&msg->link, &intf->waiting_rcv_msgs); 3966 if (!run_to_completion) 3967 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 3968 flags); 3969 } 3970 3971 if (!run_to_completion) 3972 spin_lock_irqsave(&intf->xmit_msgs_lock, flags); 3973 /* 3974 * We can get an asynchronous event or receive message in addition 3975 * to commands we send. 3976 */ 3977 if (msg == intf->curr_msg) 3978 intf->curr_msg = NULL; 3979 if (!run_to_completion) 3980 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); 3981 3982 if (run_to_completion) 3983 smi_recv_tasklet((unsigned long) intf); 3984 else 3985 tasklet_schedule(&intf->recv_tasklet); 3986 } 3987 EXPORT_SYMBOL(ipmi_smi_msg_received); 3988 3989 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) 3990 { 3991 if (intf->in_shutdown) 3992 return; 3993 3994 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); 3995 tasklet_schedule(&intf->recv_tasklet); 3996 } 3997 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 3998 3999 static struct ipmi_smi_msg * 4000 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, 4001 unsigned char seq, long seqid) 4002 { 4003 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); 4004 if (!smi_msg) 4005 /* 4006 * If we can't allocate the message, then just return, we 4007 * get 4 retries, so this should be ok. 
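		 * The caller accounts for the dropped resend in the
		 * dropped_rexmit statistics.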
4008 */ 4009 return NULL; 4010 4011 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); 4012 smi_msg->data_size = recv_msg->msg.data_len; 4013 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); 4014 4015 #ifdef DEBUG_MSGING 4016 { 4017 int m; 4018 printk("Resend: "); 4019 for (m = 0; m < smi_msg->data_size; m++) 4020 printk(" %2.2x", smi_msg->data[m]); 4021 printk("\n"); 4022 } 4023 #endif 4024 return smi_msg; 4025 } 4026 4027 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, 4028 struct list_head *timeouts, long timeout_period, 4029 int slot, unsigned long *flags, 4030 unsigned int *waiting_msgs) 4031 { 4032 struct ipmi_recv_msg *msg; 4033 const struct ipmi_smi_handlers *handlers; 4034 4035 if (intf->in_shutdown) 4036 return; 4037 4038 if (!ent->inuse) 4039 return; 4040 4041 ent->timeout -= timeout_period; 4042 if (ent->timeout > 0) { 4043 (*waiting_msgs)++; 4044 return; 4045 } 4046 4047 if (ent->retries_left == 0) { 4048 /* The message has used all its retries. */ 4049 ent->inuse = 0; 4050 msg = ent->recv_msg; 4051 list_add_tail(&msg->link, timeouts); 4052 if (ent->broadcast) 4053 ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); 4054 else if (is_lan_addr(&ent->recv_msg->addr)) 4055 ipmi_inc_stat(intf, timed_out_lan_commands); 4056 else 4057 ipmi_inc_stat(intf, timed_out_ipmb_commands); 4058 } else { 4059 struct ipmi_smi_msg *smi_msg; 4060 /* More retries, send again. */ 4061 4062 (*waiting_msgs)++; 4063 4064 /* 4065 * Start with the max timer, set to normal timer after 4066 * the message is sent. 4067 */ 4068 ent->timeout = MAX_MSG_TIMEOUT; 4069 ent->retries_left--; 4070 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 4071 ent->seqid); 4072 if (!smi_msg) { 4073 if (is_lan_addr(&ent->recv_msg->addr)) 4074 ipmi_inc_stat(intf, 4075 dropped_rexmit_lan_commands); 4076 else 4077 ipmi_inc_stat(intf, 4078 dropped_rexmit_ipmb_commands); 4079 return; 4080 } 4081 4082 spin_unlock_irqrestore(&intf->seq_lock, *flags); 4083 4084 /* 4085 * Send the new message. We send with a zero 4086 * priority. It timed out, I doubt time is that 4087 * critical now, and high priority messages are really 4088 * only for messages to the local MC, which don't get 4089 * resent. 4090 */ 4091 handlers = intf->handlers; 4092 if (handlers) { 4093 if (is_lan_addr(&ent->recv_msg->addr)) 4094 ipmi_inc_stat(intf, 4095 retransmitted_lan_commands); 4096 else 4097 ipmi_inc_stat(intf, 4098 retransmitted_ipmb_commands); 4099 4100 smi_send(intf, handlers, smi_msg, 0); 4101 } else 4102 ipmi_free_smi_msg(smi_msg); 4103 4104 spin_lock_irqsave(&intf->seq_lock, *flags); 4105 } 4106 } 4107 4108 static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period) 4109 { 4110 struct list_head timeouts; 4111 struct ipmi_recv_msg *msg, *msg2; 4112 unsigned long flags; 4113 int i; 4114 unsigned int waiting_msgs = 0; 4115 4116 /* 4117 * Go through the seq table and find any messages that 4118 * have timed out, putting them in the timeouts 4119 * list. 4120 */ 4121 INIT_LIST_HEAD(&timeouts); 4122 spin_lock_irqsave(&intf->seq_lock, flags); 4123 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) 4124 check_msg_timeout(intf, &(intf->seq_table[i]), 4125 &timeouts, timeout_period, i, 4126 &flags, &waiting_msgs); 4127 spin_unlock_irqrestore(&intf->seq_lock, flags); 4128 4129 list_for_each_entry_safe(msg, msg2, &timeouts, link) 4130 deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE); 4131 4132 /* 4133 * Maintenance mode handling. Check the timeout 4134 * optimistically before we claim the lock. 
	 * An occasionally missed timeout just extends it by one
	 * period; no big deal, and it avoids taking the lock most
	 * of the time.
	 */
	if (intf->auto_maintenance_timeout > 0) {
		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		if (intf->auto_maintenance_timeout > 0) {
			intf->auto_maintenance_timeout -= timeout_period;
			if (!intf->maintenance_mode
			    && (intf->auto_maintenance_timeout <= 0)) {
				intf->maintenance_mode_enable = false;
				maintenance_mode_update(intf);
			}
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	tasklet_schedule(&intf->recv_tasklet);

	return waiting_msgs;
}

static void ipmi_request_event(ipmi_smi_t intf)
{
	/* No event requests when in maintenance mode. */
	if (intf->maintenance_mode_enable)
		return;

	if (!intf->in_shutdown)
		intf->handlers->request_events(intf->send_info);
}

static struct timer_list ipmi_timer;

static atomic_t stop_operation;

static void ipmi_timeout(unsigned long data)
{
	ipmi_smi_t intf;
	int nt = 0;

	if (atomic_read(&stop_operation))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		int lnt = 0;

		if (atomic_read(&intf->event_waiters)) {
			intf->ticks_to_req_ev--;
			if (intf->ticks_to_req_ev == 0) {
				ipmi_request_event(intf);
				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
			}
			lnt++;
		}

		lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);

		lnt = !!lnt;
		if (lnt != intf->last_needs_timer &&
		    intf->handlers->set_need_watch)
			intf->handlers->set_need_watch(intf->send_info, lnt);
		intf->last_needs_timer = lnt;

		nt += lnt;
	}
	rcu_read_unlock();

	if (nt)
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static void need_waiter(ipmi_smi_t intf)
{
	/*
	 * Racy, but worst case we start the timer twice.
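	 * A second mod_timer() on an already-pending timer only
	 * moves its expiry, so a duplicate start is harmless.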
	 */
	if (!timer_pending(&ipmi_timer))
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	kfree(msg);
}

struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
	struct ipmi_smi_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
	if (rv) {
		rv->done = free_smi_msg;
		rv->user_data = NULL;
		atomic_inc(&smi_msg_inuse_count);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);

static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	kfree(msg);
}

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
	struct ipmi_recv_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
	if (rv) {
		rv->user = NULL;
		rv->done = free_recv_msg;
		atomic_inc(&recv_msg_inuse_count);
	}
	return rv;
}

void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user)
		kref_put(&msg->user->refcount, free_user);
	msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);

#ifdef CONFIG_IPMI_PANIC_EVENT

static atomic_t panic_done_count = ATOMIC_INIT(0);

static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}

/*
 * Inside a panic, send a message and wait for a response.
 */
static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
					struct ipmi_addr *addr,
					struct kernel_ipmi_msg *msg)
{
	struct ipmi_smi_msg smi_msg;
	struct ipmi_recv_msg recv_msg;
	int rv;

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;
	atomic_add(2, &panic_done_count);
	rv = i_ipmi_request(NULL,
			    intf,
			    addr,
			    0,
			    msg,
			    intf,
			    &smi_msg,
			    &recv_msg,
			    0,
			    intf->channels[0].address,
			    intf->channels[0].lun,
			    0, 1); /* Don't retry, and don't wait. */
	if (rv)
		atomic_sub(2, &panic_done_count);
	else if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);

	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}

#ifdef CONFIG_IPMI_PANIC_STRING
static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}

static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/*
		 * A get device id command; save whether we are an
		 * event receiver or generator.
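		 * With the completion code at data[0], byte 6 of the
		 * response is the Additional Device Support field;
		 * bit 2 is "SEL Device" and bit 5 is "IPMB Event
		 * Generator".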
		 */
		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
	}
}
#endif

static void send_panic_events(char *str)
{
	struct kernel_ipmi_msg msg;
	ipmi_smi_t intf;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event reporting that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/*
	 * Put a few breadcrumbs in.  Hopefully later we can add more things
	 * to make the panic events more useful.
	 */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	/* For every registered interface, send the event. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers)
			/* Interface is not ready. */
			continue;

		/* Send the event announcing the panic. */
		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}

#ifdef CONFIG_IPMI_PANIC_STRING
	/*
	 * On every interface, dump a bunch of OEM events holding the
	 * string.
	 */
	if (!str)
		return;

	/* For every registered interface, send the event. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		char *p = str;
		struct ipmi_ipmb_addr *ipmb;
		int j;

		if (intf->intf_num == -1)
			/* Interface was not ready yet. */
			continue;

		/*
		 * intf_num is used as a marker to tell if the
		 * interface is valid.  Thus we need a read barrier to
		 * make sure data fetched before checking intf_num
		 * won't be used.
		 */
		smp_rmb();

		/*
		 * First job here is to figure out where to send the
		 * OEM events.  There's no way in IPMI to send OEM
		 * events using an event send command, so we have to
		 * find the SEL to put them in and stick them in
		 * there.
		 */

		/* Get capabilities from the get device id. */
		intf->local_sel_device = 0;
		intf->local_event_generator = 0;
		intf->event_receiver = 0;

		/* Request the device info from the local MC. */
		msg.netfn = IPMI_NETFN_APP_REQUEST;
		msg.cmd = IPMI_GET_DEVICE_ID_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = device_id_fetcher;
		ipmi_panic_request_and_wait(intf, &addr, &msg);

		if (intf->local_event_generator) {
			/* Request the event receiver from the local MC. */
			msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
			msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
			msg.data = NULL;
			msg.data_len = 0;
			intf->null_user_handler = event_receiver_fetcher;
			ipmi_panic_request_and_wait(intf, &addr, &msg);
		}
		intf->null_user_handler = NULL;

		/*
		 * Validate the event receiver.  The low bit must not
		 * be 1 (it must be a valid IPMB address), it cannot
		 * be zero, and it must not be my address.
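		 * (IPMB slave addresses are even values; the bottom
		 * bit of the address byte is the I2C read/write bit,
		 * so an odd value cannot be a valid slave address.)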
		 */
		if (((intf->event_receiver & 1) == 0)
		    && (intf->event_receiver != 0)
		    && (intf->event_receiver != intf->channels[0].address)) {
			/*
			 * The event receiver is valid, send an IPMB
			 * message.
			 */
			ipmb = (struct ipmi_ipmb_addr *) &addr;
			ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb->channel = 0; /* FIXME - is this right? */
			ipmb->lun = intf->event_receiver_lun;
			ipmb->slave_addr = intf->event_receiver;
		} else if (intf->local_sel_device) {
			/*
			 * The event receiver was not valid (or was
			 * me), but I am an SEL device, just dump it
			 * in my SEL.
			 */
			si = (struct ipmi_system_interface_addr *) &addr;
			si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
			si->channel = IPMI_BMC_CHANNEL;
			si->lun = 0;
		} else
			continue; /* Nowhere to send the event. */

		msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
		msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
		msg.data = data;
		msg.data_len = 16;

		j = 0;
		while (*p) {
			int size = strlen(p);

			if (size > 11)
				size = 11;
			data[0] = 0;
			data[1] = 0;
			data[2] = 0xf0; /* OEM event without timestamp. */
			data[3] = intf->channels[0].address;
			data[4] = j++; /* sequence # */
			/*
			 * Always give 11 bytes, so strncpy will fill
			 * it with zeroes for me.
			 */
			strncpy(data+5, p, 11);
			p += size;

			ipmi_panic_request_and_wait(intf, &addr, &msg);
		}
	}
#endif /* CONFIG_IPMI_PANIC_STRING */
}
#endif /* CONFIG_IPMI_PANIC_EVENT */

static int has_panicked;

static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	ipmi_smi_t intf;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers)
			/* Interface is not ready. */
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted.  In this case, drop items on the list for
		 * safety.
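		 * Reinitializing the list head abandons (and leaks)
		 * whatever was queued, but during a panic that is far
		 * safer than walking a potentially corrupted list.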
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);

		intf->run_to_completion = 1;
		intf->handlers->set_run_to_completion(intf->send_info, 1);
	}

#ifdef CONFIG_IPMI_PANIC_EVENT
	send_panic_events(ptr);
#endif

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = panic_event,
	.next = NULL,
	.priority = 200 /* priority: INT_MAX >= x >= 0 */
};

static int ipmi_init_msghandler(void)
{
	int rv;

	if (initialized)
		return 0;

	rv = driver_register(&ipmidriver.driver);
	if (rv) {
		printk(KERN_ERR PFX "Could not register IPMI driver\n");
		return rv;
	}

	printk(KERN_INFO "ipmi message handler version "
	       IPMI_DRIVER_VERSION "\n");

#ifdef CONFIG_PROC_FS
	proc_ipmi_root = proc_mkdir("ipmi", NULL);
	if (!proc_ipmi_root) {
		printk(KERN_ERR PFX "Unable to create IPMI proc dir\n");
		driver_unregister(&ipmidriver.driver);
		return -ENOMEM;
	}
#endif /* CONFIG_PROC_FS */

	setup_timer(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = 1;

	return 0;
}

static int __init ipmi_init_msghandler_mod(void)
{
	ipmi_init_msghandler();
	return 0;
}

static void __exit cleanup_ipmi(void)
{
	int count;

	if (!initialized)
		return;

	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);

	/*
	 * This can't be called if any interfaces exist, so no worry
	 * about shutting down the interfaces.
	 */

	/*
	 * Tell the timer to stop, then wait for it to stop.  This
	 * avoids problems with race conditions removing the timer
	 * here.
	 */
	atomic_inc(&stop_operation);
	del_timer_sync(&ipmi_timer);

#ifdef CONFIG_PROC_FS
	proc_remove(proc_ipmi_root);
#endif /* CONFIG_PROC_FS */

	driver_unregister(&ipmidriver.driver);

	initialized = 0;

	/* Check for buffer leaks. */
	count = atomic_read(&smi_msg_inuse_count);
	if (count != 0)
		printk(KERN_WARNING PFX "SMI message count %d at exit\n",
		       count);
	count = atomic_read(&recv_msg_inuse_count);
	if (count != 0)
		printk(KERN_WARNING PFX "recv message count %d at exit\n",
		       count);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
		   " interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
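
/*
 * Illustrative sketch (not part of the original driver): roughly how a
 * hypothetical low-level SMI driver would hand results back to this
 * layer.  The example_* names below are made up for illustration;
 * only ipmi_smi_msg_received() and ipmi_smi_watchdog_pretimeout() are
 * real entry points (both exported above).
 *
 *	static void example_transaction_done(ipmi_smi_t intf,
 *					     struct ipmi_smi_msg *msg)
 *	{
 *		(msg->rsp and msg->rsp_size have already been filled
 *		in from the hardware by the low-level driver.)
 *		ipmi_smi_msg_received(intf, msg);
 *	}
 *
 *	static void example_watchdog_nmi(ipmi_smi_t intf)
 *	{
 *		ipmi_smi_watchdog_pretimeout(intf);
 *	}
 */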