// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
#define dev_fmt pr_fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
static void smi_recv_tasklet(struct tasklet_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);

static bool initialized;
static bool drvregistered;

enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING
};
#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	char *s;

	strncpy(valcp, val, 15);
	valcp[15] = '\0';

	s = strstrip(valcp);

	if (strcmp(s, "none") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
	else if (strcmp(s, "event") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
	else if (strcmp(s, "string") == 0)
		ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
	else
		return -EINVAL;

	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	switch (ipmi_send_panic_event) {
	case IPMI_SEND_PANIC_EVENT_NONE:
		strcpy(buffer, "none\n");
		break;

	case IPMI_SEND_PANIC_EVENT:
		strcpy(buffer, "event\n");
		break;

	case IPMI_SEND_PANIC_EVENT_STRING:
		strcpy(buffer, "string\n");
		break;

	default:
		strcpy(buffer, "???\n");
		break;
	}

	return strlen(buffer);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic.  Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");


#define MAX_EVENTS_IN_QUEUE	25
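/*
 * Illustrative usage of the panic_op parameter above (a sketch, not
 * driver code): since it is registered with mode 0600, it can be set
 * at module load time or changed at runtime through sysfs, e.g.:
 *
 *	modprobe ipmi_msghandler panic_op=string
 *	echo event > /sys/module/ipmi_msghandler/parameters/panic_op
 */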
/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at
 * least the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The default maximum number of times a message is retried");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)
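/*
 * Worked example of the arithmetic above (illustrative): with
 * HZ == 250, IPMI_TIMEOUT_JIFFIES == (1000 * 250) / 1000 == 250
 * jiffies, so the periodic timer fires roughly once a second, and
 * IPMI_REQUEST_EV_TIME == 1000 / 1000 == 1, i.e. events are
 * requested on every timer tick.
 */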
/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	/*
	 * Set to NULL when the user is destroyed, a pointer to myself
	 * so srcu_dereference can be used on it.
	 */
	struct ipmi_user *self;
	struct srcu_struct release_barrier;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;

	/* Free must run in process context for RCU cleanup. */
	struct work_struct remove_work;
};

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
	__acquires(user->release_barrier)
{
	struct ipmi_user *ruser;

	*index = srcu_read_lock(&user->release_barrier);
	ruser = srcu_dereference(user->self, &user->release_barrier);
	if (!ruser)
		srcu_read_unlock(&user->release_barrier, *index);
	return ruser;
}

static void release_ipmi_user(struct ipmi_user *user, int index)
{
	srcu_read_unlock(&user->release_barrier, index);
}

struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int  chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int  retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)			\
	do {							\
		seq = (((msgid) >> 26) & 0x3f);			\
		seqid = ((msgid) & 0x3ffffff);			\
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)

#define IPMI_MAX_CHANNELS	16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};
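/*
 * Illustrative round trip for the msgid macros above (a sketch, not
 * driver code): packing and unpacking recovers the original fields.
 *
 *	unsigned char seq;
 *	unsigned long seqid;
 *	long msgid = STORE_SEQ_IN_MSGID(5, 0x123456);
 *
 *	GET_SEQ_FROM_MSGID(msgid, seq, seqid);
 *	// now seq == 5 (6 bits) and seqid == 0x123456 (low 26 bits)
 */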
/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head       intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id  id;
	struct ipmi_device_id  fetch_id;
	int                    dyn_id_set;
	unsigned long          dyn_id_expiry;
	struct mutex           dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t                 guid;
	guid_t                 fetch_guid;
	int                    dyn_guid_set;
	struct kref            usecount;
	struct work_struct     remove_work;
	unsigned char          cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out on the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent on the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,
	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};


#define IPMI_IPMB_NUM_SEQ	64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.  seq_lock write
	 * protects this.  Read protection is with srcu.
	 */
	struct list_head users;
	struct srcu_struct users_srcu;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register;  /* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void *send_info;

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	spinlock_t       seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery.  If delivery fails (out of memory
	 * for instance), they will stay in here to be processed later in a
	 * periodic timer interrupt.  The tasklet is for handling received
	 * messages directly from the handler.
	 */
	spinlock_t       waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t         watchdog_pretimeouts_to_deliver;
	struct tasklet_struct recv_tasklet;

	spinlock_t          xmit_msgs_lock;
	struct list_head    xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head    hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex     cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	spinlock_t       events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */
	char             delivering_events;
	char             event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;

	spinlock_t watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs.  Protected by watch_lock.
	 */
	unsigned int last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;
	bool maintenance_mode_enable;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * A duplicate of the run_to_completion flag in the smb_info,
	 * smi_info and ipmi_serial_info structures.  Used to decrease
	 * the number of parameters passed by the "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);


/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
#define ipmi_interfaces_mutex_held() \
	lockdep_is_held(&ipmi_interfaces_mutex)
static struct srcu_struct ipmi_interfaces_srcu;
/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void clean_up_interface_data(struct ipmi_smi *intf)
{
	int              i;
	struct cmd_rcvr  *rcvr, *rcvr2;
	struct list_head list;

	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
					&& (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);

	clean_up_interface_data(intf);
	kfree(intf);
}

struct watcher_entry {
	int              intf_num;
	struct ipmi_smi  *intf;
	struct list_head link;
};
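/*
 * Illustrative watcher registration (a sketch, not driver code): a
 * client module fills in an ipmi_smi_watcher to learn about
 * interfaces as they come and go; my_new_smi and my_smi_gone are
 * hypothetical callbacks.
 *
 *	static void my_new_smi(int if_num, struct device *dev) { ... }
 *	static void my_smi_gone(int if_num) { ... }
 *
 *	static struct ipmi_smi_watcher my_watcher = {
 *		.owner    = THIS_MODULE,
 *		.new_smi  = my_new_smi,
 *		.smi_gone = my_smi_gone,
 *	};
 *
 *	rv = ipmi_smi_watcher_register(&my_watcher);
 */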
740 */ 741 rv = ipmi_init_msghandler(); 742 if (rv) 743 return rv; 744 745 mutex_lock(&smi_watchers_mutex); 746 747 list_add(&watcher->link, &smi_watchers); 748 749 index = srcu_read_lock(&ipmi_interfaces_srcu); 750 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 751 int intf_num = READ_ONCE(intf->intf_num); 752 753 if (intf_num == -1) 754 continue; 755 watcher->new_smi(intf_num, intf->si_dev); 756 } 757 srcu_read_unlock(&ipmi_interfaces_srcu, index); 758 759 mutex_unlock(&smi_watchers_mutex); 760 761 return 0; 762 } 763 EXPORT_SYMBOL(ipmi_smi_watcher_register); 764 765 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) 766 { 767 mutex_lock(&smi_watchers_mutex); 768 list_del(&watcher->link); 769 mutex_unlock(&smi_watchers_mutex); 770 return 0; 771 } 772 EXPORT_SYMBOL(ipmi_smi_watcher_unregister); 773 774 /* 775 * Must be called with smi_watchers_mutex held. 776 */ 777 static void 778 call_smi_watchers(int i, struct device *dev) 779 { 780 struct ipmi_smi_watcher *w; 781 782 mutex_lock(&smi_watchers_mutex); 783 list_for_each_entry(w, &smi_watchers, link) { 784 if (try_module_get(w->owner)) { 785 w->new_smi(i, dev); 786 module_put(w->owner); 787 } 788 } 789 mutex_unlock(&smi_watchers_mutex); 790 } 791 792 static int 793 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2) 794 { 795 if (addr1->addr_type != addr2->addr_type) 796 return 0; 797 798 if (addr1->channel != addr2->channel) 799 return 0; 800 801 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 802 struct ipmi_system_interface_addr *smi_addr1 803 = (struct ipmi_system_interface_addr *) addr1; 804 struct ipmi_system_interface_addr *smi_addr2 805 = (struct ipmi_system_interface_addr *) addr2; 806 return (smi_addr1->lun == smi_addr2->lun); 807 } 808 809 if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) { 810 struct ipmi_ipmb_addr *ipmb_addr1 811 = (struct ipmi_ipmb_addr *) addr1; 812 struct ipmi_ipmb_addr *ipmb_addr2 813 = (struct ipmi_ipmb_addr *) addr2; 814 815 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr) 816 && (ipmb_addr1->lun == ipmb_addr2->lun)); 817 } 818 819 if (is_lan_addr(addr1)) { 820 struct ipmi_lan_addr *lan_addr1 821 = (struct ipmi_lan_addr *) addr1; 822 struct ipmi_lan_addr *lan_addr2 823 = (struct ipmi_lan_addr *) addr2; 824 825 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID) 826 && (lan_addr1->local_SWID == lan_addr2->local_SWID) 827 && (lan_addr1->session_handle 828 == lan_addr2->session_handle) 829 && (lan_addr1->lun == lan_addr2->lun)); 830 } 831 832 return 1; 833 } 834 835 int ipmi_validate_addr(struct ipmi_addr *addr, int len) 836 { 837 if (len < sizeof(struct ipmi_system_interface_addr)) 838 return -EINVAL; 839 840 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 841 if (addr->channel != IPMI_BMC_CHANNEL) 842 return -EINVAL; 843 return 0; 844 } 845 846 if ((addr->channel == IPMI_BMC_CHANNEL) 847 || (addr->channel >= IPMI_MAX_CHANNELS) 848 || (addr->channel < 0)) 849 return -EINVAL; 850 851 if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 852 if (len < sizeof(struct ipmi_ipmb_addr)) 853 return -EINVAL; 854 return 0; 855 } 856 857 if (is_lan_addr(addr)) { 858 if (len < sizeof(struct ipmi_lan_addr)) 859 return -EINVAL; 860 return 0; 861 } 862 863 return -EINVAL; 864 } 865 EXPORT_SYMBOL(ipmi_validate_addr); 866 867 unsigned int ipmi_addr_length(int addr_type) 868 { 869 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 870 return sizeof(struct ipmi_system_interface_addr); 871 872 if ((addr_type == IPMI_IPMB_ADDR_TYPE) 873 || 
unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk.  At this moment, simply skip it in that
		 * case.
		 */
		ipmi_free_recv_msg(msg);
	} else {
		int index;
		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);

		if (user) {
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
			release_ipmi_user(user, index);
		} else {
			/* User went away, give up. */
			ipmi_free_recv_msg(msg);
			rv = -EINVAL;
		}
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}

static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}
/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi      *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long        timeout,
			 int                  retries,
			 int                  broadcast,
			 unsigned char        *seq,
			 long                 *seqid)
{
	int          rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
					i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}

/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi      *intf,
			 unsigned char        seq,
			 short                channel,
			 unsigned char        cmd,
			 unsigned char        netfn,
			 struct ipmi_addr     *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int           rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
				&& (msg->msg.netfn == netfn)
				&& (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long msgid)
{
	int           rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
				&& (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	return rv;
}
/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long msgid,
			unsigned int err)
{
	int                  rv = -ENODEV;
	unsigned long        flags;
	unsigned char        seq;
	unsigned long        seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&intf->seq_lock, flags);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
				&& (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}

static void free_user_work(struct work_struct *work)
{
	struct ipmi_user *user = container_of(work, struct ipmi_user,
					      remove_work);

	cleanup_srcu_struct(&user->release_barrier);
	vfree(user);
}

int ipmi_create_user(unsigned int          if_num,
		     const struct ipmi_user_hndl *handler,
		     void                  *handler_data,
		     struct ipmi_user      **user)
{
	unsigned long flags;
	struct ipmi_user *new_user;
	int           rv, index;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	new_user = vzalloc(sizeof(*new_user));
	if (!new_user)
		return -ENOMEM;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

found:
	INIT_WORK(&new_user->remove_work, free_user_work);

	rv = init_srcu_struct(&new_user->release_barrier);
	if (rv)
		goto out_kfree;

	if (!try_module_get(intf->owner)) {
		rv = -ENODEV;
		goto out_kfree;
	}

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	rcu_assign_pointer(new_user->self, new_user);
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	*user = new_user;
	return 0;

out_kfree:
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	vfree(new_user);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);

int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv, index;
	struct ipmi_smi *intf;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	/* Not found, return an error */
	return -EINVAL;

found:
	if (!intf->handlers->get_smi_info)
		rv = -ENOTTY;
	else
		rv = intf->handlers->get_smi_info(intf->send_info, data);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

static void free_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

	/* SRCU cleanup must happen in task context. */
	schedule_work(&user->remove_work);
}
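/*
 * Illustrative in-kernel client setup (a sketch, not driver code):
 * my_recv and my_hndl are hypothetical.  The receive handler owns the
 * delivered message and must free it when done.
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *handler_data)
 *	{
 *		... examine msg->msg, msg->addr ...
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *
 *	struct ipmi_user *user;
 *	rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *	...
 *	rv = ipmi_destroy_user(user);
 */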
static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi  *intf = user->intf;
	int              i;
	unsigned long    flags;
	struct cmd_rcvr  *rcvr;
	struct cmd_rcvr  *rcvrs = NULL;

	if (!acquire_ipmi_user(user, &i)) {
		/*
		 * The user has already been cleaned up, just make sure
		 * nothing is using it and return.
		 */
		synchronize_srcu(&user->release_barrier);
		return;
	}

	rcu_assign_pointer(user->self, NULL);
	release_ipmi_user(user, i);

	synchronize_srcu(&user->release_barrier);

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_srcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	kref_put(&intf->refcount, intf_free);
	module_put(intf->owner);
}

int ipmi_destroy_user(struct ipmi_user *user)
{
	_ipmi_destroy_user(user);

	kref_put(&user->refcount, free_user);

	return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);

int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int  channel,
			unsigned char address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int  channel,
			unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int  channel,
		    unsigned char LUN)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int  channel,
		    unsigned char *address)
{
	int index, rv = 0;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);
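/*
 * Illustrative version query using ipmi_get_version() above (a
 * sketch, not driver code):
 *
 *	unsigned char major, minor;
 *
 *	rv = ipmi_get_version(user, &major, &minor);
 *	// on success, e.g. major == 2, minor == 0 for an IPMI 2.0 BMC
 */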
int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode, index;
	unsigned long flags;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		intf->handlers->set_maintenance_mode(
			intf->send_info, intf->maintenance_mode_enable);
}

int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0, index;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_enable = false;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_enable = true;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
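/*
 * Illustrative maintenance mode use (a sketch, not driver code):
 * force maintenance mode on around a firmware operation, then return
 * to automatic handling.
 *
 *	rv = ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_ON);
 *	... perform firmware maintenance ...
 *	rv = ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_AUTO);
 */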
int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	unsigned long        flags;
	struct ipmi_smi      *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head     msgs;
	int index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_local_response(intf, msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

out:
	spin_unlock_irqrestore(&intf->events_lock, flags);
	release_ipmi_user(user, index);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);

static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int  chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
					&& (rcvr->chans & chans))
			return 0;
	}
	return 1;
}
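/*
 * Illustrative command registration (a sketch, not driver code):
 * receive all "Get Device ID" application requests arriving on
 * channel 0 (chans is a bitmask of channel numbers).
 *
 *	rv = ipmi_register_for_cmd(user, IPMI_NETFN_APP_REQUEST,
 *				   IPMI_GET_DEVICE_ID_CMD, 1 << 0);
 */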
int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int  chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
out_release:
	release_ipmi_user(user, index);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);

int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int  chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT, index;

	user = acquire_ipmi_user(user, &index);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user, index);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}

static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long                  msgid,
				   unsigned char         ipmb_seq,
				   int                   broadcast,
				   unsigned char         source_address,
				   unsigned char         source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
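/*
 * Illustrative checksum property (a sketch, not driver code):
 * ipmb_checksum() returns the two's complement of the 8-bit sum, so
 * a region plus its checksum always sums to zero modulo 256.
 *
 *	unsigned char data[2] = { 0x20, 0xb8 };
 *	unsigned char csum = ipmb_checksum(data, 2);
 *	// csum == 0x28, and (0x20 + 0xb8 + 0x28) % 256 == 0
 */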
static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr  *lan_addr,
				  long                  msgid,
				  unsigned char         ipmb_seq,
				  unsigned char         source_lun)
{
	/* Format the LAN header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/* Add on the checksum size. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = intf->run_to_completion;
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}
static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
			      struct ipmi_addr *addr,
			      long msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg *smi_msg,
			      struct ipmi_recv_msg *recv_msg,
			      int retries,
			      unsigned int retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout
			= maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && !intf->maintenance_mode_enable) {
			intf->maintenance_mode_enable = true;
			maintenance_mode_update(intf);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->user_data = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}

static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
			   struct ipmi_addr *addr,
			   long msgid,
			   struct kernel_ipmi_msg *msg,
			   struct ipmi_smi_msg *smi_msg,
			   struct ipmi_recv_msg *recv_msg,
			   unsigned char source_address,
			   unsigned char source_lun,
			   int retries,
			   unsigned int retry_time_ms)
{
	struct ipmi_ipmb_addr *ipmb_addr;
	unsigned char ipmb_seq;
	long seqid;
	int broadcast = 0;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
		/*
		 * Broadcasts add a zero at the beginning of the
		 * message, but otherwise it is the same as an IPMB
		 * address.
		 */
		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
		broadcast = 1;
		retries = 0; /* Don't retry broadcasts. */
	}

	/*
	 * 9 for the header and 1 for the checksum, plus
	 * possibly one for the broadcast.
	 */
	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
	if (ipmb_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_ipmb_responses);
		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
				msgid, broadcast,
				source_address, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
		unsigned long flags;

		spin_lock_irqsave(&intf->seq_lock, flags);

		if (is_maintenance_mode_cmd(msg))
			intf->ipmb_maintenance_mode_timeout =
				maintenance_mode_timeout_ms;

		if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
			/* Different default in maintenance mode */
			retry_time_ms = default_maintenance_retry_ms;

		/*
		 * Create a sequence number; intf_next_seq() fills in
		 * the default timeout and retry count if none were
		 * supplied.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   broadcast,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_ipmb_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_ipmb_msg(smi_msg, msg, ipmb_addr,
				STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
				ipmb_seq, broadcast,
				source_address, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be correct.
		 */
out_err:
		spin_unlock_irqrestore(&intf->seq_lock, flags);
	}

	return rv;
}

static int i_ipmi_req_lan(struct ipmi_smi *intf,
			  struct ipmi_addr *addr,
			  long msgid,
			  struct kernel_ipmi_msg *msg,
			  struct ipmi_smi_msg *smi_msg,
			  struct ipmi_recv_msg *recv_msg,
			  unsigned char source_lun,
			  int retries,
			  unsigned int retry_time_ms)
{
	struct ipmi_lan_addr *lan_addr;
	unsigned char ipmb_seq;
	long seqid;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if ((chans[addr->channel].medium
				!= IPMI_CHANNEL_MEDIUM_8023LAN)
			&& (chans[addr->channel].medium
			    != IPMI_CHANNEL_MEDIUM_ASYNC)) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	/* 11 for the header and 1 for the checksum. */
	if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	lan_addr = (struct ipmi_lan_addr *) addr;
	if (lan_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_lan_responses);
		format_lan_msg(smi_msg, msg, lan_addr, msgid,
			       msgid, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->user_data = recv_msg;
	} else {
		/* It's a command, so get a sequence for it. */
*/ 2126 unsigned long flags; 2127 2128 spin_lock_irqsave(&intf->seq_lock, flags); 2129 2130 /* 2131 * Create a sequence number with a 1 second 2132 * timeout and 4 retries. 2133 */ 2134 rv = intf_next_seq(intf, 2135 recv_msg, 2136 retry_time_ms, 2137 retries, 2138 0, 2139 &ipmb_seq, 2140 &seqid); 2141 if (rv) 2142 /* 2143 * We have used up all the sequence numbers, 2144 * probably, so abort. 2145 */ 2146 goto out_err; 2147 2148 ipmi_inc_stat(intf, sent_lan_commands); 2149 2150 /* 2151 * Store the sequence number in the message, 2152 * so that when the send message response 2153 * comes back we can start the timer. 2154 */ 2155 format_lan_msg(smi_msg, msg, lan_addr, 2156 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 2157 ipmb_seq, source_lun); 2158 2159 /* 2160 * Copy the message into the recv message data, so we 2161 * can retransmit it later if necessary. 2162 */ 2163 memcpy(recv_msg->msg_data, smi_msg->data, 2164 smi_msg->data_size); 2165 recv_msg->msg.data = recv_msg->msg_data; 2166 recv_msg->msg.data_len = smi_msg->data_size; 2167 2168 /* 2169 * We don't unlock until here, because we need 2170 * to copy the completed message into the 2171 * recv_msg before we release the lock. 2172 * Otherwise, race conditions may bite us. I 2173 * know that's pretty paranoid, but I prefer 2174 * to be correct. 2175 */ 2176 out_err: 2177 spin_unlock_irqrestore(&intf->seq_lock, flags); 2178 } 2179 2180 return rv; 2181 } 2182 2183 /* 2184 * Separate from ipmi_request so that the user does not have to be 2185 * supplied in certain circumstances (mainly at panic time). If 2186 * messages are supplied, they will be freed, even if an error 2187 * occurs. 2188 */ 2189 static int i_ipmi_request(struct ipmi_user *user, 2190 struct ipmi_smi *intf, 2191 struct ipmi_addr *addr, 2192 long msgid, 2193 struct kernel_ipmi_msg *msg, 2194 void *user_msg_data, 2195 void *supplied_smi, 2196 struct ipmi_recv_msg *supplied_recv, 2197 int priority, 2198 unsigned char source_address, 2199 unsigned char source_lun, 2200 int retries, 2201 unsigned int retry_time_ms) 2202 { 2203 struct ipmi_smi_msg *smi_msg; 2204 struct ipmi_recv_msg *recv_msg; 2205 int rv = 0; 2206 2207 if (supplied_recv) 2208 recv_msg = supplied_recv; 2209 else { 2210 recv_msg = ipmi_alloc_recv_msg(); 2211 if (recv_msg == NULL) { 2212 rv = -ENOMEM; 2213 goto out; 2214 } 2215 } 2216 recv_msg->user_msg_data = user_msg_data; 2217 2218 if (supplied_smi) 2219 smi_msg = (struct ipmi_smi_msg *) supplied_smi; 2220 else { 2221 smi_msg = ipmi_alloc_smi_msg(); 2222 if (smi_msg == NULL) { 2223 if (!supplied_recv) 2224 ipmi_free_recv_msg(recv_msg); 2225 rv = -ENOMEM; 2226 goto out; 2227 } 2228 } 2229 2230 rcu_read_lock(); 2231 if (intf->in_shutdown) { 2232 rv = -ENODEV; 2233 goto out_err; 2234 } 2235 2236 recv_msg->user = user; 2237 if (user) 2238 /* The put happens when the message is freed. */ 2239 kref_get(&user->refcount); 2240 recv_msg->msgid = msgid; 2241 /* 2242 * Store the message to send in the receive message so timeout 2243 * responses can get the proper response data. 
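	 *
	 * (The supplied_smi/supplied_recv arguments exist so callers
	 * such as the IPMI watchdog can hand in pre-allocated message
	 * structures and still send from atomic or panic context,
	 * through the ipmi_request_supply_msgs() wrapper defined
	 * below.  A hedged sketch, with illustrative names:
	 *
	 *	rv = ipmi_request_supply_msgs(user, &addr, msgid, &msg,
	 *				      NULL, &smi_msg, &recv_msg, 0);
	 *
	 * Either way, the driver frees the supplied messages even on
	 * error, per the comment above this function.)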
2244 */ 2245 recv_msg->msg = *msg; 2246 2247 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 2248 rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg, 2249 recv_msg, retries, retry_time_ms); 2250 } else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) { 2251 rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg, 2252 source_address, source_lun, 2253 retries, retry_time_ms); 2254 } else if (is_lan_addr(addr)) { 2255 rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg, 2256 source_lun, retries, retry_time_ms); 2257 } else { 2258 /* Unknown address type. */ 2259 ipmi_inc_stat(intf, sent_invalid_commands); 2260 rv = -EINVAL; 2261 } 2262 2263 if (rv) { 2264 out_err: 2265 ipmi_free_smi_msg(smi_msg); 2266 ipmi_free_recv_msg(recv_msg); 2267 } else { 2268 pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data); 2269 2270 smi_send(intf, intf->handlers, smi_msg, priority); 2271 } 2272 rcu_read_unlock(); 2273 2274 out: 2275 return rv; 2276 } 2277 2278 static int check_addr(struct ipmi_smi *intf, 2279 struct ipmi_addr *addr, 2280 unsigned char *saddr, 2281 unsigned char *lun) 2282 { 2283 if (addr->channel >= IPMI_MAX_CHANNELS) 2284 return -EINVAL; 2285 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS); 2286 *lun = intf->addrinfo[addr->channel].lun; 2287 *saddr = intf->addrinfo[addr->channel].address; 2288 return 0; 2289 } 2290 2291 int ipmi_request_settime(struct ipmi_user *user, 2292 struct ipmi_addr *addr, 2293 long msgid, 2294 struct kernel_ipmi_msg *msg, 2295 void *user_msg_data, 2296 int priority, 2297 int retries, 2298 unsigned int retry_time_ms) 2299 { 2300 unsigned char saddr = 0, lun = 0; 2301 int rv, index; 2302 2303 if (!user) 2304 return -EINVAL; 2305 2306 user = acquire_ipmi_user(user, &index); 2307 if (!user) 2308 return -ENODEV; 2309 2310 rv = check_addr(user->intf, addr, &saddr, &lun); 2311 if (!rv) 2312 rv = i_ipmi_request(user, 2313 user->intf, 2314 addr, 2315 msgid, 2316 msg, 2317 user_msg_data, 2318 NULL, NULL, 2319 priority, 2320 saddr, 2321 lun, 2322 retries, 2323 retry_time_ms); 2324 2325 release_ipmi_user(user, index); 2326 return rv; 2327 } 2328 EXPORT_SYMBOL(ipmi_request_settime); 2329 2330 int ipmi_request_supply_msgs(struct ipmi_user *user, 2331 struct ipmi_addr *addr, 2332 long msgid, 2333 struct kernel_ipmi_msg *msg, 2334 void *user_msg_data, 2335 void *supplied_smi, 2336 struct ipmi_recv_msg *supplied_recv, 2337 int priority) 2338 { 2339 unsigned char saddr = 0, lun = 0; 2340 int rv, index; 2341 2342 if (!user) 2343 return -EINVAL; 2344 2345 user = acquire_ipmi_user(user, &index); 2346 if (!user) 2347 return -ENODEV; 2348 2349 rv = check_addr(user->intf, addr, &saddr, &lun); 2350 if (!rv) 2351 rv = i_ipmi_request(user, 2352 user->intf, 2353 addr, 2354 msgid, 2355 msg, 2356 user_msg_data, 2357 supplied_smi, 2358 supplied_recv, 2359 priority, 2360 saddr, 2361 lun, 2362 -1, 0); 2363 2364 release_ipmi_user(user, index); 2365 return rv; 2366 } 2367 EXPORT_SYMBOL(ipmi_request_supply_msgs); 2368 2369 static void bmc_device_id_handler(struct ipmi_smi *intf, 2370 struct ipmi_recv_msg *msg) 2371 { 2372 int rv; 2373 2374 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2375 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2376 || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { 2377 dev_warn(intf->si_dev, 2378 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", 2379 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); 2380 return; 2381 } 2382 2383 rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, 2384 
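	/*
	 * (Aside, a hedged usage sketch for ipmi_request_settime(),
	 * exported above.  "user" would come from ipmi_create_user();
	 * the address and message values mirror what this driver
	 * itself sends for Get Device ID:
	 *
	 *	struct ipmi_system_interface_addr si = {
	 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
	 *		.channel = IPMI_BMC_CHANNEL,
	 *	};
	 *	struct kernel_ipmi_msg m = {
	 *		.netfn = IPMI_NETFN_APP_REQUEST,
	 *		.cmd = IPMI_GET_DEVICE_ID_CMD,
	 *	};
	 *	rv = ipmi_request_settime(user, (struct ipmi_addr *) &si,
	 *				  msgid, &m, NULL, 0, -1, 0);
	 *
	 * retries = -1 and retry_time_ms = 0 select the driver
	 * defaults, as in the internal callers below.)
	 */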
msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); 2385 if (rv) { 2386 dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); 2387 /* record completion code when error */ 2388 intf->bmc->cc = msg->msg.data[0]; 2389 intf->bmc->dyn_id_set = 0; 2390 } else { 2391 /* 2392 * Make sure the id data is available before setting 2393 * dyn_id_set. 2394 */ 2395 smp_wmb(); 2396 intf->bmc->dyn_id_set = 1; 2397 } 2398 2399 wake_up(&intf->waitq); 2400 } 2401 2402 static int 2403 send_get_device_id_cmd(struct ipmi_smi *intf) 2404 { 2405 struct ipmi_system_interface_addr si; 2406 struct kernel_ipmi_msg msg; 2407 2408 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2409 si.channel = IPMI_BMC_CHANNEL; 2410 si.lun = 0; 2411 2412 msg.netfn = IPMI_NETFN_APP_REQUEST; 2413 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 2414 msg.data = NULL; 2415 msg.data_len = 0; 2416 2417 return i_ipmi_request(NULL, 2418 intf, 2419 (struct ipmi_addr *) &si, 2420 0, 2421 &msg, 2422 intf, 2423 NULL, 2424 NULL, 2425 0, 2426 intf->addrinfo[0].address, 2427 intf->addrinfo[0].lun, 2428 -1, 0); 2429 } 2430 2431 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) 2432 { 2433 int rv; 2434 unsigned int retry_count = 0; 2435 2436 intf->null_user_handler = bmc_device_id_handler; 2437 2438 retry: 2439 bmc->cc = 0; 2440 bmc->dyn_id_set = 2; 2441 2442 rv = send_get_device_id_cmd(intf); 2443 if (rv) 2444 goto out_reset_handler; 2445 2446 wait_event(intf->waitq, bmc->dyn_id_set != 2); 2447 2448 if (!bmc->dyn_id_set) { 2449 if ((bmc->cc == IPMI_DEVICE_IN_FW_UPDATE_ERR 2450 || bmc->cc == IPMI_DEVICE_IN_INIT_ERR 2451 || bmc->cc == IPMI_NOT_IN_MY_STATE_ERR) 2452 && ++retry_count <= GET_DEVICE_ID_MAX_RETRY) { 2453 msleep(500); 2454 dev_warn(intf->si_dev, 2455 "BMC returned 0x%2.2x, retry get bmc device id\n", 2456 bmc->cc); 2457 goto retry; 2458 } 2459 2460 rv = -EIO; /* Something went wrong in the fetch. */ 2461 } 2462 2463 /* dyn_id_set makes the id data available. */ 2464 smp_rmb(); 2465 2466 out_reset_handler: 2467 intf->null_user_handler = NULL; 2468 2469 return rv; 2470 } 2471 2472 /* 2473 * Fetch the device id for the bmc/interface. You must pass in either 2474 * bmc or intf, this code will get the other one. If the data has 2475 * been recently fetched, this will just use the cached data. Otherwise 2476 * it will run a new fetch. 2477 * 2478 * Except for the first time this is called (in ipmi_add_smi()), 2479 * this will always return good data; 2480 */ 2481 static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2482 struct ipmi_device_id *id, 2483 bool *guid_set, guid_t *guid, int intf_num) 2484 { 2485 int rv = 0; 2486 int prev_dyn_id_set, prev_guid_set; 2487 bool intf_set = intf != NULL; 2488 2489 if (!intf) { 2490 mutex_lock(&bmc->dyn_mutex); 2491 retry_bmc_lock: 2492 if (list_empty(&bmc->intfs)) { 2493 mutex_unlock(&bmc->dyn_mutex); 2494 return -ENOENT; 2495 } 2496 intf = list_first_entry(&bmc->intfs, struct ipmi_smi, 2497 bmc_link); 2498 kref_get(&intf->refcount); 2499 mutex_unlock(&bmc->dyn_mutex); 2500 mutex_lock(&intf->bmc_reg_mutex); 2501 mutex_lock(&bmc->dyn_mutex); 2502 if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi, 2503 bmc_link)) { 2504 mutex_unlock(&intf->bmc_reg_mutex); 2505 kref_put(&intf->refcount, intf_free); 2506 goto retry_bmc_lock; 2507 } 2508 } else { 2509 mutex_lock(&intf->bmc_reg_mutex); 2510 bmc = intf->bmc; 2511 mutex_lock(&bmc->dyn_mutex); 2512 kref_get(&intf->refcount); 2513 } 2514 2515 /* If we have a valid and current ID, just return that. 
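	 * (For reference, dyn_id_set is a tiny state machine: 2 means
	 * a fetch is in flight, 1 means fetch_id is valid, 0 means the
	 * last fetch failed.  The ordering contract in the code above
	 * looks like:
	 *
	 *	writer (bmc_device_id_handler)	reader (__get_device_id)
	 *	bmc->fetch_id = ...;		wait_event(dyn_id_set != 2);
	 *	smp_wmb();			smp_rmb();
	 *	bmc->dyn_id_set = 1;		use bmc->fetch_id;
	 *
	 * so the id data is guaranteed visible once the flag reads 1.)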
*/ 2516 if (intf->in_bmc_register || 2517 (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry))) 2518 goto out_noprocessing; 2519 2520 prev_guid_set = bmc->dyn_guid_set; 2521 __get_guid(intf); 2522 2523 prev_dyn_id_set = bmc->dyn_id_set; 2524 rv = __get_device_id(intf, bmc); 2525 if (rv) 2526 goto out; 2527 2528 /* 2529 * The guid, device id, manufacturer id, and product id should 2530 * not change on a BMC. If it does we have to do some dancing. 2531 */ 2532 if (!intf->bmc_registered 2533 || (!prev_guid_set && bmc->dyn_guid_set) 2534 || (!prev_dyn_id_set && bmc->dyn_id_set) 2535 || (prev_guid_set && bmc->dyn_guid_set 2536 && !guid_equal(&bmc->guid, &bmc->fetch_guid)) 2537 || bmc->id.device_id != bmc->fetch_id.device_id 2538 || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id 2539 || bmc->id.product_id != bmc->fetch_id.product_id) { 2540 struct ipmi_device_id id = bmc->fetch_id; 2541 int guid_set = bmc->dyn_guid_set; 2542 guid_t guid; 2543 2544 guid = bmc->fetch_guid; 2545 mutex_unlock(&bmc->dyn_mutex); 2546 2547 __ipmi_bmc_unregister(intf); 2548 /* Fill in the temporary BMC for good measure. */ 2549 intf->bmc->id = id; 2550 intf->bmc->dyn_guid_set = guid_set; 2551 intf->bmc->guid = guid; 2552 if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num)) 2553 need_waiter(intf); /* Retry later on an error. */ 2554 else 2555 __scan_channels(intf, &id); 2556 2557 2558 if (!intf_set) { 2559 /* 2560 * We weren't given the interface on the 2561 * command line, so restart the operation on 2562 * the next interface for the BMC. 2563 */ 2564 mutex_unlock(&intf->bmc_reg_mutex); 2565 mutex_lock(&bmc->dyn_mutex); 2566 goto retry_bmc_lock; 2567 } 2568 2569 /* We have a new BMC, set it up. */ 2570 bmc = intf->bmc; 2571 mutex_lock(&bmc->dyn_mutex); 2572 goto out_noprocessing; 2573 } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id))) 2574 /* Version info changes, scan the channels again. */ 2575 __scan_channels(intf, &bmc->fetch_id); 2576 2577 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 2578 2579 out: 2580 if (rv && prev_dyn_id_set) { 2581 rv = 0; /* Ignore failures if we have previous data. */ 2582 bmc->dyn_id_set = prev_dyn_id_set; 2583 } 2584 if (!rv) { 2585 bmc->id = bmc->fetch_id; 2586 if (bmc->dyn_guid_set) 2587 bmc->guid = bmc->fetch_guid; 2588 else if (prev_guid_set) 2589 /* 2590 * The guid used to be valid and it failed to fetch, 2591 * just use the cached value. 
2592 */ 2593 bmc->dyn_guid_set = prev_guid_set; 2594 } 2595 out_noprocessing: 2596 if (!rv) { 2597 if (id) 2598 *id = bmc->id; 2599 2600 if (guid_set) 2601 *guid_set = bmc->dyn_guid_set; 2602 2603 if (guid && bmc->dyn_guid_set) 2604 *guid = bmc->guid; 2605 } 2606 2607 mutex_unlock(&bmc->dyn_mutex); 2608 mutex_unlock(&intf->bmc_reg_mutex); 2609 2610 kref_put(&intf->refcount, intf_free); 2611 return rv; 2612 } 2613 2614 static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc, 2615 struct ipmi_device_id *id, 2616 bool *guid_set, guid_t *guid) 2617 { 2618 return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1); 2619 } 2620 2621 static ssize_t device_id_show(struct device *dev, 2622 struct device_attribute *attr, 2623 char *buf) 2624 { 2625 struct bmc_device *bmc = to_bmc_device(dev); 2626 struct ipmi_device_id id; 2627 int rv; 2628 2629 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2630 if (rv) 2631 return rv; 2632 2633 return snprintf(buf, 10, "%u\n", id.device_id); 2634 } 2635 static DEVICE_ATTR_RO(device_id); 2636 2637 static ssize_t provides_device_sdrs_show(struct device *dev, 2638 struct device_attribute *attr, 2639 char *buf) 2640 { 2641 struct bmc_device *bmc = to_bmc_device(dev); 2642 struct ipmi_device_id id; 2643 int rv; 2644 2645 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2646 if (rv) 2647 return rv; 2648 2649 return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7); 2650 } 2651 static DEVICE_ATTR_RO(provides_device_sdrs); 2652 2653 static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2654 char *buf) 2655 { 2656 struct bmc_device *bmc = to_bmc_device(dev); 2657 struct ipmi_device_id id; 2658 int rv; 2659 2660 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2661 if (rv) 2662 return rv; 2663 2664 return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F); 2665 } 2666 static DEVICE_ATTR_RO(revision); 2667 2668 static ssize_t firmware_revision_show(struct device *dev, 2669 struct device_attribute *attr, 2670 char *buf) 2671 { 2672 struct bmc_device *bmc = to_bmc_device(dev); 2673 struct ipmi_device_id id; 2674 int rv; 2675 2676 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2677 if (rv) 2678 return rv; 2679 2680 return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1, 2681 id.firmware_revision_2); 2682 } 2683 static DEVICE_ATTR_RO(firmware_revision); 2684 2685 static ssize_t ipmi_version_show(struct device *dev, 2686 struct device_attribute *attr, 2687 char *buf) 2688 { 2689 struct bmc_device *bmc = to_bmc_device(dev); 2690 struct ipmi_device_id id; 2691 int rv; 2692 2693 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2694 if (rv) 2695 return rv; 2696 2697 return snprintf(buf, 20, "%u.%u\n", 2698 ipmi_version_major(&id), 2699 ipmi_version_minor(&id)); 2700 } 2701 static DEVICE_ATTR_RO(ipmi_version); 2702 2703 static ssize_t add_dev_support_show(struct device *dev, 2704 struct device_attribute *attr, 2705 char *buf) 2706 { 2707 struct bmc_device *bmc = to_bmc_device(dev); 2708 struct ipmi_device_id id; 2709 int rv; 2710 2711 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2712 if (rv) 2713 return rv; 2714 2715 return snprintf(buf, 10, "0x%02x\n", id.additional_device_support); 2716 } 2717 static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, 2718 NULL); 2719 2720 static ssize_t manufacturer_id_show(struct device *dev, 2721 struct device_attribute *attr, 2722 char *buf) 2723 { 2724 struct bmc_device *bmc = to_bmc_device(dev); 2725 struct ipmi_device_id id; 2726 int 
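	/*
	 * (Illustrative only: on a live system these attributes show
	 * up under the ipmi_bmc platform device, so a quick check from
	 * userspace looks roughly like
	 *
	 *	# cat /sys/devices/platform/ipmi_bmc.0/manufacturer_id
	 *	0x0002a2
	 *
	 * where the ".0" instance and the value shown depend on the
	 * machine.)
	 */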
rv; 2727 2728 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2729 if (rv) 2730 return rv; 2731 2732 return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id); 2733 } 2734 static DEVICE_ATTR_RO(manufacturer_id); 2735 2736 static ssize_t product_id_show(struct device *dev, 2737 struct device_attribute *attr, 2738 char *buf) 2739 { 2740 struct bmc_device *bmc = to_bmc_device(dev); 2741 struct ipmi_device_id id; 2742 int rv; 2743 2744 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2745 if (rv) 2746 return rv; 2747 2748 return snprintf(buf, 10, "0x%4.4x\n", id.product_id); 2749 } 2750 static DEVICE_ATTR_RO(product_id); 2751 2752 static ssize_t aux_firmware_rev_show(struct device *dev, 2753 struct device_attribute *attr, 2754 char *buf) 2755 { 2756 struct bmc_device *bmc = to_bmc_device(dev); 2757 struct ipmi_device_id id; 2758 int rv; 2759 2760 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2761 if (rv) 2762 return rv; 2763 2764 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2765 id.aux_firmware_revision[3], 2766 id.aux_firmware_revision[2], 2767 id.aux_firmware_revision[1], 2768 id.aux_firmware_revision[0]); 2769 } 2770 static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2771 2772 static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2773 char *buf) 2774 { 2775 struct bmc_device *bmc = to_bmc_device(dev); 2776 bool guid_set; 2777 guid_t guid; 2778 int rv; 2779 2780 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid); 2781 if (rv) 2782 return rv; 2783 if (!guid_set) 2784 return -ENOENT; 2785 2786 return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid); 2787 } 2788 static DEVICE_ATTR_RO(guid); 2789 2790 static struct attribute *bmc_dev_attrs[] = { 2791 &dev_attr_device_id.attr, 2792 &dev_attr_provides_device_sdrs.attr, 2793 &dev_attr_revision.attr, 2794 &dev_attr_firmware_revision.attr, 2795 &dev_attr_ipmi_version.attr, 2796 &dev_attr_additional_device_support.attr, 2797 &dev_attr_manufacturer_id.attr, 2798 &dev_attr_product_id.attr, 2799 &dev_attr_aux_firmware_revision.attr, 2800 &dev_attr_guid.attr, 2801 NULL 2802 }; 2803 2804 static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, 2805 struct attribute *attr, int idx) 2806 { 2807 struct device *dev = kobj_to_dev(kobj); 2808 struct bmc_device *bmc = to_bmc_device(dev); 2809 umode_t mode = attr->mode; 2810 int rv; 2811 2812 if (attr == &dev_attr_aux_firmware_revision.attr) { 2813 struct ipmi_device_id id; 2814 2815 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL); 2816 return (!rv && id.aux_firmware_revision_set) ? mode : 0; 2817 } 2818 if (attr == &dev_attr_guid.attr) { 2819 bool guid_set; 2820 2821 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL); 2822 return (!rv && guid_set) ? 
mode : 0; 2823 } 2824 return mode; 2825 } 2826 2827 static const struct attribute_group bmc_dev_attr_group = { 2828 .attrs = bmc_dev_attrs, 2829 .is_visible = bmc_dev_attr_is_visible, 2830 }; 2831 2832 static const struct attribute_group *bmc_dev_attr_groups[] = { 2833 &bmc_dev_attr_group, 2834 NULL 2835 }; 2836 2837 static const struct device_type bmc_device_type = { 2838 .groups = bmc_dev_attr_groups, 2839 }; 2840 2841 static int __find_bmc_guid(struct device *dev, const void *data) 2842 { 2843 const guid_t *guid = data; 2844 struct bmc_device *bmc; 2845 int rv; 2846 2847 if (dev->type != &bmc_device_type) 2848 return 0; 2849 2850 bmc = to_bmc_device(dev); 2851 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid); 2852 if (rv) 2853 rv = kref_get_unless_zero(&bmc->usecount); 2854 return rv; 2855 } 2856 2857 /* 2858 * Returns with the bmc's usecount incremented, if it is non-NULL. 2859 */ 2860 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, 2861 guid_t *guid) 2862 { 2863 struct device *dev; 2864 struct bmc_device *bmc = NULL; 2865 2866 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 2867 if (dev) { 2868 bmc = to_bmc_device(dev); 2869 put_device(dev); 2870 } 2871 return bmc; 2872 } 2873 2874 struct prod_dev_id { 2875 unsigned int product_id; 2876 unsigned char device_id; 2877 }; 2878 2879 static int __find_bmc_prod_dev_id(struct device *dev, const void *data) 2880 { 2881 const struct prod_dev_id *cid = data; 2882 struct bmc_device *bmc; 2883 int rv; 2884 2885 if (dev->type != &bmc_device_type) 2886 return 0; 2887 2888 bmc = to_bmc_device(dev); 2889 rv = (bmc->id.product_id == cid->product_id 2890 && bmc->id.device_id == cid->device_id); 2891 if (rv) 2892 rv = kref_get_unless_zero(&bmc->usecount); 2893 return rv; 2894 } 2895 2896 /* 2897 * Returns with the bmc's usecount incremented, if it is non-NULL. 2898 */ 2899 static struct bmc_device *ipmi_find_bmc_prod_dev_id( 2900 struct device_driver *drv, 2901 unsigned int product_id, unsigned char device_id) 2902 { 2903 struct prod_dev_id id = { 2904 .product_id = product_id, 2905 .device_id = device_id, 2906 }; 2907 struct device *dev; 2908 struct bmc_device *bmc = NULL; 2909 2910 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 2911 if (dev) { 2912 bmc = to_bmc_device(dev); 2913 put_device(dev); 2914 } 2915 return bmc; 2916 } 2917 2918 static DEFINE_IDA(ipmi_bmc_ida); 2919 2920 static void 2921 release_bmc_device(struct device *dev) 2922 { 2923 kfree(to_bmc_device(dev)); 2924 } 2925 2926 static void cleanup_bmc_work(struct work_struct *work) 2927 { 2928 struct bmc_device *bmc = container_of(work, struct bmc_device, 2929 remove_work); 2930 int id = bmc->pdev.id; /* Unregister overwrites id */ 2931 2932 platform_device_unregister(&bmc->pdev); 2933 ida_simple_remove(&ipmi_bmc_ida, id); 2934 } 2935 2936 static void 2937 cleanup_bmc_device(struct kref *ref) 2938 { 2939 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); 2940 2941 /* 2942 * Remove the platform device in a work queue to avoid issues 2943 * with removing the device attributes while reading a device 2944 * attribute. 2945 */ 2946 schedule_work(&bmc->remove_work); 2947 } 2948 2949 /* 2950 * Must be called with intf->bmc_reg_mutex held. 
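 *
 * (Lifetime recap for the code below: every interface bound to a
 * bmc_device holds one usecount reference, the find functions take
 * an extra one with kref_get_unless_zero(), and the final kref_put()
 * lands in cleanup_bmc_device(), which defers the actual
 * platform_device_unregister() to a workqueue so a sysfs read can
 * never free the device out from under itself.)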
 */
static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
{
	struct bmc_device *bmc = intf->bmc;

	if (!intf->bmc_registered)
		return;

	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
	sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
	kfree(intf->my_dev_name);
	intf->my_dev_name = NULL;

	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	kref_put(&bmc->usecount, cleanup_bmc_device);
	intf->bmc_registered = false;
}

static void ipmi_bmc_unregister(struct ipmi_smi *intf)
{
	mutex_lock(&intf->bmc_reg_mutex);
	__ipmi_bmc_unregister(intf);
	mutex_unlock(&intf->bmc_reg_mutex);
}

/*
 * Must be called with intf->bmc_reg_mutex held.
 */
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num)
{
	int rv;
	struct bmc_device *bmc;
	struct bmc_device *old_bmc;

	/*
	 * platform_device_register() can cause bmc_reg_mutex to
	 * be claimed because of the is_visible functions of
	 * the attributes.  Eliminate possible recursion and
	 * release the lock.
	 */
	intf->in_bmc_register = true;
	mutex_unlock(&intf->bmc_reg_mutex);

	/*
	 * Try to find if there is a bmc_device struct
	 * representing the interfaced BMC already.
	 */
	mutex_lock(&ipmidriver_mutex);
	if (guid_set)
		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
	else
		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
						    id->product_id,
						    id->device_id);

	/*
	 * If there is already a bmc_device, free the new one,
	 * otherwise register the new BMC device.
	 */
	if (old_bmc) {
		bmc = old_bmc;
		/*
		 * Note: old_bmc already has usecount incremented by
		 * the BMC find functions.
3020 */ 3021 intf->bmc = old_bmc; 3022 mutex_lock(&bmc->dyn_mutex); 3023 list_add_tail(&intf->bmc_link, &bmc->intfs); 3024 mutex_unlock(&bmc->dyn_mutex); 3025 3026 dev_info(intf->si_dev, 3027 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3028 bmc->id.manufacturer_id, 3029 bmc->id.product_id, 3030 bmc->id.device_id); 3031 } else { 3032 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL); 3033 if (!bmc) { 3034 rv = -ENOMEM; 3035 goto out; 3036 } 3037 INIT_LIST_HEAD(&bmc->intfs); 3038 mutex_init(&bmc->dyn_mutex); 3039 INIT_WORK(&bmc->remove_work, cleanup_bmc_work); 3040 3041 bmc->id = *id; 3042 bmc->dyn_id_set = 1; 3043 bmc->dyn_guid_set = guid_set; 3044 bmc->guid = *guid; 3045 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY; 3046 3047 bmc->pdev.name = "ipmi_bmc"; 3048 3049 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL); 3050 if (rv < 0) { 3051 kfree(bmc); 3052 goto out; 3053 } 3054 3055 bmc->pdev.dev.driver = &ipmidriver.driver; 3056 bmc->pdev.id = rv; 3057 bmc->pdev.dev.release = release_bmc_device; 3058 bmc->pdev.dev.type = &bmc_device_type; 3059 kref_init(&bmc->usecount); 3060 3061 intf->bmc = bmc; 3062 mutex_lock(&bmc->dyn_mutex); 3063 list_add_tail(&intf->bmc_link, &bmc->intfs); 3064 mutex_unlock(&bmc->dyn_mutex); 3065 3066 rv = platform_device_register(&bmc->pdev); 3067 if (rv) { 3068 dev_err(intf->si_dev, 3069 "Unable to register bmc device: %d\n", 3070 rv); 3071 goto out_list_del; 3072 } 3073 3074 dev_info(intf->si_dev, 3075 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 3076 bmc->id.manufacturer_id, 3077 bmc->id.product_id, 3078 bmc->id.device_id); 3079 } 3080 3081 /* 3082 * create symlink from system interface device to bmc device 3083 * and back. 3084 */ 3085 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); 3086 if (rv) { 3087 dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); 3088 goto out_put_bmc; 3089 } 3090 3091 if (intf_num == -1) 3092 intf_num = intf->intf_num; 3093 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); 3094 if (!intf->my_dev_name) { 3095 rv = -ENOMEM; 3096 dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", 3097 rv); 3098 goto out_unlink1; 3099 } 3100 3101 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, 3102 intf->my_dev_name); 3103 if (rv) { 3104 dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", 3105 rv); 3106 goto out_free_my_dev_name; 3107 } 3108 3109 intf->bmc_registered = true; 3110 3111 out: 3112 mutex_unlock(&ipmidriver_mutex); 3113 mutex_lock(&intf->bmc_reg_mutex); 3114 intf->in_bmc_register = false; 3115 return rv; 3116 3117 3118 out_free_my_dev_name: 3119 kfree(intf->my_dev_name); 3120 intf->my_dev_name = NULL; 3121 3122 out_unlink1: 3123 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 3124 3125 out_put_bmc: 3126 mutex_lock(&bmc->dyn_mutex); 3127 list_del(&intf->bmc_link); 3128 mutex_unlock(&bmc->dyn_mutex); 3129 intf->bmc = &intf->tmp_bmc; 3130 kref_put(&bmc->usecount, cleanup_bmc_device); 3131 goto out; 3132 3133 out_list_del: 3134 mutex_lock(&bmc->dyn_mutex); 3135 list_del(&intf->bmc_link); 3136 mutex_unlock(&bmc->dyn_mutex); 3137 intf->bmc = &intf->tmp_bmc; 3138 put_device(&bmc->pdev.dev); 3139 goto out; 3140 } 3141 3142 static int 3143 send_guid_cmd(struct ipmi_smi *intf, int chan) 3144 { 3145 struct kernel_ipmi_msg msg; 3146 struct ipmi_system_interface_addr si; 3147 3148 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3149 si.channel = IPMI_BMC_CHANNEL; 3150 si.lun = 0; 3151 3152 msg.netfn = 
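	/*
	 * (Worked example for guid_handler() below: a successful Get
	 * Device GUID response is at least 17 bytes,
	 *
	 *	data[0]		completion code (0x00)
	 *	data[1..16]	the 16 GUID bytes
	 *
	 * which is why the handler demands data_len >= UUID_SIZE + 1
	 * and imports from msg->msg.data + 1.)
	 */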
IPMI_NETFN_APP_REQUEST; 3153 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 3154 msg.data = NULL; 3155 msg.data_len = 0; 3156 return i_ipmi_request(NULL, 3157 intf, 3158 (struct ipmi_addr *) &si, 3159 0, 3160 &msg, 3161 intf, 3162 NULL, 3163 NULL, 3164 0, 3165 intf->addrinfo[0].address, 3166 intf->addrinfo[0].lun, 3167 -1, 0); 3168 } 3169 3170 static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3171 { 3172 struct bmc_device *bmc = intf->bmc; 3173 3174 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3175 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 3176 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 3177 /* Not for me */ 3178 return; 3179 3180 if (msg->msg.data[0] != 0) { 3181 /* Error from getting the GUID, the BMC doesn't have one. */ 3182 bmc->dyn_guid_set = 0; 3183 goto out; 3184 } 3185 3186 if (msg->msg.data_len < UUID_SIZE + 1) { 3187 bmc->dyn_guid_set = 0; 3188 dev_warn(intf->si_dev, 3189 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", 3190 msg->msg.data_len, UUID_SIZE + 1); 3191 goto out; 3192 } 3193 3194 import_guid(&bmc->fetch_guid, msg->msg.data + 1); 3195 /* 3196 * Make sure the guid data is available before setting 3197 * dyn_guid_set. 3198 */ 3199 smp_wmb(); 3200 bmc->dyn_guid_set = 1; 3201 out: 3202 wake_up(&intf->waitq); 3203 } 3204 3205 static void __get_guid(struct ipmi_smi *intf) 3206 { 3207 int rv; 3208 struct bmc_device *bmc = intf->bmc; 3209 3210 bmc->dyn_guid_set = 2; 3211 intf->null_user_handler = guid_handler; 3212 rv = send_guid_cmd(intf, 0); 3213 if (rv) 3214 /* Send failed, no GUID available. */ 3215 bmc->dyn_guid_set = 0; 3216 else 3217 wait_event(intf->waitq, bmc->dyn_guid_set != 2); 3218 3219 /* dyn_guid_set makes the guid data available. */ 3220 smp_rmb(); 3221 3222 intf->null_user_handler = NULL; 3223 } 3224 3225 static int 3226 send_channel_info_cmd(struct ipmi_smi *intf, int chan) 3227 { 3228 struct kernel_ipmi_msg msg; 3229 unsigned char data[1]; 3230 struct ipmi_system_interface_addr si; 3231 3232 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3233 si.channel = IPMI_BMC_CHANNEL; 3234 si.lun = 0; 3235 3236 msg.netfn = IPMI_NETFN_APP_REQUEST; 3237 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 3238 msg.data = data; 3239 msg.data_len = 1; 3240 data[0] = chan; 3241 return i_ipmi_request(NULL, 3242 intf, 3243 (struct ipmi_addr *) &si, 3244 0, 3245 &msg, 3246 intf, 3247 NULL, 3248 NULL, 3249 0, 3250 intf->addrinfo[0].address, 3251 intf->addrinfo[0].lun, 3252 -1, 0); 3253 } 3254 3255 static void 3256 channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) 3257 { 3258 int rv = 0; 3259 int ch; 3260 unsigned int set = intf->curr_working_cset; 3261 struct ipmi_channel *chans; 3262 3263 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3264 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3265 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { 3266 /* It's the one we want */ 3267 if (msg->msg.data[0] != 0) { 3268 /* Got an error from the channel, just go on. */ 3269 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 3270 /* 3271 * If the MC does not support this 3272 * command, that is legal. We just 3273 * assume it has one IPMB at channel 3274 * zero. 
3275 */ 3276 intf->wchannels[set].c[0].medium 3277 = IPMI_CHANNEL_MEDIUM_IPMB; 3278 intf->wchannels[set].c[0].protocol 3279 = IPMI_CHANNEL_PROTOCOL_IPMB; 3280 3281 intf->channel_list = intf->wchannels + set; 3282 intf->channels_ready = true; 3283 wake_up(&intf->waitq); 3284 goto out; 3285 } 3286 goto next_channel; 3287 } 3288 if (msg->msg.data_len < 4) { 3289 /* Message not big enough, just go on. */ 3290 goto next_channel; 3291 } 3292 ch = intf->curr_channel; 3293 chans = intf->wchannels[set].c; 3294 chans[ch].medium = msg->msg.data[2] & 0x7f; 3295 chans[ch].protocol = msg->msg.data[3] & 0x1f; 3296 3297 next_channel: 3298 intf->curr_channel++; 3299 if (intf->curr_channel >= IPMI_MAX_CHANNELS) { 3300 intf->channel_list = intf->wchannels + set; 3301 intf->channels_ready = true; 3302 wake_up(&intf->waitq); 3303 } else { 3304 intf->channel_list = intf->wchannels + set; 3305 intf->channels_ready = true; 3306 rv = send_channel_info_cmd(intf, intf->curr_channel); 3307 } 3308 3309 if (rv) { 3310 /* Got an error somehow, just give up. */ 3311 dev_warn(intf->si_dev, 3312 "Error sending channel information for channel %d: %d\n", 3313 intf->curr_channel, rv); 3314 3315 intf->channel_list = intf->wchannels + set; 3316 intf->channels_ready = true; 3317 wake_up(&intf->waitq); 3318 } 3319 } 3320 out: 3321 return; 3322 } 3323 3324 /* 3325 * Must be holding intf->bmc_reg_mutex to call this. 3326 */ 3327 static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id) 3328 { 3329 int rv; 3330 3331 if (ipmi_version_major(id) > 1 3332 || (ipmi_version_major(id) == 1 3333 && ipmi_version_minor(id) >= 5)) { 3334 unsigned int set; 3335 3336 /* 3337 * Start scanning the channels to see what is 3338 * available. 3339 */ 3340 set = !intf->curr_working_cset; 3341 intf->curr_working_cset = set; 3342 memset(&intf->wchannels[set], 0, 3343 sizeof(struct ipmi_channel_set)); 3344 3345 intf->null_user_handler = channel_handler; 3346 intf->curr_channel = 0; 3347 rv = send_channel_info_cmd(intf, 0); 3348 if (rv) { 3349 dev_warn(intf->si_dev, 3350 "Error sending channel information for channel 0, %d\n", 3351 rv); 3352 intf->null_user_handler = NULL; 3353 return -EIO; 3354 } 3355 3356 /* Wait for the channel info to be read. */ 3357 wait_event(intf->waitq, intf->channels_ready); 3358 intf->null_user_handler = NULL; 3359 } else { 3360 unsigned int set = intf->curr_working_cset; 3361 3362 /* Assume a single IPMB channel at zero. 
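		 *
		 * (For reference, when scanning does happen, the Get
		 * Channel Info responses consumed by channel_handler()
		 * above carry
		 *
		 *	data[0]	completion code
		 *	data[1]	channel number
		 *	data[2]	channel medium   (low 7 bits; 1 = IPMB,
		 *		4 = 802.3 LAN in the IPMI spec)
		 *	data[3]	channel protocol (low 5 bits)
		 *
		 * hence the 0x7f and 0x1f masks there.)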
*/ 3363 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 3364 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 3365 intf->channel_list = intf->wchannels + set; 3366 intf->channels_ready = true; 3367 } 3368 3369 return 0; 3370 } 3371 3372 static void ipmi_poll(struct ipmi_smi *intf) 3373 { 3374 if (intf->handlers->poll) 3375 intf->handlers->poll(intf->send_info); 3376 /* In case something came in */ 3377 handle_new_recv_msgs(intf); 3378 } 3379 3380 void ipmi_poll_interface(struct ipmi_user *user) 3381 { 3382 ipmi_poll(user->intf); 3383 } 3384 EXPORT_SYMBOL(ipmi_poll_interface); 3385 3386 static void redo_bmc_reg(struct work_struct *work) 3387 { 3388 struct ipmi_smi *intf = container_of(work, struct ipmi_smi, 3389 bmc_reg_work); 3390 3391 if (!intf->in_shutdown) 3392 bmc_get_device_id(intf, NULL, NULL, NULL, NULL); 3393 3394 kref_put(&intf->refcount, intf_free); 3395 } 3396 3397 int ipmi_add_smi(struct module *owner, 3398 const struct ipmi_smi_handlers *handlers, 3399 void *send_info, 3400 struct device *si_dev, 3401 unsigned char slave_addr) 3402 { 3403 int i, j; 3404 int rv; 3405 struct ipmi_smi *intf, *tintf; 3406 struct list_head *link; 3407 struct ipmi_device_id id; 3408 3409 /* 3410 * Make sure the driver is actually initialized, this handles 3411 * problems with initialization order. 3412 */ 3413 rv = ipmi_init_msghandler(); 3414 if (rv) 3415 return rv; 3416 3417 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 3418 if (!intf) 3419 return -ENOMEM; 3420 3421 rv = init_srcu_struct(&intf->users_srcu); 3422 if (rv) { 3423 kfree(intf); 3424 return rv; 3425 } 3426 3427 intf->owner = owner; 3428 intf->bmc = &intf->tmp_bmc; 3429 INIT_LIST_HEAD(&intf->bmc->intfs); 3430 mutex_init(&intf->bmc->dyn_mutex); 3431 INIT_LIST_HEAD(&intf->bmc_link); 3432 mutex_init(&intf->bmc_reg_mutex); 3433 intf->intf_num = -1; /* Mark it invalid for now. */ 3434 kref_init(&intf->refcount); 3435 INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg); 3436 intf->si_dev = si_dev; 3437 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 3438 intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR; 3439 intf->addrinfo[j].lun = 2; 3440 } 3441 if (slave_addr != 0) 3442 intf->addrinfo[0].address = slave_addr; 3443 INIT_LIST_HEAD(&intf->users); 3444 intf->handlers = handlers; 3445 intf->send_info = send_info; 3446 spin_lock_init(&intf->seq_lock); 3447 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 3448 intf->seq_table[j].inuse = 0; 3449 intf->seq_table[j].seqid = 0; 3450 } 3451 intf->curr_seq = 0; 3452 spin_lock_init(&intf->waiting_rcv_msgs_lock); 3453 INIT_LIST_HEAD(&intf->waiting_rcv_msgs); 3454 tasklet_setup(&intf->recv_tasklet, 3455 smi_recv_tasklet); 3456 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 3457 spin_lock_init(&intf->xmit_msgs_lock); 3458 INIT_LIST_HEAD(&intf->xmit_msgs); 3459 INIT_LIST_HEAD(&intf->hp_xmit_msgs); 3460 spin_lock_init(&intf->events_lock); 3461 spin_lock_init(&intf->watch_lock); 3462 atomic_set(&intf->event_waiters, 0); 3463 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3464 INIT_LIST_HEAD(&intf->waiting_events); 3465 intf->waiting_events_count = 0; 3466 mutex_init(&intf->cmd_rcvrs_mutex); 3467 spin_lock_init(&intf->maintenance_mode_lock); 3468 INIT_LIST_HEAD(&intf->cmd_rcvrs); 3469 init_waitqueue_head(&intf->waitq); 3470 for (i = 0; i < IPMI_NUM_STATS; i++) 3471 atomic_set(&intf->stats[i], 0); 3472 3473 mutex_lock(&ipmi_interfaces_mutex); 3474 /* Look for a hole in the numbers. 
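	 *
	 * (Low-level interface drivers normally reach ipmi_add_smi()
	 * through the ipmi_register_smi() wrapper in
	 * <linux/ipmi_smi.h>, roughly
	 *
	 *	rv = ipmi_register_smi(&my_handlers, my_info,
	 *			       &pdev->dev, slave_addr);
	 *
	 * with my_handlers/my_info as illustrative names; the wrapper
	 * just supplies THIS_MODULE as the owner argument.)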
*/ 3475 i = 0; 3476 link = &ipmi_interfaces; 3477 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link, 3478 ipmi_interfaces_mutex_held()) { 3479 if (tintf->intf_num != i) { 3480 link = &tintf->link; 3481 break; 3482 } 3483 i++; 3484 } 3485 /* Add the new interface in numeric order. */ 3486 if (i == 0) 3487 list_add_rcu(&intf->link, &ipmi_interfaces); 3488 else 3489 list_add_tail_rcu(&intf->link, link); 3490 3491 rv = handlers->start_processing(send_info, intf); 3492 if (rv) 3493 goto out_err; 3494 3495 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3496 if (rv) { 3497 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3498 goto out_err_started; 3499 } 3500 3501 mutex_lock(&intf->bmc_reg_mutex); 3502 rv = __scan_channels(intf, &id); 3503 mutex_unlock(&intf->bmc_reg_mutex); 3504 if (rv) 3505 goto out_err_bmc_reg; 3506 3507 /* 3508 * Keep memory order straight for RCU readers. Make 3509 * sure everything else is committed to memory before 3510 * setting intf_num to mark the interface valid. 3511 */ 3512 smp_wmb(); 3513 intf->intf_num = i; 3514 mutex_unlock(&ipmi_interfaces_mutex); 3515 3516 /* After this point the interface is legal to use. */ 3517 call_smi_watchers(i, intf->si_dev); 3518 3519 return 0; 3520 3521 out_err_bmc_reg: 3522 ipmi_bmc_unregister(intf); 3523 out_err_started: 3524 if (intf->handlers->shutdown) 3525 intf->handlers->shutdown(intf->send_info); 3526 out_err: 3527 list_del_rcu(&intf->link); 3528 mutex_unlock(&ipmi_interfaces_mutex); 3529 synchronize_srcu(&ipmi_interfaces_srcu); 3530 cleanup_srcu_struct(&intf->users_srcu); 3531 kref_put(&intf->refcount, intf_free); 3532 3533 return rv; 3534 } 3535 EXPORT_SYMBOL(ipmi_add_smi); 3536 3537 static void deliver_smi_err_response(struct ipmi_smi *intf, 3538 struct ipmi_smi_msg *msg, 3539 unsigned char err) 3540 { 3541 msg->rsp[0] = msg->data[0] | 4; 3542 msg->rsp[1] = msg->data[1]; 3543 msg->rsp[2] = err; 3544 msg->rsp_size = 3; 3545 /* It's an error, so it will never requeue, no need to check return. */ 3546 handle_one_recv_msg(intf, msg); 3547 } 3548 3549 static void cleanup_smi_msgs(struct ipmi_smi *intf) 3550 { 3551 int i; 3552 struct seq_table *ent; 3553 struct ipmi_smi_msg *msg; 3554 struct list_head *entry; 3555 struct list_head tmplist; 3556 3557 /* Clear out our transmit queues and hold the messages. */ 3558 INIT_LIST_HEAD(&tmplist); 3559 list_splice_tail(&intf->hp_xmit_msgs, &tmplist); 3560 list_splice_tail(&intf->xmit_msgs, &tmplist); 3561 3562 /* Current message first, to preserve order */ 3563 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { 3564 /* Wait for the message to clear out. */ 3565 schedule_timeout(1); 3566 } 3567 3568 /* No need for locks, the interface is down. */ 3569 3570 /* 3571 * Return errors for all pending messages in queue and in the 3572 * tables waiting for remote responses. 
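	 *
	 * (Worked example of the rsp[0] arithmetic used by
	 * deliver_smi_err_response() above: a Send Message request has
	 * data[0] = (IPMI_NETFN_APP_REQUEST << 2) | lun = 0x18 for
	 * lun 0, and 0x18 | 4 = 0x1c encodes netfn 0x07, the matching
	 * response netfn, so the fabricated reply routes like a real
	 * one.)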
3573 */ 3574 while (!list_empty(&tmplist)) { 3575 entry = tmplist.next; 3576 list_del(entry); 3577 msg = list_entry(entry, struct ipmi_smi_msg, link); 3578 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED); 3579 } 3580 3581 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 3582 ent = &intf->seq_table[i]; 3583 if (!ent->inuse) 3584 continue; 3585 deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED); 3586 } 3587 } 3588 3589 void ipmi_unregister_smi(struct ipmi_smi *intf) 3590 { 3591 struct ipmi_smi_watcher *w; 3592 int intf_num = intf->intf_num, index; 3593 3594 mutex_lock(&ipmi_interfaces_mutex); 3595 intf->intf_num = -1; 3596 intf->in_shutdown = true; 3597 list_del_rcu(&intf->link); 3598 mutex_unlock(&ipmi_interfaces_mutex); 3599 synchronize_srcu(&ipmi_interfaces_srcu); 3600 3601 /* At this point no users can be added to the interface. */ 3602 3603 /* 3604 * Call all the watcher interfaces to tell them that 3605 * an interface is going away. 3606 */ 3607 mutex_lock(&smi_watchers_mutex); 3608 list_for_each_entry(w, &smi_watchers, link) 3609 w->smi_gone(intf_num); 3610 mutex_unlock(&smi_watchers_mutex); 3611 3612 index = srcu_read_lock(&intf->users_srcu); 3613 while (!list_empty(&intf->users)) { 3614 struct ipmi_user *user = 3615 container_of(list_next_rcu(&intf->users), 3616 struct ipmi_user, link); 3617 3618 _ipmi_destroy_user(user); 3619 } 3620 srcu_read_unlock(&intf->users_srcu, index); 3621 3622 if (intf->handlers->shutdown) 3623 intf->handlers->shutdown(intf->send_info); 3624 3625 cleanup_smi_msgs(intf); 3626 3627 ipmi_bmc_unregister(intf); 3628 3629 cleanup_srcu_struct(&intf->users_srcu); 3630 kref_put(&intf->refcount, intf_free); 3631 } 3632 EXPORT_SYMBOL(ipmi_unregister_smi); 3633 3634 static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf, 3635 struct ipmi_smi_msg *msg) 3636 { 3637 struct ipmi_ipmb_addr ipmb_addr; 3638 struct ipmi_recv_msg *recv_msg; 3639 3640 /* 3641 * This is 11, not 10, because the response must contain a 3642 * completion code. 3643 */ 3644 if (msg->rsp_size < 11) { 3645 /* Message not big enough, just ignore it. */ 3646 ipmi_inc_stat(intf, invalid_ipmb_responses); 3647 return 0; 3648 } 3649 3650 if (msg->rsp[2] != 0) { 3651 /* An error getting the response, just ignore it. */ 3652 return 0; 3653 } 3654 3655 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 3656 ipmb_addr.slave_addr = msg->rsp[6]; 3657 ipmb_addr.channel = msg->rsp[3] & 0x0f; 3658 ipmb_addr.lun = msg->rsp[7] & 3; 3659 3660 /* 3661 * It's a response from a remote entity. Look up the sequence 3662 * number and handle the response. 3663 */ 3664 if (intf_find_seq(intf, 3665 msg->rsp[7] >> 2, 3666 msg->rsp[3] & 0x0f, 3667 msg->rsp[8], 3668 (msg->rsp[4] >> 2) & (~1), 3669 (struct ipmi_addr *) &ipmb_addr, 3670 &recv_msg)) { 3671 /* 3672 * We were unable to find the sequence number, 3673 * so just nuke the message. 3674 */ 3675 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3676 return 0; 3677 } 3678 3679 memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9); 3680 /* 3681 * The other fields matched, so no need to set them, except 3682 * for netfn, which needs to be the response that was 3683 * returned, not the request value. 
3684 */ 3685 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3686 recv_msg->msg.data = recv_msg->msg_data; 3687 recv_msg->msg.data_len = msg->rsp_size - 10; 3688 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3689 if (deliver_response(intf, recv_msg)) 3690 ipmi_inc_stat(intf, unhandled_ipmb_responses); 3691 else 3692 ipmi_inc_stat(intf, handled_ipmb_responses); 3693 3694 return 0; 3695 } 3696 3697 static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, 3698 struct ipmi_smi_msg *msg) 3699 { 3700 struct cmd_rcvr *rcvr; 3701 int rv = 0; 3702 unsigned char netfn; 3703 unsigned char cmd; 3704 unsigned char chan; 3705 struct ipmi_user *user = NULL; 3706 struct ipmi_ipmb_addr *ipmb_addr; 3707 struct ipmi_recv_msg *recv_msg; 3708 3709 if (msg->rsp_size < 10) { 3710 /* Message not big enough, just ignore it. */ 3711 ipmi_inc_stat(intf, invalid_commands); 3712 return 0; 3713 } 3714 3715 if (msg->rsp[2] != 0) { 3716 /* An error getting the response, just ignore it. */ 3717 return 0; 3718 } 3719 3720 netfn = msg->rsp[4] >> 2; 3721 cmd = msg->rsp[8]; 3722 chan = msg->rsp[3] & 0xf; 3723 3724 rcu_read_lock(); 3725 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3726 if (rcvr) { 3727 user = rcvr->user; 3728 kref_get(&user->refcount); 3729 } else 3730 user = NULL; 3731 rcu_read_unlock(); 3732 3733 if (user == NULL) { 3734 /* We didn't find a user, deliver an error response. */ 3735 ipmi_inc_stat(intf, unhandled_commands); 3736 3737 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 3738 msg->data[1] = IPMI_SEND_MSG_CMD; 3739 msg->data[2] = msg->rsp[3]; 3740 msg->data[3] = msg->rsp[6]; 3741 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 3742 msg->data[5] = ipmb_checksum(&msg->data[3], 2); 3743 msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address; 3744 /* rqseq/lun */ 3745 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 3746 msg->data[8] = msg->rsp[8]; /* cmd */ 3747 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 3748 msg->data[10] = ipmb_checksum(&msg->data[6], 4); 3749 msg->data_size = 11; 3750 3751 pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data); 3752 3753 rcu_read_lock(); 3754 if (!intf->in_shutdown) { 3755 smi_send(intf, intf->handlers, msg, 0); 3756 /* 3757 * We used the message, so return the value 3758 * that causes it to not be freed or 3759 * queued. 3760 */ 3761 rv = -1; 3762 } 3763 rcu_read_unlock(); 3764 } else { 3765 recv_msg = ipmi_alloc_recv_msg(); 3766 if (!recv_msg) { 3767 /* 3768 * We couldn't allocate memory for the 3769 * message, so requeue it for handling 3770 * later. 3771 */ 3772 rv = 1; 3773 kref_put(&user->refcount, free_user); 3774 } else { 3775 /* Extract the source address from the data. */ 3776 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 3777 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 3778 ipmb_addr->slave_addr = msg->rsp[6]; 3779 ipmb_addr->lun = msg->rsp[7] & 3; 3780 ipmb_addr->channel = msg->rsp[3] & 0xf; 3781 3782 /* 3783 * Extract the rest of the message information 3784 * from the IPMB header. 3785 */ 3786 recv_msg->user = user; 3787 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3788 recv_msg->msgid = msg->rsp[7] >> 2; 3789 recv_msg->msg.netfn = msg->rsp[4] >> 2; 3790 recv_msg->msg.cmd = msg->rsp[8]; 3791 recv_msg->msg.data = recv_msg->msg_data; 3792 3793 /* 3794 * We chop off 10, not 9 bytes because the checksum 3795 * at the end also needs to be removed. 
3796 */ 3797 recv_msg->msg.data_len = msg->rsp_size - 10; 3798 memcpy(recv_msg->msg_data, &msg->rsp[9], 3799 msg->rsp_size - 10); 3800 if (deliver_response(intf, recv_msg)) 3801 ipmi_inc_stat(intf, unhandled_commands); 3802 else 3803 ipmi_inc_stat(intf, handled_commands); 3804 } 3805 } 3806 3807 return rv; 3808 } 3809 3810 static int handle_lan_get_msg_rsp(struct ipmi_smi *intf, 3811 struct ipmi_smi_msg *msg) 3812 { 3813 struct ipmi_lan_addr lan_addr; 3814 struct ipmi_recv_msg *recv_msg; 3815 3816 3817 /* 3818 * This is 13, not 12, because the response must contain a 3819 * completion code. 3820 */ 3821 if (msg->rsp_size < 13) { 3822 /* Message not big enough, just ignore it. */ 3823 ipmi_inc_stat(intf, invalid_lan_responses); 3824 return 0; 3825 } 3826 3827 if (msg->rsp[2] != 0) { 3828 /* An error getting the response, just ignore it. */ 3829 return 0; 3830 } 3831 3832 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 3833 lan_addr.session_handle = msg->rsp[4]; 3834 lan_addr.remote_SWID = msg->rsp[8]; 3835 lan_addr.local_SWID = msg->rsp[5]; 3836 lan_addr.channel = msg->rsp[3] & 0x0f; 3837 lan_addr.privilege = msg->rsp[3] >> 4; 3838 lan_addr.lun = msg->rsp[9] & 3; 3839 3840 /* 3841 * It's a response from a remote entity. Look up the sequence 3842 * number and handle the response. 3843 */ 3844 if (intf_find_seq(intf, 3845 msg->rsp[9] >> 2, 3846 msg->rsp[3] & 0x0f, 3847 msg->rsp[10], 3848 (msg->rsp[6] >> 2) & (~1), 3849 (struct ipmi_addr *) &lan_addr, 3850 &recv_msg)) { 3851 /* 3852 * We were unable to find the sequence number, 3853 * so just nuke the message. 3854 */ 3855 ipmi_inc_stat(intf, unhandled_lan_responses); 3856 return 0; 3857 } 3858 3859 memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11); 3860 /* 3861 * The other fields matched, so no need to set them, except 3862 * for netfn, which needs to be the response that was 3863 * returned, not the request value. 3864 */ 3865 recv_msg->msg.netfn = msg->rsp[6] >> 2; 3866 recv_msg->msg.data = recv_msg->msg_data; 3867 recv_msg->msg.data_len = msg->rsp_size - 12; 3868 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3869 if (deliver_response(intf, recv_msg)) 3870 ipmi_inc_stat(intf, unhandled_lan_responses); 3871 else 3872 ipmi_inc_stat(intf, handled_lan_responses); 3873 3874 return 0; 3875 } 3876 3877 static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, 3878 struct ipmi_smi_msg *msg) 3879 { 3880 struct cmd_rcvr *rcvr; 3881 int rv = 0; 3882 unsigned char netfn; 3883 unsigned char cmd; 3884 unsigned char chan; 3885 struct ipmi_user *user = NULL; 3886 struct ipmi_lan_addr *lan_addr; 3887 struct ipmi_recv_msg *recv_msg; 3888 3889 if (msg->rsp_size < 12) { 3890 /* Message not big enough, just ignore it. */ 3891 ipmi_inc_stat(intf, invalid_commands); 3892 return 0; 3893 } 3894 3895 if (msg->rsp[2] != 0) { 3896 /* An error getting the response, just ignore it. */ 3897 return 0; 3898 } 3899 3900 netfn = msg->rsp[6] >> 2; 3901 cmd = msg->rsp[10]; 3902 chan = msg->rsp[3] & 0xf; 3903 3904 rcu_read_lock(); 3905 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 3906 if (rcvr) { 3907 user = rcvr->user; 3908 kref_get(&user->refcount); 3909 } else 3910 user = NULL; 3911 rcu_read_unlock(); 3912 3913 if (user == NULL) { 3914 /* We didn't find a user, just give up. */ 3915 ipmi_inc_stat(intf, unhandled_commands); 3916 3917 /* 3918 * Don't do anything with these messages, just allow 3919 * them to be freed. 
3920 */ 3921 rv = 0; 3922 } else { 3923 recv_msg = ipmi_alloc_recv_msg(); 3924 if (!recv_msg) { 3925 /* 3926 * We couldn't allocate memory for the 3927 * message, so requeue it for handling later. 3928 */ 3929 rv = 1; 3930 kref_put(&user->refcount, free_user); 3931 } else { 3932 /* Extract the source address from the data. */ 3933 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; 3934 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; 3935 lan_addr->session_handle = msg->rsp[4]; 3936 lan_addr->remote_SWID = msg->rsp[8]; 3937 lan_addr->local_SWID = msg->rsp[5]; 3938 lan_addr->lun = msg->rsp[9] & 3; 3939 lan_addr->channel = msg->rsp[3] & 0xf; 3940 lan_addr->privilege = msg->rsp[3] >> 4; 3941 3942 /* 3943 * Extract the rest of the message information 3944 * from the IPMB header. 3945 */ 3946 recv_msg->user = user; 3947 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 3948 recv_msg->msgid = msg->rsp[9] >> 2; 3949 recv_msg->msg.netfn = msg->rsp[6] >> 2; 3950 recv_msg->msg.cmd = msg->rsp[10]; 3951 recv_msg->msg.data = recv_msg->msg_data; 3952 3953 /* 3954 * We chop off 12, not 11 bytes because the checksum 3955 * at the end also needs to be removed. 3956 */ 3957 recv_msg->msg.data_len = msg->rsp_size - 12; 3958 memcpy(recv_msg->msg_data, &msg->rsp[11], 3959 msg->rsp_size - 12); 3960 if (deliver_response(intf, recv_msg)) 3961 ipmi_inc_stat(intf, unhandled_commands); 3962 else 3963 ipmi_inc_stat(intf, handled_commands); 3964 } 3965 } 3966 3967 return rv; 3968 } 3969 3970 /* 3971 * This routine will handle "Get Message" command responses with 3972 * channels that use an OEM Medium. The message format belongs to 3973 * the OEM. See IPMI 2.0 specification, Chapter 6 and 3974 * Chapter 22, sections 22.6 and 22.24 for more details. 3975 */ 3976 static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, 3977 struct ipmi_smi_msg *msg) 3978 { 3979 struct cmd_rcvr *rcvr; 3980 int rv = 0; 3981 unsigned char netfn; 3982 unsigned char cmd; 3983 unsigned char chan; 3984 struct ipmi_user *user = NULL; 3985 struct ipmi_system_interface_addr *smi_addr; 3986 struct ipmi_recv_msg *recv_msg; 3987 3988 /* 3989 * We expect the OEM SW to perform error checking 3990 * so we just do some basic sanity checks 3991 */ 3992 if (msg->rsp_size < 4) { 3993 /* Message not big enough, just ignore it. */ 3994 ipmi_inc_stat(intf, invalid_commands); 3995 return 0; 3996 } 3997 3998 if (msg->rsp[2] != 0) { 3999 /* An error getting the response, just ignore it. */ 4000 return 0; 4001 } 4002 4003 /* 4004 * This is an OEM Message so the OEM needs to know how 4005 * handle the message. We do no interpretation. 4006 */ 4007 netfn = msg->rsp[0] >> 2; 4008 cmd = msg->rsp[1]; 4009 chan = msg->rsp[3] & 0xf; 4010 4011 rcu_read_lock(); 4012 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); 4013 if (rcvr) { 4014 user = rcvr->user; 4015 kref_get(&user->refcount); 4016 } else 4017 user = NULL; 4018 rcu_read_unlock(); 4019 4020 if (user == NULL) { 4021 /* We didn't find a user, just give up. */ 4022 ipmi_inc_stat(intf, unhandled_commands); 4023 4024 /* 4025 * Don't do anything with these messages, just allow 4026 * them to be freed. 4027 */ 4028 4029 rv = 0; 4030 } else { 4031 recv_msg = ipmi_alloc_recv_msg(); 4032 if (!recv_msg) { 4033 /* 4034 * We couldn't allocate memory for the 4035 * message, so requeue it for handling 4036 * later. 4037 */ 4038 rv = 1; 4039 kref_put(&user->refcount, free_user); 4040 } else { 4041 /* 4042 * OEM Messages are expected to be delivered via 4043 * the system interface to SMS software. 
			 * We might need to visit this again depending
			 * on OEM requirements.
			 */
			smi_addr = ((struct ipmi_system_interface_addr *)
				    &recv_msg->addr);
			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
			smi_addr->channel = IPMI_BMC_CHANNEL;
			smi_addr->lun = msg->rsp[0] & 3;

			recv_msg->user = user;
			recv_msg->user_msg_data = NULL;
			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
			recv_msg->msg.netfn = msg->rsp[0] >> 2;
			recv_msg->msg.cmd = msg->rsp[1];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * The message starts at byte 4 which follows the
			 * Channel Byte in the "GET MESSAGE" command.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 4;
			memcpy(recv_msg->msg_data, &msg->rsp[4],
			       msg->rsp_size - 4);
			if (deliver_response(intf, recv_msg))
				ipmi_inc_stat(intf, unhandled_commands);
			else
				ipmi_inc_stat(intf, handled_commands);
		}
	}

	return rv;
}

static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
				     struct ipmi_smi_msg *msg)
{
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg->msgid = 0;
	smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 3;
}

static int handle_read_event_rsp(struct ipmi_smi *intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head msgs;
	struct ipmi_user *user;
	int rv = 0, deliver_count = 0, index;
	unsigned long flags;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		ipmi_inc_stat(intf, invalid_events);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);

	ipmi_inc_stat(intf, events);

	/*
	 * Allocate and fill in one message for every user that is
	 * getting events.
	 */
	index = srcu_read_lock(&intf->users_srcu);
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			rcu_read_unlock();
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
			}
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		kref_get(&user->refcount);
		list_add_tail(&recv_msg->link, &msgs);
	}
	srcu_read_unlock(&intf->users_srcu, index);

	if (deliver_count) {
		/* Now deliver all the messages.
*/ 4158 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { 4159 list_del(&recv_msg->link); 4160 deliver_local_response(intf, recv_msg); 4161 } 4162 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { 4163 /* 4164 * No one to receive the message, put it in queue if there's 4165 * not already too many things in the queue. 4166 */ 4167 recv_msg = ipmi_alloc_recv_msg(); 4168 if (!recv_msg) { 4169 /* 4170 * We couldn't allocate memory for the 4171 * message, so requeue it for handling 4172 * later. 4173 */ 4174 rv = 1; 4175 goto out; 4176 } 4177 4178 copy_event_into_recv_msg(recv_msg, msg); 4179 list_add_tail(&recv_msg->link, &intf->waiting_events); 4180 intf->waiting_events_count++; 4181 } else if (!intf->event_msg_printed) { 4182 /* 4183 * There's too many things in the queue, discard this 4184 * message. 4185 */ 4186 dev_warn(intf->si_dev, 4187 "Event queue full, discarding incoming events\n"); 4188 intf->event_msg_printed = 1; 4189 } 4190 4191 out: 4192 spin_unlock_irqrestore(&intf->events_lock, flags); 4193 4194 return rv; 4195 } 4196 4197 static int handle_bmc_rsp(struct ipmi_smi *intf, 4198 struct ipmi_smi_msg *msg) 4199 { 4200 struct ipmi_recv_msg *recv_msg; 4201 struct ipmi_system_interface_addr *smi_addr; 4202 4203 recv_msg = (struct ipmi_recv_msg *) msg->user_data; 4204 if (recv_msg == NULL) { 4205 dev_warn(intf->si_dev, 4206 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); 4207 return 0; 4208 } 4209 4210 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 4211 recv_msg->msgid = msg->msgid; 4212 smi_addr = ((struct ipmi_system_interface_addr *) 4213 &recv_msg->addr); 4214 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4215 smi_addr->channel = IPMI_BMC_CHANNEL; 4216 smi_addr->lun = msg->rsp[0] & 3; 4217 recv_msg->msg.netfn = msg->rsp[0] >> 2; 4218 recv_msg->msg.cmd = msg->rsp[1]; 4219 memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2); 4220 recv_msg->msg.data = recv_msg->msg_data; 4221 recv_msg->msg.data_len = msg->rsp_size - 2; 4222 deliver_local_response(intf, recv_msg); 4223 4224 return 0; 4225 } 4226 4227 /* 4228 * Handle a received message. Return 1 if the message should be requeued, 4229 * 0 if the message should be freed, or -1 if the message should not 4230 * be freed or requeued. 4231 */ 4232 static int handle_one_recv_msg(struct ipmi_smi *intf, 4233 struct ipmi_smi_msg *msg) 4234 { 4235 int requeue; 4236 int chan; 4237 4238 pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp); 4239 4240 if ((msg->data_size >= 2) 4241 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) 4242 && (msg->data[1] == IPMI_SEND_MSG_CMD) 4243 && (msg->user_data == NULL)) { 4244 4245 if (intf->in_shutdown) 4246 goto free_msg; 4247 4248 /* 4249 * This is the local response to a command send, start 4250 * the timer for these. The user_data will not be 4251 * NULL if this is a response send, and we will let 4252 * response sends just go through. 4253 */ 4254 4255 /* 4256 * Check for errors, if we get certain errors (ones 4257 * that mean basically we can try again later), we 4258 * ignore them and start the timer. Otherwise we 4259 * report the error immediately. 
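		 *
		 * (For reference, the "try again later" codes tested
		 * below are the Send Message channel errors from the
		 * IPMI spec: 0xC0 node busy, 0x81 lost arbitration,
		 * 0x82 bus error, 0x83 NAK on write.  Anything else
		 * fails the pending sequence at once via
		 * intf_err_seq().)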
/*
 * Handle a received message.  Return 1 if the message should be requeued,
 * 0 if the message should be freed, or -1 if the message should not
 * be freed or requeued.
 */
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue;
	int chan;

	pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);

	if ((msg->data_size >= 2)
	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data == NULL)) {

		if (intf->in_shutdown)
			goto free_msg;

		/*
		 * This is the local response to a command send, start
		 * the timer for these.  The user_data will not be
		 * NULL if this is a response send, and we will let
		 * response sends just go through.
		 */

		/*
		 * Check for errors, if we get certain errors (ones
		 * that mean basically we can try again later), we
		 * ignore them and start the timer.  Otherwise we
		 * report the error immediately.
		 */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
		    && (msg->rsp[2] != IPMI_BUS_ERR)
		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
			int ch = msg->rsp[3] & 0xf;
			struct ipmi_channel *chans;

			/* Got an error sending the message, handle it. */

			chans = READ_ONCE(intf->channel_list)->c;
			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
				ipmi_inc_stat(intf, sent_lan_command_errs);
			else
				ipmi_inc_stat(intf, sent_ipmb_command_errs);
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);
free_msg:
		requeue = 0;
		goto out;

	} else if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		dev_warn(intf->si_dev,
			 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
			 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
		   || (msg->rsp[1] != msg->data[1])) {
		/*
		 * The NetFN and Command in the response are not even
		 * marginally correct.
		 */
		dev_warn(intf->si_dev,
			 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
			 (msg->data[0] >> 2) | 1, msg->data[1],
			 msg->rsp[0] >> 2, msg->rsp[1]);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	}
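	/*
	 * At this point the response is at least minimally well formed
	 * (or has been replaced by a synthesized error response above),
	 * so dispatch on the NetFN/command pair to work out what kind
	 * of message this is.
	 */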
	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data != NULL)) {
		/*
		 * It's a response to a response we sent.  For this we
		 * deliver a send message response to the user.
		 */
		struct ipmi_recv_msg *recv_msg = msg->user_data;

		requeue = 0;
		if (msg->rsp_size < 2)
			/* Message is too small to be correct. */
			goto out;

		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;

		if (!recv_msg)
			goto out;

		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = 1;
		recv_msg->msg_data[0] = msg->rsp[2];
		deliver_local_response(intf, recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
		struct ipmi_channel *chans;

		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
			requeue = 0;
			goto out;
		}

		/*
		 * We need to make sure the channels have been initialized.
		 * The channel_handler routine will set the "curr_channel"
		 * equal to or greater than IPMI_MAX_CHANNELS when all the
		 * channels for this interface have been initialized.
		 */
		if (!intf->channels_ready) {
			requeue = 0; /* Throw the message away */
			goto out;
		}

		chans = READ_ONCE(intf->channel_list)->c;

		switch (chans[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);
			}
			break;

		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_lan_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity.  Handle that.
				 */
				requeue = handle_lan_get_msg_cmd(intf, msg);
			}
			break;

		default:
			/*
			 * Check for OEM Channels.  Clients had better
			 * register for these commands.
			 */
			if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
			    && (chans[chan].medium
				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
				requeue = handle_oem_get_msg_cmd(intf, msg);
			} else {
				/*
				 * We don't handle the channel type, so just
				 * free the message.
				 */
				requeue = 0;
			}
		}

	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
		/* It's an asynchronous event. */
		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
	}

out:
	return requeue;
}
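/*
 * A note on the run_to_completion pattern used by the functions below:
 * when the interface is in run-to-completion mode (e.g. while
 * panicking), nothing else can run, so the queue spinlocks are
 * deliberately skipped:
 *
 *	if (!run_to_completion)
 *		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
 *	... work on the queue ...
 *	if (!run_to_completion)
 *		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
 */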
/*
 * If there are messages in the queue or pretimeouts, handle them.
 */
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
	struct ipmi_smi_msg *smi_msg;
	unsigned long flags = 0;
	int rv;
	int run_to_completion = intf->run_to_completion;

	/* See if any waiting messages need to be processed. */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	while (!list_empty(&intf->waiting_rcv_msgs)) {
		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
				     struct ipmi_smi_msg, link);
		list_del(&smi_msg->link);
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
					       flags);
		rv = handle_one_recv_msg(intf, smi_msg);
		if (!run_to_completion)
			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
		if (rv > 0) {
			/*
			 * To preserve message order, quit if we
			 * can't handle a message.  Add the message
			 * back at the head; this is safe because this
			 * tasklet is the only thing that pulls the
			 * messages.
			 */
			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
			break;
		} else {
			if (rv == 0)
				/* Message handled */
				ipmi_free_smi_msg(smi_msg);
			/* If rv < 0, fatal error, del but don't free. */
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);

	/*
	 * If the pretimeout count is non-zero, decrement one from it and
	 * deliver pretimeouts to all the users.
	 */
	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
		struct ipmi_user *user;
		int index;

		index = srcu_read_lock(&intf->users_srcu);
		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_watchdog_pretimeout)
				user->handler->ipmi_watchdog_pretimeout(
					user->handler_data);
		}
		srcu_read_unlock(&intf->users_srcu, index);
	}
}

static void smi_recv_tasklet(struct tasklet_struct *t)
{
	unsigned long flags = 0; /* keep us warning-free. */
	struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
	int run_to_completion = intf->run_to_completion;
	struct ipmi_smi_msg *newmsg = NULL;

	/*
	 * Start the next message if available.
	 *
	 * Do this here, not in the actual receiver, because we may
	 * deadlock there: the lower layer is allowed to hold locks
	 * while calling message delivery.
	 */

	rcu_read_lock();

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	if (intf->curr_msg == NULL && !intf->in_shutdown) {
		struct list_head *entry = NULL;

		/* Pick the high priority queue first. */
		if (!list_empty(&intf->hp_xmit_msgs))
			entry = intf->hp_xmit_msgs.next;
		else if (!list_empty(&intf->xmit_msgs))
			entry = intf->xmit_msgs.next;

		if (entry) {
			list_del(entry);
			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
			intf->curr_msg = newmsg;
		}
	}

	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
	if (newmsg)
		intf->handlers->sender(intf->send_info, newmsg);

	rcu_read_unlock();

	handle_new_recv_msgs(intf);
}

/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(struct ipmi_smi *intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags = 0; /* keep us warning-free. */
	int run_to_completion = intf->run_to_completion;

	/*
	 * To preserve message order, we keep a queue and deliver from
	 * a tasklet.
	 */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
				       flags);

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	/*
	 * We can get an asynchronous event or receive message in addition
	 * to commands we send.
	 */
	if (msg == intf->curr_msg)
		intf->curr_msg = NULL;
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (run_to_completion)
		smi_recv_tasklet(&intf->recv_tasklet);
	else
		tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);

void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
{
	if (intf->in_shutdown)
		return;

	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
	tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
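/*
 * Retransmission support.  When a sequenced message times out with
 * retries remaining, smi_from_recv_msg() below rebuilds the original
 * request from the saved recv_msg, re-encoding the same sequence
 * number and seqid into the msgid so that the eventual response still
 * matches the original seq table entry.
 */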
static struct ipmi_smi_msg *
smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
		  unsigned char seq, long seqid)
{
	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();

	if (!smi_msg)
		/*
		 * If we can't allocate the message, then just return;
		 * we get 4 retries, so this should be OK.
		 */
		return NULL;

	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
	smi_msg->data_size = recv_msg->msg.data_len;
	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);

	pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data);

	return smi_msg;
}

static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
			      struct list_head *timeouts,
			      unsigned long timeout_period,
			      int slot, unsigned long *flags,
			      bool *need_timer)
{
	struct ipmi_recv_msg *msg;

	if (intf->in_shutdown)
		return;

	if (!ent->inuse)
		return;

	if (timeout_period < ent->timeout) {
		ent->timeout -= timeout_period;
		*need_timer = true;
		return;
	}

	if (ent->retries_left == 0) {
		/* The message has used all its retries. */
		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		list_add_tail(&msg->link, timeouts);
		if (ent->broadcast)
			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
		else if (is_lan_addr(&ent->recv_msg->addr))
			ipmi_inc_stat(intf, timed_out_lan_commands);
		else
			ipmi_inc_stat(intf, timed_out_ipmb_commands);
	} else {
		struct ipmi_smi_msg *smi_msg;
		/* More retries, send again. */

		*need_timer = true;

		/*
		 * Start with the max timer, set to normal timer after
		 * the message is sent.
		 */
		ent->timeout = MAX_MSG_TIMEOUT;
		ent->retries_left--;
		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
					    ent->seqid);
		if (!smi_msg) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      dropped_rexmit_lan_commands);
			else
				ipmi_inc_stat(intf,
					      dropped_rexmit_ipmb_commands);
			return;
		}

		spin_unlock_irqrestore(&intf->seq_lock, *flags);

		/*
		 * Send the new message.  We send with a zero
		 * priority.  It timed out, I doubt time is that
		 * critical now, and high priority messages are really
		 * only for messages to the local MC, which don't get
		 * resent.
		 */
		if (intf->handlers) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      retransmitted_lan_commands);
			else
				ipmi_inc_stat(intf,
					      retransmitted_ipmb_commands);

			smi_send(intf, intf->handlers, smi_msg, 0);
		} else
			ipmi_free_smi_msg(smi_msg);

		spin_lock_irqsave(&intf->seq_lock, *flags);
	}
}
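/*
 * A worked example of the bookkeeping above, assuming the default
 * module parameters (default_retry_ms = 2000, default_max_retries = 4),
 * the ~1000 ms tick, and an entry whose timeout starts at the retry
 * time: each tick subtracts timeout_period (1000) from ent->timeout,
 * so a fresh entry at 2000 survives one tick and retransmits on the
 * second.  Once retries_left hits zero, the message is moved to the
 * timeouts list and completed with IPMI_TIMEOUT_COMPLETION_CODE by
 * ipmi_timeout_handler() below.
 */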
static bool ipmi_timeout_handler(struct ipmi_smi *intf,
				 unsigned long timeout_period)
{
	struct list_head timeouts;
	struct ipmi_recv_msg *msg, *msg2;
	unsigned long flags;
	int i;
	bool need_timer = false;

	if (!intf->bmc_registered) {
		kref_get(&intf->refcount);
		if (!schedule_work(&intf->bmc_reg_work)) {
			kref_put(&intf->refcount, intf_free);
			need_timer = true;
		}
	}

	/*
	 * Go through the seq table and find any messages that
	 * have timed out, putting them in the timeouts list.
	 */
	INIT_LIST_HEAD(&timeouts);
	spin_lock_irqsave(&intf->seq_lock, flags);
	if (intf->ipmb_maintenance_mode_timeout) {
		if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
			intf->ipmb_maintenance_mode_timeout = 0;
		else
			intf->ipmb_maintenance_mode_timeout -= timeout_period;
	}
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
		check_msg_timeout(intf, &intf->seq_table[i],
				  &timeouts, timeout_period, i,
				  &flags, &need_timer);
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	list_for_each_entry_safe(msg, msg2, &timeouts, link)
		deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);

	/*
	 * Maintenance mode handling.  Check the timeout
	 * optimistically before we claim the lock.  It may
	 * mean a timeout gets missed occasionally, but that
	 * only means the timeout gets extended by one period
	 * in that case.  No big deal, and it avoids the lock
	 * most of the time.
	 */
	if (intf->auto_maintenance_timeout > 0) {
		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		if (intf->auto_maintenance_timeout > 0) {
			intf->auto_maintenance_timeout
				-= timeout_period;
			if (!intf->maintenance_mode
			    && (intf->auto_maintenance_timeout <= 0)) {
				intf->maintenance_mode_enable = false;
				maintenance_mode_update(intf);
			}
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	tasklet_schedule(&intf->recv_tasklet);

	return need_timer;
}

static void ipmi_request_event(struct ipmi_smi *intf)
{
	/* No event requests when in maintenance mode. */
	if (intf->maintenance_mode_enable)
		return;

	if (!intf->in_shutdown)
		intf->handlers->request_events(intf->send_info);
}

static struct timer_list ipmi_timer;

static atomic_t stop_operation;

static void ipmi_timeout(struct timer_list *unused)
{
	struct ipmi_smi *intf;
	bool need_timer = false;
	int index;

	if (atomic_read(&stop_operation))
		return;

	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (atomic_read(&intf->event_waiters)) {
			intf->ticks_to_req_ev--;
			if (intf->ticks_to_req_ev == 0) {
				ipmi_request_event(intf);
				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
			}
			need_timer = true;
		}

		need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);

	if (need_timer)
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
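/*
 * The periodic timer is self-rearming: ipmi_timeout() above only calls
 * mod_timer() again while some interface still has pending work, so it
 * stops on its own when the driver goes idle.  need_waiter() below is
 * the matching restart hook for when new work shows up.
 */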
static void need_waiter(struct ipmi_smi *intf)
{
	/* Racy, but worst case we start the timer twice. */
	if (!timer_pending(&ipmi_timer))
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	kfree(msg);
}

struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
	struct ipmi_smi_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
	if (rv) {
		rv->done = free_smi_msg;
		rv->user_data = NULL;
		atomic_inc(&smi_msg_inuse_count);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);

static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	kfree(msg);
}

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
{
	struct ipmi_recv_msg *rv;

	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
	if (rv) {
		rv->user = NULL;
		rv->done = free_recv_msg;
		atomic_inc(&recv_msg_inuse_count);
	}
	return rv;
}

void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user)
		kref_put(&msg->user->refcount, free_user);
	msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);

static atomic_t panic_done_count = ATOMIC_INIT(0);

static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}

/*
 * Inside a panic, send a message and wait for a response.
 */
static void ipmi_panic_request_and_wait(struct ipmi_smi *intf,
					struct ipmi_addr *addr,
					struct kernel_ipmi_msg *msg)
{
	struct ipmi_smi_msg smi_msg;
	struct ipmi_recv_msg recv_msg;
	int rv;

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;
	atomic_add(2, &panic_done_count);
	rv = i_ipmi_request(NULL,
			    intf,
			    addr,
			    0,
			    msg,
			    intf,
			    &smi_msg,
			    &recv_msg,
			    0,
			    intf->addrinfo[0].address,
			    intf->addrinfo[0].lun,
			    0, 1); /* Don't retry, and don't wait. */
	if (rv)
		atomic_sub(2, &panic_done_count);
	else if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);

	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}

static void event_receiver_fetcher(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}
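/*
 * Like event_receiver_fetcher() above, device_id_fetcher() below is
 * installed as intf->null_user_handler while send_panic_events() polls
 * for responses, picking the interesting bytes out of replies that
 * arrive without an owning user.
 */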
static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/*
		 * A get device id command, save if we are an event
		 * receiver or generator.
		 */
		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
	}
}

static void send_panic_events(struct ipmi_smi *intf, char *str)
{
	struct kernel_ipmi_msg msg;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;
	char *p = str;
	struct ipmi_ipmb_addr *ipmb;
	int j;

	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
		return;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/*
	 * Put a few breadcrumbs in.  Hopefully later we can add more things
	 * to make the panic events more useful.
	 */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	/* Send the event announcing the panic. */
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	/*
	 * On every interface, dump a bunch of OEM events holding the
	 * string.
	 */
	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
		return;

	/*
	 * intf_num is used as a marker to tell if the
	 * interface is valid.  Thus we need a read barrier to
	 * make sure data fetched before checking intf_num
	 * won't be used.
	 */
	smp_rmb();

	/*
	 * First job here is to figure out where to send the
	 * OEM events.  There's no way in IPMI to send OEM
	 * events using an event send command, so we have to
	 * find the SEL to put them in and stick them in
	 * there.
	 */

	/* Get capabilities from the get device id. */
	intf->local_sel_device = 0;
	intf->local_event_generator = 0;
	intf->event_receiver = 0;

	/* Request the device info from the local MC. */
	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	intf->null_user_handler = device_id_fetcher;
	ipmi_panic_request_and_wait(intf, &addr, &msg);

	if (intf->local_event_generator) {
		/* Request the event receiver from the local MC. */
		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = event_receiver_fetcher;
		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
	intf->null_user_handler = NULL;

	/*
	 * Validate the event receiver.  The low bit must not
	 * be 1 (it must be a valid IPMB address), it cannot
	 * be zero, and it must not be my address.
	 */
	if (((intf->event_receiver & 1) == 0)
	    && (intf->event_receiver != 0)
	    && (intf->event_receiver != intf->addrinfo[0].address)) {
		/*
		 * The event receiver is valid, send an IPMB
		 * message.
		 */
		ipmb = (struct ipmi_ipmb_addr *) &addr;
		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
		ipmb->channel = 0; /* FIXME - is this right? */
		ipmb->lun = intf->event_receiver_lun;
		ipmb->slave_addr = intf->event_receiver;
	} else if (intf->local_sel_device) {
		/*
		 * The event receiver was not valid (or was
		 * me), but I am an SEL device, just dump it
		 * in my SEL.
		 */
		si = (struct ipmi_system_interface_addr *) &addr;
		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		si->channel = IPMI_BMC_CHANNEL;
		si->lun = 0;
	} else
		return; /* Nowhere to send the event. */

	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
	msg.data = data;
	msg.data_len = 16;

	j = 0;
	while (*p) {
		int size = strlen(p);

		if (size > 11)
			size = 11;
		data[0] = 0;
		data[1] = 0;
		data[2] = 0xf0; /* OEM event without timestamp. */
		data[3] = intf->addrinfo[0].address;
		data[4] = j++; /* sequence # */
		/*
		 * Always give 11 bytes, so strncpy will fill
		 * it with zeroes for me.
		 */
		strncpy(data+5, p, 11);
		p += size;

		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
}

static int has_panicked;

static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	struct ipmi_smi *intf;
	struct ipmi_user *user;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || intf->intf_num == -1)
			/* Interface is not ready. */
			continue;

		if (!intf->handlers->poll)
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted.  In this case, drop items on the list for
		 * safety.
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);

		intf->run_to_completion = 1;
		if (intf->handlers->set_run_to_completion)
			intf->handlers->set_run_to_completion(intf->send_info,
							      1);

		list_for_each_entry_rcu(user, &intf->users, link) {
			if (user->handler->ipmi_panic_handler)
				user->handler->ipmi_panic_handler(
					user->handler_data);
		}

		send_panic_events(intf, ptr);
	}

	return NOTIFY_DONE;
}
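/*
 * A minimal sketch of the user side of the panic path above, for a
 * hypothetical client that registered via ipmi_create_user() (the
 * callback name and body here are illustrative only):
 *
 *	static void my_panic_handler(void *handler_data)
 *	{
 *		// Called from panic_event() in panic context: no
 *		// sleeping, no locking that could already be held.
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl	    = my_recv_handler,
 *		.ipmi_panic_handler = my_panic_handler,
 *	};
 */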
/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
	int rv;

	if (drvregistered)
		return 0;

	rv = driver_register(&ipmidriver.driver);
	if (rv)
		pr_err("Could not register IPMI driver\n");
	else
		drvregistered = true;
	return rv;
}

static struct notifier_block panic_block = {
	.notifier_call	= panic_event,
	.next		= NULL,
	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
};

static int ipmi_init_msghandler(void)
{
	int rv;

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	if (rv)
		goto out;
	if (initialized)
		goto out;

	init_srcu_struct(&ipmi_interfaces_srcu);

	timer_setup(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = true;

out:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}

static int __init ipmi_init_msghandler_mod(void)
{
	int rv;

	pr_info("version " IPMI_DRIVER_VERSION "\n");

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}

static void __exit cleanup_ipmi(void)
{
	int count;

	if (initialized) {
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &panic_block);

		/*
		 * This can't be called if any interfaces exist, so no worry
		 * about shutting down the interfaces.
		 */

		/*
		 * Tell the timer to stop, then wait for it to stop.  This
		 * avoids problems with race conditions removing the timer
		 * here.
		 */
		atomic_set(&stop_operation, 1);
		del_timer_sync(&ipmi_timer);

		initialized = false;

		/* Check for buffer leaks. */
		count = atomic_read(&smi_msg_inuse_count);
		if (count != 0)
			pr_warn("SMI message count %d at exit\n", count);
		count = atomic_read(&recv_msg_inuse_count);
		if (count != 0)
			pr_warn("recv message count %d at exit\n", count);

		cleanup_srcu_struct(&ipmi_interfaces_srcu);
	}
	if (drvregistered)
		driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");