1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * ipmi_si.c 4 * 5 * The interface to the IPMI driver for the system interfaces (KCS, SMIC, 6 * BT). 7 * 8 * Author: MontaVista Software, Inc. 9 * Corey Minyard <minyard@mvista.com> 10 * source@mvista.com 11 * 12 * Copyright 2002 MontaVista Software Inc. 13 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com> 14 */ 15 16 /* 17 * This file holds the "policy" for the interface to the SMI state 18 * machine. It does the configuration, handles timers and interrupts, 19 * and drives the real SMI state machine. 20 */ 21 22 #define pr_fmt(fmt) "ipmi_si: " fmt 23 24 #include <linux/module.h> 25 #include <linux/moduleparam.h> 26 #include <linux/sched.h> 27 #include <linux/seq_file.h> 28 #include <linux/timer.h> 29 #include <linux/errno.h> 30 #include <linux/spinlock.h> 31 #include <linux/slab.h> 32 #include <linux/delay.h> 33 #include <linux/list.h> 34 #include <linux/notifier.h> 35 #include <linux/mutex.h> 36 #include <linux/kthread.h> 37 #include <asm/irq.h> 38 #include <linux/interrupt.h> 39 #include <linux/rcupdate.h> 40 #include <linux/ipmi.h> 41 #include <linux/ipmi_smi.h> 42 #include "ipmi_si.h" 43 #include "ipmi_si_sm.h" 44 #include <linux/string.h> 45 #include <linux/ctype.h> 46 47 /* Measure times between events in the driver. */ 48 #undef DEBUG_TIMING 49 50 /* Call every 10 ms. */ 51 #define SI_TIMEOUT_TIME_USEC 10000 52 #define SI_USEC_PER_JIFFY (1000000/HZ) 53 #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) 54 #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a 55 short timeout */ 56 57 enum si_intf_state { 58 SI_NORMAL, 59 SI_GETTING_FLAGS, 60 SI_GETTING_EVENTS, 61 SI_CLEARING_FLAGS, 62 SI_GETTING_MESSAGES, 63 SI_CHECKING_ENABLES, 64 SI_SETTING_ENABLES 65 /* FIXME - add watchdog stuff. */ 66 }; 67 68 /* Some BT-specific defines we need here. */ 69 #define IPMI_BT_INTMASK_REG 2 70 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2 71 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1 72 73 static const char * const si_to_str[] = { "invalid", "kcs", "smic", "bt" }; 74 75 static bool initialized; 76 77 /* 78 * Indexes into stats[] in smi_info below. 79 */ 80 enum si_stat_indexes { 81 /* 82 * Number of times the driver requested a timer while an operation 83 * was in progress. 84 */ 85 SI_STAT_short_timeouts = 0, 86 87 /* 88 * Number of times the driver requested a timer while nothing was in 89 * progress. 90 */ 91 SI_STAT_long_timeouts, 92 93 /* Number of times the interface was idle while being polled. */ 94 SI_STAT_idles, 95 96 /* Number of interrupts the driver handled. */ 97 SI_STAT_interrupts, 98 99 /* Number of time the driver got an ATTN from the hardware. */ 100 SI_STAT_attentions, 101 102 /* Number of times the driver requested flags from the hardware. */ 103 SI_STAT_flag_fetches, 104 105 /* Number of times the hardware didn't follow the state machine. */ 106 SI_STAT_hosed_count, 107 108 /* Number of completed messages. */ 109 SI_STAT_complete_transactions, 110 111 /* Number of IPMI events received from the hardware. */ 112 SI_STAT_events, 113 114 /* Number of watchdog pretimeouts. */ 115 SI_STAT_watchdog_pretimeouts, 116 117 /* Number of asynchronous messages received. */ 118 SI_STAT_incoming_messages, 119 120 121 /* This *must* remain last, add new values above this. */ 122 SI_NUM_STATS 123 }; 124 125 struct smi_info { 126 int si_num; 127 struct ipmi_smi *intf; 128 struct si_sm_data *si_sm; 129 const struct si_sm_handlers *handlers; 130 spinlock_t si_lock; 131 struct ipmi_smi_msg *waiting_msg; 132 struct ipmi_smi_msg *curr_msg; 133 enum si_intf_state si_state; 134 135 /* 136 * Used to handle the various types of I/O that can occur with 137 * IPMI 138 */ 139 struct si_sm_io io; 140 141 /* 142 * Per-OEM handler, called from handle_flags(). Returns 1 143 * when handle_flags() needs to be re-run or 0 indicating it 144 * set si_state itself. 145 */ 146 int (*oem_data_avail_handler)(struct smi_info *smi_info); 147 148 /* 149 * Flags from the last GET_MSG_FLAGS command, used when an ATTN 150 * is set to hold the flags until we are done handling everything 151 * from the flags. 152 */ 153 #define RECEIVE_MSG_AVAIL 0x01 154 #define EVENT_MSG_BUFFER_FULL 0x02 155 #define WDT_PRE_TIMEOUT_INT 0x08 156 #define OEM0_DATA_AVAIL 0x20 157 #define OEM1_DATA_AVAIL 0x40 158 #define OEM2_DATA_AVAIL 0x80 159 #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \ 160 OEM1_DATA_AVAIL | \ 161 OEM2_DATA_AVAIL) 162 unsigned char msg_flags; 163 164 /* Does the BMC have an event buffer? */ 165 bool has_event_buffer; 166 167 /* 168 * If set to true, this will request events the next time the 169 * state machine is idle. 170 */ 171 atomic_t req_events; 172 173 /* 174 * If true, run the state machine to completion on every send 175 * call. Generally used after a panic to make sure stuff goes 176 * out. 177 */ 178 bool run_to_completion; 179 180 /* The timer for this si. */ 181 struct timer_list si_timer; 182 183 /* This flag is set, if the timer can be set */ 184 bool timer_can_start; 185 186 /* This flag is set, if the timer is running (timer_pending() isn't enough) */ 187 bool timer_running; 188 189 /* The time (in jiffies) the last timeout occurred at. */ 190 unsigned long last_timeout_jiffies; 191 192 /* Are we waiting for the events, pretimeouts, received msgs? */ 193 atomic_t need_watch; 194 195 /* 196 * The driver will disable interrupts when it gets into a 197 * situation where it cannot handle messages due to lack of 198 * memory. Once that situation clears up, it will re-enable 199 * interrupts. 200 */ 201 bool interrupt_disabled; 202 203 /* 204 * Does the BMC support events? 205 */ 206 bool supports_event_msg_buff; 207 208 /* 209 * Can we disable interrupts the global enables receive irq 210 * bit? There are currently two forms of brokenness, some 211 * systems cannot disable the bit (which is technically within 212 * the spec but a bad idea) and some systems have the bit 213 * forced to zero even though interrupts work (which is 214 * clearly outside the spec). The next bool tells which form 215 * of brokenness is present. 216 */ 217 bool cannot_disable_irq; 218 219 /* 220 * Some systems are broken and cannot set the irq enable 221 * bit, even if they support interrupts. 222 */ 223 bool irq_enable_broken; 224 225 /* Is the driver in maintenance mode? */ 226 bool in_maintenance_mode; 227 228 /* 229 * Did we get an attention that we did not handle? 230 */ 231 bool got_attn; 232 233 /* From the get device id response... */ 234 struct ipmi_device_id device_id; 235 236 /* Have we added the device group to the device? */ 237 bool dev_group_added; 238 239 /* Counters and things for the proc filesystem. */ 240 atomic_t stats[SI_NUM_STATS]; 241 242 struct task_struct *thread; 243 244 struct list_head link; 245 }; 246 247 #define smi_inc_stat(smi, stat) \ 248 atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) 249 #define smi_get_stat(smi, stat) \ 250 ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) 251 252 #define IPMI_MAX_INTFS 4 253 static int force_kipmid[IPMI_MAX_INTFS]; 254 static int num_force_kipmid; 255 256 static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS]; 257 static int num_max_busy_us; 258 259 static bool unload_when_empty = true; 260 261 static int try_smi_init(struct smi_info *smi); 262 static void cleanup_one_si(struct smi_info *smi_info); 263 static void cleanup_ipmi_si(void); 264 265 #ifdef DEBUG_TIMING 266 void debug_timestamp(char *msg) 267 { 268 struct timespec64 t; 269 270 ktime_get_ts64(&t); 271 pr_debug("**%s: %lld.%9.9ld\n", msg, t.tv_sec, t.tv_nsec); 272 } 273 #else 274 #define debug_timestamp(x) 275 #endif 276 277 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); 278 static int register_xaction_notifier(struct notifier_block *nb) 279 { 280 return atomic_notifier_chain_register(&xaction_notifier_list, nb); 281 } 282 283 static void deliver_recv_msg(struct smi_info *smi_info, 284 struct ipmi_smi_msg *msg) 285 { 286 /* Deliver the message to the upper layer. */ 287 ipmi_smi_msg_received(smi_info->intf, msg); 288 } 289 290 static void return_hosed_msg(struct smi_info *smi_info, int cCode) 291 { 292 struct ipmi_smi_msg *msg = smi_info->curr_msg; 293 294 if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED) 295 cCode = IPMI_ERR_UNSPECIFIED; 296 /* else use it as is */ 297 298 /* Make it a response */ 299 msg->rsp[0] = msg->data[0] | 4; 300 msg->rsp[1] = msg->data[1]; 301 msg->rsp[2] = cCode; 302 msg->rsp_size = 3; 303 304 smi_info->curr_msg = NULL; 305 deliver_recv_msg(smi_info, msg); 306 } 307 308 static enum si_sm_result start_next_msg(struct smi_info *smi_info) 309 { 310 int rv; 311 312 if (!smi_info->waiting_msg) { 313 smi_info->curr_msg = NULL; 314 rv = SI_SM_IDLE; 315 } else { 316 int err; 317 318 smi_info->curr_msg = smi_info->waiting_msg; 319 smi_info->waiting_msg = NULL; 320 debug_timestamp("Start2"); 321 err = atomic_notifier_call_chain(&xaction_notifier_list, 322 0, smi_info); 323 if (err & NOTIFY_STOP_MASK) { 324 rv = SI_SM_CALL_WITHOUT_DELAY; 325 goto out; 326 } 327 err = smi_info->handlers->start_transaction( 328 smi_info->si_sm, 329 smi_info->curr_msg->data, 330 smi_info->curr_msg->data_size); 331 if (err) 332 return_hosed_msg(smi_info, err); 333 334 rv = SI_SM_CALL_WITHOUT_DELAY; 335 } 336 out: 337 return rv; 338 } 339 340 static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) 341 { 342 if (!smi_info->timer_can_start) 343 return; 344 smi_info->last_timeout_jiffies = jiffies; 345 mod_timer(&smi_info->si_timer, new_val); 346 smi_info->timer_running = true; 347 } 348 349 /* 350 * Start a new message and (re)start the timer and thread. 351 */ 352 static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, 353 unsigned int size) 354 { 355 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); 356 357 if (smi_info->thread) 358 wake_up_process(smi_info->thread); 359 360 smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); 361 } 362 363 static void start_check_enables(struct smi_info *smi_info) 364 { 365 unsigned char msg[2]; 366 367 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 368 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 369 370 start_new_msg(smi_info, msg, 2); 371 smi_info->si_state = SI_CHECKING_ENABLES; 372 } 373 374 static void start_clear_flags(struct smi_info *smi_info) 375 { 376 unsigned char msg[3]; 377 378 /* Make sure the watchdog pre-timeout flag is not set at startup. */ 379 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 380 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; 381 msg[2] = WDT_PRE_TIMEOUT_INT; 382 383 start_new_msg(smi_info, msg, 3); 384 smi_info->si_state = SI_CLEARING_FLAGS; 385 } 386 387 static void start_getting_msg_queue(struct smi_info *smi_info) 388 { 389 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 390 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; 391 smi_info->curr_msg->data_size = 2; 392 393 start_new_msg(smi_info, smi_info->curr_msg->data, 394 smi_info->curr_msg->data_size); 395 smi_info->si_state = SI_GETTING_MESSAGES; 396 } 397 398 static void start_getting_events(struct smi_info *smi_info) 399 { 400 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 401 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; 402 smi_info->curr_msg->data_size = 2; 403 404 start_new_msg(smi_info, smi_info->curr_msg->data, 405 smi_info->curr_msg->data_size); 406 smi_info->si_state = SI_GETTING_EVENTS; 407 } 408 409 /* 410 * When we have a situtaion where we run out of memory and cannot 411 * allocate messages, we just leave them in the BMC and run the system 412 * polled until we can allocate some memory. Once we have some 413 * memory, we will re-enable the interrupt. 414 * 415 * Note that we cannot just use disable_irq(), since the interrupt may 416 * be shared. 417 */ 418 static inline bool disable_si_irq(struct smi_info *smi_info) 419 { 420 if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { 421 smi_info->interrupt_disabled = true; 422 start_check_enables(smi_info); 423 return true; 424 } 425 return false; 426 } 427 428 static inline bool enable_si_irq(struct smi_info *smi_info) 429 { 430 if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) { 431 smi_info->interrupt_disabled = false; 432 start_check_enables(smi_info); 433 return true; 434 } 435 return false; 436 } 437 438 /* 439 * Allocate a message. If unable to allocate, start the interrupt 440 * disable process and return NULL. If able to allocate but 441 * interrupts are disabled, free the message and return NULL after 442 * starting the interrupt enable process. 443 */ 444 static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) 445 { 446 struct ipmi_smi_msg *msg; 447 448 msg = ipmi_alloc_smi_msg(); 449 if (!msg) { 450 if (!disable_si_irq(smi_info)) 451 smi_info->si_state = SI_NORMAL; 452 } else if (enable_si_irq(smi_info)) { 453 ipmi_free_smi_msg(msg); 454 msg = NULL; 455 } 456 return msg; 457 } 458 459 static void handle_flags(struct smi_info *smi_info) 460 { 461 retry: 462 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { 463 /* Watchdog pre-timeout */ 464 smi_inc_stat(smi_info, watchdog_pretimeouts); 465 466 start_clear_flags(smi_info); 467 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; 468 ipmi_smi_watchdog_pretimeout(smi_info->intf); 469 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { 470 /* Messages available. */ 471 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); 472 if (!smi_info->curr_msg) 473 return; 474 475 start_getting_msg_queue(smi_info); 476 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { 477 /* Events available. */ 478 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); 479 if (!smi_info->curr_msg) 480 return; 481 482 start_getting_events(smi_info); 483 } else if (smi_info->msg_flags & OEM_DATA_AVAIL && 484 smi_info->oem_data_avail_handler) { 485 if (smi_info->oem_data_avail_handler(smi_info)) 486 goto retry; 487 } else 488 smi_info->si_state = SI_NORMAL; 489 } 490 491 /* 492 * Global enables we care about. 493 */ 494 #define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \ 495 IPMI_BMC_EVT_MSG_INTR) 496 497 static u8 current_global_enables(struct smi_info *smi_info, u8 base, 498 bool *irq_on) 499 { 500 u8 enables = 0; 501 502 if (smi_info->supports_event_msg_buff) 503 enables |= IPMI_BMC_EVT_MSG_BUFF; 504 505 if (((smi_info->io.irq && !smi_info->interrupt_disabled) || 506 smi_info->cannot_disable_irq) && 507 !smi_info->irq_enable_broken) 508 enables |= IPMI_BMC_RCV_MSG_INTR; 509 510 if (smi_info->supports_event_msg_buff && 511 smi_info->io.irq && !smi_info->interrupt_disabled && 512 !smi_info->irq_enable_broken) 513 enables |= IPMI_BMC_EVT_MSG_INTR; 514 515 *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR); 516 517 return enables; 518 } 519 520 static void check_bt_irq(struct smi_info *smi_info, bool irq_on) 521 { 522 u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG); 523 524 irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT; 525 526 if ((bool)irqstate == irq_on) 527 return; 528 529 if (irq_on) 530 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 531 IPMI_BT_INTMASK_ENABLE_IRQ_BIT); 532 else 533 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0); 534 } 535 536 static void handle_transaction_done(struct smi_info *smi_info) 537 { 538 struct ipmi_smi_msg *msg; 539 540 debug_timestamp("Done"); 541 switch (smi_info->si_state) { 542 case SI_NORMAL: 543 if (!smi_info->curr_msg) 544 break; 545 546 smi_info->curr_msg->rsp_size 547 = smi_info->handlers->get_result( 548 smi_info->si_sm, 549 smi_info->curr_msg->rsp, 550 IPMI_MAX_MSG_LENGTH); 551 552 /* 553 * Do this here becase deliver_recv_msg() releases the 554 * lock, and a new message can be put in during the 555 * time the lock is released. 556 */ 557 msg = smi_info->curr_msg; 558 smi_info->curr_msg = NULL; 559 deliver_recv_msg(smi_info, msg); 560 break; 561 562 case SI_GETTING_FLAGS: 563 { 564 unsigned char msg[4]; 565 unsigned int len; 566 567 /* We got the flags from the SMI, now handle them. */ 568 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 569 if (msg[2] != 0) { 570 /* Error fetching flags, just give up for now. */ 571 smi_info->si_state = SI_NORMAL; 572 } else if (len < 4) { 573 /* 574 * Hmm, no flags. That's technically illegal, but 575 * don't use uninitialized data. 576 */ 577 smi_info->si_state = SI_NORMAL; 578 } else { 579 smi_info->msg_flags = msg[3]; 580 handle_flags(smi_info); 581 } 582 break; 583 } 584 585 case SI_CLEARING_FLAGS: 586 { 587 unsigned char msg[3]; 588 589 /* We cleared the flags. */ 590 smi_info->handlers->get_result(smi_info->si_sm, msg, 3); 591 if (msg[2] != 0) { 592 /* Error clearing flags */ 593 dev_warn(smi_info->io.dev, 594 "Error clearing flags: %2.2x\n", msg[2]); 595 } 596 smi_info->si_state = SI_NORMAL; 597 break; 598 } 599 600 case SI_GETTING_EVENTS: 601 { 602 smi_info->curr_msg->rsp_size 603 = smi_info->handlers->get_result( 604 smi_info->si_sm, 605 smi_info->curr_msg->rsp, 606 IPMI_MAX_MSG_LENGTH); 607 608 /* 609 * Do this here becase deliver_recv_msg() releases the 610 * lock, and a new message can be put in during the 611 * time the lock is released. 612 */ 613 msg = smi_info->curr_msg; 614 smi_info->curr_msg = NULL; 615 if (msg->rsp[2] != 0) { 616 /* Error getting event, probably done. */ 617 msg->done(msg); 618 619 /* Take off the event flag. */ 620 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; 621 handle_flags(smi_info); 622 } else { 623 smi_inc_stat(smi_info, events); 624 625 /* 626 * Do this before we deliver the message 627 * because delivering the message releases the 628 * lock and something else can mess with the 629 * state. 630 */ 631 handle_flags(smi_info); 632 633 deliver_recv_msg(smi_info, msg); 634 } 635 break; 636 } 637 638 case SI_GETTING_MESSAGES: 639 { 640 smi_info->curr_msg->rsp_size 641 = smi_info->handlers->get_result( 642 smi_info->si_sm, 643 smi_info->curr_msg->rsp, 644 IPMI_MAX_MSG_LENGTH); 645 646 /* 647 * Do this here becase deliver_recv_msg() releases the 648 * lock, and a new message can be put in during the 649 * time the lock is released. 650 */ 651 msg = smi_info->curr_msg; 652 smi_info->curr_msg = NULL; 653 if (msg->rsp[2] != 0) { 654 /* Error getting event, probably done. */ 655 msg->done(msg); 656 657 /* Take off the msg flag. */ 658 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL; 659 handle_flags(smi_info); 660 } else { 661 smi_inc_stat(smi_info, incoming_messages); 662 663 /* 664 * Do this before we deliver the message 665 * because delivering the message releases the 666 * lock and something else can mess with the 667 * state. 668 */ 669 handle_flags(smi_info); 670 671 deliver_recv_msg(smi_info, msg); 672 } 673 break; 674 } 675 676 case SI_CHECKING_ENABLES: 677 { 678 unsigned char msg[4]; 679 u8 enables; 680 bool irq_on; 681 682 /* We got the flags from the SMI, now handle them. */ 683 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 684 if (msg[2] != 0) { 685 dev_warn(smi_info->io.dev, 686 "Couldn't get irq info: %x.\n", msg[2]); 687 dev_warn(smi_info->io.dev, 688 "Maybe ok, but ipmi might run very slowly.\n"); 689 smi_info->si_state = SI_NORMAL; 690 break; 691 } 692 enables = current_global_enables(smi_info, 0, &irq_on); 693 if (smi_info->io.si_type == SI_BT) 694 /* BT has its own interrupt enable bit. */ 695 check_bt_irq(smi_info, irq_on); 696 if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) { 697 /* Enables are not correct, fix them. */ 698 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 699 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; 700 msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK); 701 smi_info->handlers->start_transaction( 702 smi_info->si_sm, msg, 3); 703 smi_info->si_state = SI_SETTING_ENABLES; 704 } else if (smi_info->supports_event_msg_buff) { 705 smi_info->curr_msg = ipmi_alloc_smi_msg(); 706 if (!smi_info->curr_msg) { 707 smi_info->si_state = SI_NORMAL; 708 break; 709 } 710 start_getting_events(smi_info); 711 } else { 712 smi_info->si_state = SI_NORMAL; 713 } 714 break; 715 } 716 717 case SI_SETTING_ENABLES: 718 { 719 unsigned char msg[4]; 720 721 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 722 if (msg[2] != 0) 723 dev_warn(smi_info->io.dev, 724 "Could not set the global enables: 0x%x.\n", 725 msg[2]); 726 727 if (smi_info->supports_event_msg_buff) { 728 smi_info->curr_msg = ipmi_alloc_smi_msg(); 729 if (!smi_info->curr_msg) { 730 smi_info->si_state = SI_NORMAL; 731 break; 732 } 733 start_getting_events(smi_info); 734 } else { 735 smi_info->si_state = SI_NORMAL; 736 } 737 break; 738 } 739 } 740 } 741 742 /* 743 * Called on timeouts and events. Timeouts should pass the elapsed 744 * time, interrupts should pass in zero. Must be called with 745 * si_lock held and interrupts disabled. 746 */ 747 static enum si_sm_result smi_event_handler(struct smi_info *smi_info, 748 int time) 749 { 750 enum si_sm_result si_sm_result; 751 752 restart: 753 /* 754 * There used to be a loop here that waited a little while 755 * (around 25us) before giving up. That turned out to be 756 * pointless, the minimum delays I was seeing were in the 300us 757 * range, which is far too long to wait in an interrupt. So 758 * we just run until the state machine tells us something 759 * happened or it needs a delay. 760 */ 761 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); 762 time = 0; 763 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY) 764 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); 765 766 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) { 767 smi_inc_stat(smi_info, complete_transactions); 768 769 handle_transaction_done(smi_info); 770 goto restart; 771 } else if (si_sm_result == SI_SM_HOSED) { 772 smi_inc_stat(smi_info, hosed_count); 773 774 /* 775 * Do the before return_hosed_msg, because that 776 * releases the lock. 777 */ 778 smi_info->si_state = SI_NORMAL; 779 if (smi_info->curr_msg != NULL) { 780 /* 781 * If we were handling a user message, format 782 * a response to send to the upper layer to 783 * tell it about the error. 784 */ 785 return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); 786 } 787 goto restart; 788 } 789 790 /* 791 * We prefer handling attn over new messages. But don't do 792 * this if there is not yet an upper layer to handle anything. 793 */ 794 if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) { 795 unsigned char msg[2]; 796 797 if (smi_info->si_state != SI_NORMAL) { 798 /* 799 * We got an ATTN, but we are doing something else. 800 * Handle the ATTN later. 801 */ 802 smi_info->got_attn = true; 803 } else { 804 smi_info->got_attn = false; 805 smi_inc_stat(smi_info, attentions); 806 807 /* 808 * Got a attn, send down a get message flags to see 809 * what's causing it. It would be better to handle 810 * this in the upper layer, but due to the way 811 * interrupts work with the SMI, that's not really 812 * possible. 813 */ 814 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 815 msg[1] = IPMI_GET_MSG_FLAGS_CMD; 816 817 start_new_msg(smi_info, msg, 2); 818 smi_info->si_state = SI_GETTING_FLAGS; 819 goto restart; 820 } 821 } 822 823 /* If we are currently idle, try to start the next message. */ 824 if (si_sm_result == SI_SM_IDLE) { 825 smi_inc_stat(smi_info, idles); 826 827 si_sm_result = start_next_msg(smi_info); 828 if (si_sm_result != SI_SM_IDLE) 829 goto restart; 830 } 831 832 if ((si_sm_result == SI_SM_IDLE) 833 && (atomic_read(&smi_info->req_events))) { 834 /* 835 * We are idle and the upper layer requested that I fetch 836 * events, so do so. 837 */ 838 atomic_set(&smi_info->req_events, 0); 839 840 /* 841 * Take this opportunity to check the interrupt and 842 * message enable state for the BMC. The BMC can be 843 * asynchronously reset, and may thus get interrupts 844 * disable and messages disabled. 845 */ 846 if (smi_info->supports_event_msg_buff || smi_info->io.irq) { 847 start_check_enables(smi_info); 848 } else { 849 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); 850 if (!smi_info->curr_msg) 851 goto out; 852 853 start_getting_events(smi_info); 854 } 855 goto restart; 856 } 857 858 if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) { 859 /* Ok it if fails, the timer will just go off. */ 860 if (del_timer(&smi_info->si_timer)) 861 smi_info->timer_running = false; 862 } 863 864 out: 865 return si_sm_result; 866 } 867 868 static void check_start_timer_thread(struct smi_info *smi_info) 869 { 870 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) { 871 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); 872 873 if (smi_info->thread) 874 wake_up_process(smi_info->thread); 875 876 start_next_msg(smi_info); 877 smi_event_handler(smi_info, 0); 878 } 879 } 880 881 static void flush_messages(void *send_info) 882 { 883 struct smi_info *smi_info = send_info; 884 enum si_sm_result result; 885 886 /* 887 * Currently, this function is called only in run-to-completion 888 * mode. This means we are single-threaded, no need for locks. 889 */ 890 result = smi_event_handler(smi_info, 0); 891 while (result != SI_SM_IDLE) { 892 udelay(SI_SHORT_TIMEOUT_USEC); 893 result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC); 894 } 895 } 896 897 static void sender(void *send_info, 898 struct ipmi_smi_msg *msg) 899 { 900 struct smi_info *smi_info = send_info; 901 unsigned long flags; 902 903 debug_timestamp("Enqueue"); 904 905 if (smi_info->run_to_completion) { 906 /* 907 * If we are running to completion, start it. Upper 908 * layer will call flush_messages to clear it out. 909 */ 910 smi_info->waiting_msg = msg; 911 return; 912 } 913 914 spin_lock_irqsave(&smi_info->si_lock, flags); 915 /* 916 * The following two lines don't need to be under the lock for 917 * the lock's sake, but they do need SMP memory barriers to 918 * avoid getting things out of order. We are already claiming 919 * the lock, anyway, so just do it under the lock to avoid the 920 * ordering problem. 921 */ 922 BUG_ON(smi_info->waiting_msg); 923 smi_info->waiting_msg = msg; 924 check_start_timer_thread(smi_info); 925 spin_unlock_irqrestore(&smi_info->si_lock, flags); 926 } 927 928 static void set_run_to_completion(void *send_info, bool i_run_to_completion) 929 { 930 struct smi_info *smi_info = send_info; 931 932 smi_info->run_to_completion = i_run_to_completion; 933 if (i_run_to_completion) 934 flush_messages(smi_info); 935 } 936 937 /* 938 * Use -1 as a special constant to tell that we are spinning in kipmid 939 * looking for something and not delaying between checks 940 */ 941 #define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull) 942 static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result, 943 const struct smi_info *smi_info, 944 ktime_t *busy_until) 945 { 946 unsigned int max_busy_us = 0; 947 948 if (smi_info->si_num < num_max_busy_us) 949 max_busy_us = kipmid_max_busy_us[smi_info->si_num]; 950 if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY) 951 *busy_until = IPMI_TIME_NOT_BUSY; 952 else if (*busy_until == IPMI_TIME_NOT_BUSY) { 953 *busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC; 954 } else { 955 if (unlikely(ktime_get() > *busy_until)) { 956 *busy_until = IPMI_TIME_NOT_BUSY; 957 return false; 958 } 959 } 960 return true; 961 } 962 963 964 /* 965 * A busy-waiting loop for speeding up IPMI operation. 966 * 967 * Lousy hardware makes this hard. This is only enabled for systems 968 * that are not BT and do not have interrupts. It starts spinning 969 * when an operation is complete or until max_busy tells it to stop 970 * (if that is enabled). See the paragraph on kimid_max_busy_us in 971 * Documentation/driver-api/ipmi.rst for details. 972 */ 973 static int ipmi_thread(void *data) 974 { 975 struct smi_info *smi_info = data; 976 unsigned long flags; 977 enum si_sm_result smi_result; 978 ktime_t busy_until = IPMI_TIME_NOT_BUSY; 979 980 set_user_nice(current, MAX_NICE); 981 while (!kthread_should_stop()) { 982 int busy_wait; 983 984 spin_lock_irqsave(&(smi_info->si_lock), flags); 985 smi_result = smi_event_handler(smi_info, 0); 986 987 /* 988 * If the driver is doing something, there is a possible 989 * race with the timer. If the timer handler see idle, 990 * and the thread here sees something else, the timer 991 * handler won't restart the timer even though it is 992 * required. So start it here if necessary. 993 */ 994 if (smi_result != SI_SM_IDLE && !smi_info->timer_running) 995 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); 996 997 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 998 busy_wait = ipmi_thread_busy_wait(smi_result, smi_info, 999 &busy_until); 1000 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { 1001 ; /* do nothing */ 1002 } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) { 1003 /* 1004 * In maintenance mode we run as fast as 1005 * possible to allow firmware updates to 1006 * complete as fast as possible, but normally 1007 * don't bang on the scheduler. 1008 */ 1009 if (smi_info->in_maintenance_mode) 1010 schedule(); 1011 else 1012 usleep_range(100, 200); 1013 } else if (smi_result == SI_SM_IDLE) { 1014 if (atomic_read(&smi_info->need_watch)) { 1015 schedule_timeout_interruptible(100); 1016 } else { 1017 /* Wait to be woken up when we are needed. */ 1018 __set_current_state(TASK_INTERRUPTIBLE); 1019 schedule(); 1020 } 1021 } else { 1022 schedule_timeout_interruptible(1); 1023 } 1024 } 1025 return 0; 1026 } 1027 1028 1029 static void poll(void *send_info) 1030 { 1031 struct smi_info *smi_info = send_info; 1032 unsigned long flags = 0; 1033 bool run_to_completion = smi_info->run_to_completion; 1034 1035 /* 1036 * Make sure there is some delay in the poll loop so we can 1037 * drive time forward and timeout things. 1038 */ 1039 udelay(10); 1040 if (!run_to_completion) 1041 spin_lock_irqsave(&smi_info->si_lock, flags); 1042 smi_event_handler(smi_info, 10); 1043 if (!run_to_completion) 1044 spin_unlock_irqrestore(&smi_info->si_lock, flags); 1045 } 1046 1047 static void request_events(void *send_info) 1048 { 1049 struct smi_info *smi_info = send_info; 1050 1051 if (!smi_info->has_event_buffer) 1052 return; 1053 1054 atomic_set(&smi_info->req_events, 1); 1055 } 1056 1057 static void set_need_watch(void *send_info, unsigned int watch_mask) 1058 { 1059 struct smi_info *smi_info = send_info; 1060 unsigned long flags; 1061 int enable; 1062 1063 enable = !!watch_mask; 1064 1065 atomic_set(&smi_info->need_watch, enable); 1066 spin_lock_irqsave(&smi_info->si_lock, flags); 1067 check_start_timer_thread(smi_info); 1068 spin_unlock_irqrestore(&smi_info->si_lock, flags); 1069 } 1070 1071 static void smi_timeout(struct timer_list *t) 1072 { 1073 struct smi_info *smi_info = from_timer(smi_info, t, si_timer); 1074 enum si_sm_result smi_result; 1075 unsigned long flags; 1076 unsigned long jiffies_now; 1077 long time_diff; 1078 long timeout; 1079 1080 spin_lock_irqsave(&(smi_info->si_lock), flags); 1081 debug_timestamp("Timer"); 1082 1083 jiffies_now = jiffies; 1084 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) 1085 * SI_USEC_PER_JIFFY); 1086 smi_result = smi_event_handler(smi_info, time_diff); 1087 1088 if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { 1089 /* Running with interrupts, only do long timeouts. */ 1090 timeout = jiffies + SI_TIMEOUT_JIFFIES; 1091 smi_inc_stat(smi_info, long_timeouts); 1092 goto do_mod_timer; 1093 } 1094 1095 /* 1096 * If the state machine asks for a short delay, then shorten 1097 * the timer timeout. 1098 */ 1099 if (smi_result == SI_SM_CALL_WITH_DELAY) { 1100 smi_inc_stat(smi_info, short_timeouts); 1101 timeout = jiffies + 1; 1102 } else { 1103 smi_inc_stat(smi_info, long_timeouts); 1104 timeout = jiffies + SI_TIMEOUT_JIFFIES; 1105 } 1106 1107 do_mod_timer: 1108 if (smi_result != SI_SM_IDLE) 1109 smi_mod_timer(smi_info, timeout); 1110 else 1111 smi_info->timer_running = false; 1112 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 1113 } 1114 1115 irqreturn_t ipmi_si_irq_handler(int irq, void *data) 1116 { 1117 struct smi_info *smi_info = data; 1118 unsigned long flags; 1119 1120 if (smi_info->io.si_type == SI_BT) 1121 /* We need to clear the IRQ flag for the BT interface. */ 1122 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 1123 IPMI_BT_INTMASK_CLEAR_IRQ_BIT 1124 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT); 1125 1126 spin_lock_irqsave(&(smi_info->si_lock), flags); 1127 1128 smi_inc_stat(smi_info, interrupts); 1129 1130 debug_timestamp("Interrupt"); 1131 1132 smi_event_handler(smi_info, 0); 1133 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 1134 return IRQ_HANDLED; 1135 } 1136 1137 static int smi_start_processing(void *send_info, 1138 struct ipmi_smi *intf) 1139 { 1140 struct smi_info *new_smi = send_info; 1141 int enable = 0; 1142 1143 new_smi->intf = intf; 1144 1145 /* Set up the timer that drives the interface. */ 1146 timer_setup(&new_smi->si_timer, smi_timeout, 0); 1147 new_smi->timer_can_start = true; 1148 smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); 1149 1150 /* Try to claim any interrupts. */ 1151 if (new_smi->io.irq_setup) { 1152 new_smi->io.irq_handler_data = new_smi; 1153 new_smi->io.irq_setup(&new_smi->io); 1154 } 1155 1156 /* 1157 * Check if the user forcefully enabled the daemon. 1158 */ 1159 if (new_smi->si_num < num_force_kipmid) 1160 enable = force_kipmid[new_smi->si_num]; 1161 /* 1162 * The BT interface is efficient enough to not need a thread, 1163 * and there is no need for a thread if we have interrupts. 1164 */ 1165 else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq)) 1166 enable = 1; 1167 1168 if (enable) { 1169 new_smi->thread = kthread_run(ipmi_thread, new_smi, 1170 "kipmi%d", new_smi->si_num); 1171 if (IS_ERR(new_smi->thread)) { 1172 dev_notice(new_smi->io.dev, "Could not start" 1173 " kernel thread due to error %ld, only using" 1174 " timers to drive the interface\n", 1175 PTR_ERR(new_smi->thread)); 1176 new_smi->thread = NULL; 1177 } 1178 } 1179 1180 return 0; 1181 } 1182 1183 static int get_smi_info(void *send_info, struct ipmi_smi_info *data) 1184 { 1185 struct smi_info *smi = send_info; 1186 1187 data->addr_src = smi->io.addr_source; 1188 data->dev = smi->io.dev; 1189 data->addr_info = smi->io.addr_info; 1190 get_device(smi->io.dev); 1191 1192 return 0; 1193 } 1194 1195 static void set_maintenance_mode(void *send_info, bool enable) 1196 { 1197 struct smi_info *smi_info = send_info; 1198 1199 if (!enable) 1200 atomic_set(&smi_info->req_events, 0); 1201 smi_info->in_maintenance_mode = enable; 1202 } 1203 1204 static void shutdown_smi(void *send_info); 1205 static const struct ipmi_smi_handlers handlers = { 1206 .owner = THIS_MODULE, 1207 .start_processing = smi_start_processing, 1208 .shutdown = shutdown_smi, 1209 .get_smi_info = get_smi_info, 1210 .sender = sender, 1211 .request_events = request_events, 1212 .set_need_watch = set_need_watch, 1213 .set_maintenance_mode = set_maintenance_mode, 1214 .set_run_to_completion = set_run_to_completion, 1215 .flush_messages = flush_messages, 1216 .poll = poll, 1217 }; 1218 1219 static LIST_HEAD(smi_infos); 1220 static DEFINE_MUTEX(smi_infos_lock); 1221 static int smi_num; /* Used to sequence the SMIs */ 1222 1223 static const char * const addr_space_to_str[] = { "i/o", "mem" }; 1224 1225 module_param_array(force_kipmid, int, &num_force_kipmid, 0); 1226 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or" 1227 " disabled(0). Normally the IPMI driver auto-detects" 1228 " this, but the value may be overridden by this parm."); 1229 module_param(unload_when_empty, bool, 0); 1230 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are" 1231 " specified or found, default is 1. Setting to 0" 1232 " is useful for hot add of devices using hotmod."); 1233 module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644); 1234 MODULE_PARM_DESC(kipmid_max_busy_us, 1235 "Max time (in microseconds) to busy-wait for IPMI data before" 1236 " sleeping. 0 (default) means to wait forever. Set to 100-500" 1237 " if kipmid is using up a lot of CPU time."); 1238 1239 void ipmi_irq_finish_setup(struct si_sm_io *io) 1240 { 1241 if (io->si_type == SI_BT) 1242 /* Enable the interrupt in the BT interface. */ 1243 io->outputb(io, IPMI_BT_INTMASK_REG, 1244 IPMI_BT_INTMASK_ENABLE_IRQ_BIT); 1245 } 1246 1247 void ipmi_irq_start_cleanup(struct si_sm_io *io) 1248 { 1249 if (io->si_type == SI_BT) 1250 /* Disable the interrupt in the BT interface. */ 1251 io->outputb(io, IPMI_BT_INTMASK_REG, 0); 1252 } 1253 1254 static void std_irq_cleanup(struct si_sm_io *io) 1255 { 1256 ipmi_irq_start_cleanup(io); 1257 free_irq(io->irq, io->irq_handler_data); 1258 } 1259 1260 int ipmi_std_irq_setup(struct si_sm_io *io) 1261 { 1262 int rv; 1263 1264 if (!io->irq) 1265 return 0; 1266 1267 rv = request_irq(io->irq, 1268 ipmi_si_irq_handler, 1269 IRQF_SHARED, 1270 SI_DEVICE_NAME, 1271 io->irq_handler_data); 1272 if (rv) { 1273 dev_warn(io->dev, "%s unable to claim interrupt %d," 1274 " running polled\n", 1275 SI_DEVICE_NAME, io->irq); 1276 io->irq = 0; 1277 } else { 1278 io->irq_cleanup = std_irq_cleanup; 1279 ipmi_irq_finish_setup(io); 1280 dev_info(io->dev, "Using irq %d\n", io->irq); 1281 } 1282 1283 return rv; 1284 } 1285 1286 static int wait_for_msg_done(struct smi_info *smi_info) 1287 { 1288 enum si_sm_result smi_result; 1289 1290 smi_result = smi_info->handlers->event(smi_info->si_sm, 0); 1291 for (;;) { 1292 if (smi_result == SI_SM_CALL_WITH_DELAY || 1293 smi_result == SI_SM_CALL_WITH_TICK_DELAY) { 1294 schedule_timeout_uninterruptible(1); 1295 smi_result = smi_info->handlers->event( 1296 smi_info->si_sm, jiffies_to_usecs(1)); 1297 } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { 1298 smi_result = smi_info->handlers->event( 1299 smi_info->si_sm, 0); 1300 } else 1301 break; 1302 } 1303 if (smi_result == SI_SM_HOSED) 1304 /* 1305 * We couldn't get the state machine to run, so whatever's at 1306 * the port is probably not an IPMI SMI interface. 1307 */ 1308 return -ENODEV; 1309 1310 return 0; 1311 } 1312 1313 static int try_get_dev_id(struct smi_info *smi_info) 1314 { 1315 unsigned char msg[2]; 1316 unsigned char *resp; 1317 unsigned long resp_len; 1318 int rv = 0; 1319 unsigned int retry_count = 0; 1320 1321 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); 1322 if (!resp) 1323 return -ENOMEM; 1324 1325 /* 1326 * Do a Get Device ID command, since it comes back with some 1327 * useful info. 1328 */ 1329 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 1330 msg[1] = IPMI_GET_DEVICE_ID_CMD; 1331 1332 retry: 1333 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 1334 1335 rv = wait_for_msg_done(smi_info); 1336 if (rv) 1337 goto out; 1338 1339 resp_len = smi_info->handlers->get_result(smi_info->si_sm, 1340 resp, IPMI_MAX_MSG_LENGTH); 1341 1342 /* Check and record info from the get device id, in case we need it. */ 1343 rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1], 1344 resp + 2, resp_len - 2, &smi_info->device_id); 1345 if (rv) { 1346 /* record completion code */ 1347 unsigned char cc = *(resp + 2); 1348 1349 if ((cc == IPMI_DEVICE_IN_FW_UPDATE_ERR 1350 || cc == IPMI_DEVICE_IN_INIT_ERR 1351 || cc == IPMI_NOT_IN_MY_STATE_ERR) 1352 && ++retry_count <= GET_DEVICE_ID_MAX_RETRY) { 1353 dev_warn(smi_info->io.dev, 1354 "BMC returned 0x%2.2x, retry get bmc device id\n", 1355 cc); 1356 goto retry; 1357 } 1358 } 1359 1360 out: 1361 kfree(resp); 1362 return rv; 1363 } 1364 1365 static int get_global_enables(struct smi_info *smi_info, u8 *enables) 1366 { 1367 unsigned char msg[3]; 1368 unsigned char *resp; 1369 unsigned long resp_len; 1370 int rv; 1371 1372 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); 1373 if (!resp) 1374 return -ENOMEM; 1375 1376 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 1377 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 1378 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 1379 1380 rv = wait_for_msg_done(smi_info); 1381 if (rv) { 1382 dev_warn(smi_info->io.dev, 1383 "Error getting response from get global enables command: %d\n", 1384 rv); 1385 goto out; 1386 } 1387 1388 resp_len = smi_info->handlers->get_result(smi_info->si_sm, 1389 resp, IPMI_MAX_MSG_LENGTH); 1390 1391 if (resp_len < 4 || 1392 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || 1393 resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || 1394 resp[2] != 0) { 1395 dev_warn(smi_info->io.dev, 1396 "Invalid return from get global enables command: %ld %x %x %x\n", 1397 resp_len, resp[0], resp[1], resp[2]); 1398 rv = -EINVAL; 1399 goto out; 1400 } else { 1401 *enables = resp[3]; 1402 } 1403 1404 out: 1405 kfree(resp); 1406 return rv; 1407 } 1408 1409 /* 1410 * Returns 1 if it gets an error from the command. 1411 */ 1412 static int set_global_enables(struct smi_info *smi_info, u8 enables) 1413 { 1414 unsigned char msg[3]; 1415 unsigned char *resp; 1416 unsigned long resp_len; 1417 int rv; 1418 1419 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); 1420 if (!resp) 1421 return -ENOMEM; 1422 1423 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 1424 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; 1425 msg[2] = enables; 1426 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); 1427 1428 rv = wait_for_msg_done(smi_info); 1429 if (rv) { 1430 dev_warn(smi_info->io.dev, 1431 "Error getting response from set global enables command: %d\n", 1432 rv); 1433 goto out; 1434 } 1435 1436 resp_len = smi_info->handlers->get_result(smi_info->si_sm, 1437 resp, IPMI_MAX_MSG_LENGTH); 1438 1439 if (resp_len < 3 || 1440 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || 1441 resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { 1442 dev_warn(smi_info->io.dev, 1443 "Invalid return from set global enables command: %ld %x %x\n", 1444 resp_len, resp[0], resp[1]); 1445 rv = -EINVAL; 1446 goto out; 1447 } 1448 1449 if (resp[2] != 0) 1450 rv = 1; 1451 1452 out: 1453 kfree(resp); 1454 return rv; 1455 } 1456 1457 /* 1458 * Some BMCs do not support clearing the receive irq bit in the global 1459 * enables (even if they don't support interrupts on the BMC). Check 1460 * for this and handle it properly. 1461 */ 1462 static void check_clr_rcv_irq(struct smi_info *smi_info) 1463 { 1464 u8 enables = 0; 1465 int rv; 1466 1467 rv = get_global_enables(smi_info, &enables); 1468 if (!rv) { 1469 if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0) 1470 /* Already clear, should work ok. */ 1471 return; 1472 1473 enables &= ~IPMI_BMC_RCV_MSG_INTR; 1474 rv = set_global_enables(smi_info, enables); 1475 } 1476 1477 if (rv < 0) { 1478 dev_err(smi_info->io.dev, 1479 "Cannot check clearing the rcv irq: %d\n", rv); 1480 return; 1481 } 1482 1483 if (rv) { 1484 /* 1485 * An error when setting the event buffer bit means 1486 * clearing the bit is not supported. 1487 */ 1488 dev_warn(smi_info->io.dev, 1489 "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n"); 1490 smi_info->cannot_disable_irq = true; 1491 } 1492 } 1493 1494 /* 1495 * Some BMCs do not support setting the interrupt bits in the global 1496 * enables even if they support interrupts. Clearly bad, but we can 1497 * compensate. 1498 */ 1499 static void check_set_rcv_irq(struct smi_info *smi_info) 1500 { 1501 u8 enables = 0; 1502 int rv; 1503 1504 if (!smi_info->io.irq) 1505 return; 1506 1507 rv = get_global_enables(smi_info, &enables); 1508 if (!rv) { 1509 enables |= IPMI_BMC_RCV_MSG_INTR; 1510 rv = set_global_enables(smi_info, enables); 1511 } 1512 1513 if (rv < 0) { 1514 dev_err(smi_info->io.dev, 1515 "Cannot check setting the rcv irq: %d\n", rv); 1516 return; 1517 } 1518 1519 if (rv) { 1520 /* 1521 * An error when setting the event buffer bit means 1522 * setting the bit is not supported. 1523 */ 1524 dev_warn(smi_info->io.dev, 1525 "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n"); 1526 smi_info->cannot_disable_irq = true; 1527 smi_info->irq_enable_broken = true; 1528 } 1529 } 1530 1531 static int try_enable_event_buffer(struct smi_info *smi_info) 1532 { 1533 unsigned char msg[3]; 1534 unsigned char *resp; 1535 unsigned long resp_len; 1536 int rv = 0; 1537 1538 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); 1539 if (!resp) 1540 return -ENOMEM; 1541 1542 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 1543 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 1544 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 1545 1546 rv = wait_for_msg_done(smi_info); 1547 if (rv) { 1548 pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n"); 1549 goto out; 1550 } 1551 1552 resp_len = smi_info->handlers->get_result(smi_info->si_sm, 1553 resp, IPMI_MAX_MSG_LENGTH); 1554 1555 if (resp_len < 4 || 1556 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || 1557 resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || 1558 resp[2] != 0) { 1559 pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n"); 1560 rv = -EINVAL; 1561 goto out; 1562 } 1563 1564 if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) { 1565 /* buffer is already enabled, nothing to do. */ 1566 smi_info->supports_event_msg_buff = true; 1567 goto out; 1568 } 1569 1570 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 1571 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; 1572 msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF; 1573 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); 1574 1575 rv = wait_for_msg_done(smi_info); 1576 if (rv) { 1577 pr_warn("Error getting response from set global, enables command, the event buffer is not enabled\n"); 1578 goto out; 1579 } 1580 1581 resp_len = smi_info->handlers->get_result(smi_info->si_sm, 1582 resp, IPMI_MAX_MSG_LENGTH); 1583 1584 if (resp_len < 3 || 1585 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || 1586 resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { 1587 pr_warn("Invalid return from get global, enables command, not enable the event buffer\n"); 1588 rv = -EINVAL; 1589 goto out; 1590 } 1591 1592 if (resp[2] != 0) 1593 /* 1594 * An error when setting the event buffer bit means 1595 * that the event buffer is not supported. 1596 */ 1597 rv = -ENOENT; 1598 else 1599 smi_info->supports_event_msg_buff = true; 1600 1601 out: 1602 kfree(resp); 1603 return rv; 1604 } 1605 1606 #define IPMI_SI_ATTR(name) \ 1607 static ssize_t name##_show(struct device *dev, \ 1608 struct device_attribute *attr, \ 1609 char *buf) \ 1610 { \ 1611 struct smi_info *smi_info = dev_get_drvdata(dev); \ 1612 \ 1613 return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \ 1614 } \ 1615 static DEVICE_ATTR(name, 0444, name##_show, NULL) 1616 1617 static ssize_t type_show(struct device *dev, 1618 struct device_attribute *attr, 1619 char *buf) 1620 { 1621 struct smi_info *smi_info = dev_get_drvdata(dev); 1622 1623 return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]); 1624 } 1625 static DEVICE_ATTR(type, 0444, type_show, NULL); 1626 1627 static ssize_t interrupts_enabled_show(struct device *dev, 1628 struct device_attribute *attr, 1629 char *buf) 1630 { 1631 struct smi_info *smi_info = dev_get_drvdata(dev); 1632 int enabled = smi_info->io.irq && !smi_info->interrupt_disabled; 1633 1634 return snprintf(buf, 10, "%d\n", enabled); 1635 } 1636 static DEVICE_ATTR(interrupts_enabled, 0444, 1637 interrupts_enabled_show, NULL); 1638 1639 IPMI_SI_ATTR(short_timeouts); 1640 IPMI_SI_ATTR(long_timeouts); 1641 IPMI_SI_ATTR(idles); 1642 IPMI_SI_ATTR(interrupts); 1643 IPMI_SI_ATTR(attentions); 1644 IPMI_SI_ATTR(flag_fetches); 1645 IPMI_SI_ATTR(hosed_count); 1646 IPMI_SI_ATTR(complete_transactions); 1647 IPMI_SI_ATTR(events); 1648 IPMI_SI_ATTR(watchdog_pretimeouts); 1649 IPMI_SI_ATTR(incoming_messages); 1650 1651 static ssize_t params_show(struct device *dev, 1652 struct device_attribute *attr, 1653 char *buf) 1654 { 1655 struct smi_info *smi_info = dev_get_drvdata(dev); 1656 1657 return snprintf(buf, 200, 1658 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", 1659 si_to_str[smi_info->io.si_type], 1660 addr_space_to_str[smi_info->io.addr_space], 1661 smi_info->io.addr_data, 1662 smi_info->io.regspacing, 1663 smi_info->io.regsize, 1664 smi_info->io.regshift, 1665 smi_info->io.irq, 1666 smi_info->io.slave_addr); 1667 } 1668 static DEVICE_ATTR(params, 0444, params_show, NULL); 1669 1670 static struct attribute *ipmi_si_dev_attrs[] = { 1671 &dev_attr_type.attr, 1672 &dev_attr_interrupts_enabled.attr, 1673 &dev_attr_short_timeouts.attr, 1674 &dev_attr_long_timeouts.attr, 1675 &dev_attr_idles.attr, 1676 &dev_attr_interrupts.attr, 1677 &dev_attr_attentions.attr, 1678 &dev_attr_flag_fetches.attr, 1679 &dev_attr_hosed_count.attr, 1680 &dev_attr_complete_transactions.attr, 1681 &dev_attr_events.attr, 1682 &dev_attr_watchdog_pretimeouts.attr, 1683 &dev_attr_incoming_messages.attr, 1684 &dev_attr_params.attr, 1685 NULL 1686 }; 1687 1688 static const struct attribute_group ipmi_si_dev_attr_group = { 1689 .attrs = ipmi_si_dev_attrs, 1690 }; 1691 1692 /* 1693 * oem_data_avail_to_receive_msg_avail 1694 * @info - smi_info structure with msg_flags set 1695 * 1696 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL 1697 * Returns 1 indicating need to re-run handle_flags(). 1698 */ 1699 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) 1700 { 1701 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | 1702 RECEIVE_MSG_AVAIL); 1703 return 1; 1704 } 1705 1706 /* 1707 * setup_dell_poweredge_oem_data_handler 1708 * @info - smi_info.device_id must be populated 1709 * 1710 * Systems that match, but have firmware version < 1.40 may assert 1711 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that 1712 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL 1713 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags 1714 * as RECEIVE_MSG_AVAIL instead. 1715 * 1716 * As Dell has no plans to release IPMI 1.5 firmware that *ever* 1717 * assert the OEM[012] bits, and if it did, the driver would have to 1718 * change to handle that properly, we don't actually check for the 1719 * firmware version. 1720 * Device ID = 0x20 BMC on PowerEdge 8G servers 1721 * Device Revision = 0x80 1722 * Firmware Revision1 = 0x01 BMC version 1.40 1723 * Firmware Revision2 = 0x40 BCD encoded 1724 * IPMI Version = 0x51 IPMI 1.5 1725 * Manufacturer ID = A2 02 00 Dell IANA 1726 * 1727 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert 1728 * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL. 1729 * 1730 */ 1731 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 1732 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 1733 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51 1734 #define DELL_IANA_MFR_ID 0x0002a2 1735 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) 1736 { 1737 struct ipmi_device_id *id = &smi_info->device_id; 1738 if (id->manufacturer_id == DELL_IANA_MFR_ID) { 1739 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && 1740 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && 1741 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { 1742 smi_info->oem_data_avail_handler = 1743 oem_data_avail_to_receive_msg_avail; 1744 } else if (ipmi_version_major(id) < 1 || 1745 (ipmi_version_major(id) == 1 && 1746 ipmi_version_minor(id) < 5)) { 1747 smi_info->oem_data_avail_handler = 1748 oem_data_avail_to_receive_msg_avail; 1749 } 1750 } 1751 } 1752 1753 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA 1754 static void return_hosed_msg_badsize(struct smi_info *smi_info) 1755 { 1756 struct ipmi_smi_msg *msg = smi_info->curr_msg; 1757 1758 /* Make it a response */ 1759 msg->rsp[0] = msg->data[0] | 4; 1760 msg->rsp[1] = msg->data[1]; 1761 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH; 1762 msg->rsp_size = 3; 1763 smi_info->curr_msg = NULL; 1764 deliver_recv_msg(smi_info, msg); 1765 } 1766 1767 /* 1768 * dell_poweredge_bt_xaction_handler 1769 * @info - smi_info.device_id must be populated 1770 * 1771 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will 1772 * not respond to a Get SDR command if the length of the data 1773 * requested is exactly 0x3A, which leads to command timeouts and no 1774 * data returned. This intercepts such commands, and causes userspace 1775 * callers to try again with a different-sized buffer, which succeeds. 1776 */ 1777 1778 #define STORAGE_NETFN 0x0A 1779 #define STORAGE_CMD_GET_SDR 0x23 1780 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self, 1781 unsigned long unused, 1782 void *in) 1783 { 1784 struct smi_info *smi_info = in; 1785 unsigned char *data = smi_info->curr_msg->data; 1786 unsigned int size = smi_info->curr_msg->data_size; 1787 if (size >= 8 && 1788 (data[0]>>2) == STORAGE_NETFN && 1789 data[1] == STORAGE_CMD_GET_SDR && 1790 data[7] == 0x3A) { 1791 return_hosed_msg_badsize(smi_info); 1792 return NOTIFY_STOP; 1793 } 1794 return NOTIFY_DONE; 1795 } 1796 1797 static struct notifier_block dell_poweredge_bt_xaction_notifier = { 1798 .notifier_call = dell_poweredge_bt_xaction_handler, 1799 }; 1800 1801 /* 1802 * setup_dell_poweredge_bt_xaction_handler 1803 * @info - smi_info.device_id must be filled in already 1804 * 1805 * Fills in smi_info.device_id.start_transaction_pre_hook 1806 * when we know what function to use there. 1807 */ 1808 static void 1809 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) 1810 { 1811 struct ipmi_device_id *id = &smi_info->device_id; 1812 if (id->manufacturer_id == DELL_IANA_MFR_ID && 1813 smi_info->io.si_type == SI_BT) 1814 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); 1815 } 1816 1817 /* 1818 * setup_oem_data_handler 1819 * @info - smi_info.device_id must be filled in already 1820 * 1821 * Fills in smi_info.device_id.oem_data_available_handler 1822 * when we know what function to use there. 1823 */ 1824 1825 static void setup_oem_data_handler(struct smi_info *smi_info) 1826 { 1827 setup_dell_poweredge_oem_data_handler(smi_info); 1828 } 1829 1830 static void setup_xaction_handlers(struct smi_info *smi_info) 1831 { 1832 setup_dell_poweredge_bt_xaction_handler(smi_info); 1833 } 1834 1835 static void check_for_broken_irqs(struct smi_info *smi_info) 1836 { 1837 check_clr_rcv_irq(smi_info); 1838 check_set_rcv_irq(smi_info); 1839 } 1840 1841 static inline void stop_timer_and_thread(struct smi_info *smi_info) 1842 { 1843 if (smi_info->thread != NULL) { 1844 kthread_stop(smi_info->thread); 1845 smi_info->thread = NULL; 1846 } 1847 1848 smi_info->timer_can_start = false; 1849 del_timer_sync(&smi_info->si_timer); 1850 } 1851 1852 static struct smi_info *find_dup_si(struct smi_info *info) 1853 { 1854 struct smi_info *e; 1855 1856 list_for_each_entry(e, &smi_infos, link) { 1857 if (e->io.addr_space != info->io.addr_space) 1858 continue; 1859 if (e->io.addr_data == info->io.addr_data) { 1860 /* 1861 * This is a cheap hack, ACPI doesn't have a defined 1862 * slave address but SMBIOS does. Pick it up from 1863 * any source that has it available. 1864 */ 1865 if (info->io.slave_addr && !e->io.slave_addr) 1866 e->io.slave_addr = info->io.slave_addr; 1867 return e; 1868 } 1869 } 1870 1871 return NULL; 1872 } 1873 1874 int ipmi_si_add_smi(struct si_sm_io *io) 1875 { 1876 int rv = 0; 1877 struct smi_info *new_smi, *dup; 1878 1879 /* 1880 * If the user gave us a hard-coded device at the same 1881 * address, they presumably want us to use it and not what is 1882 * in the firmware. 1883 */ 1884 if (io->addr_source != SI_HARDCODED && io->addr_source != SI_HOTMOD && 1885 ipmi_si_hardcode_match(io->addr_space, io->addr_data)) { 1886 dev_info(io->dev, 1887 "Hard-coded device at this address already exists"); 1888 return -ENODEV; 1889 } 1890 1891 if (!io->io_setup) { 1892 if (io->addr_space == IPMI_IO_ADDR_SPACE) { 1893 io->io_setup = ipmi_si_port_setup; 1894 } else if (io->addr_space == IPMI_MEM_ADDR_SPACE) { 1895 io->io_setup = ipmi_si_mem_setup; 1896 } else { 1897 return -EINVAL; 1898 } 1899 } 1900 1901 new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL); 1902 if (!new_smi) 1903 return -ENOMEM; 1904 spin_lock_init(&new_smi->si_lock); 1905 1906 new_smi->io = *io; 1907 1908 mutex_lock(&smi_infos_lock); 1909 dup = find_dup_si(new_smi); 1910 if (dup) { 1911 if (new_smi->io.addr_source == SI_ACPI && 1912 dup->io.addr_source == SI_SMBIOS) { 1913 /* We prefer ACPI over SMBIOS. */ 1914 dev_info(dup->io.dev, 1915 "Removing SMBIOS-specified %s state machine in favor of ACPI\n", 1916 si_to_str[new_smi->io.si_type]); 1917 cleanup_one_si(dup); 1918 } else { 1919 dev_info(new_smi->io.dev, 1920 "%s-specified %s state machine: duplicate\n", 1921 ipmi_addr_src_to_str(new_smi->io.addr_source), 1922 si_to_str[new_smi->io.si_type]); 1923 rv = -EBUSY; 1924 kfree(new_smi); 1925 goto out_err; 1926 } 1927 } 1928 1929 pr_info("Adding %s-specified %s state machine\n", 1930 ipmi_addr_src_to_str(new_smi->io.addr_source), 1931 si_to_str[new_smi->io.si_type]); 1932 1933 list_add_tail(&new_smi->link, &smi_infos); 1934 1935 if (initialized) 1936 rv = try_smi_init(new_smi); 1937 out_err: 1938 mutex_unlock(&smi_infos_lock); 1939 return rv; 1940 } 1941 1942 /* 1943 * Try to start up an interface. Must be called with smi_infos_lock 1944 * held, primarily to keep smi_num consistent, we only one to do these 1945 * one at a time. 1946 */ 1947 static int try_smi_init(struct smi_info *new_smi) 1948 { 1949 int rv = 0; 1950 int i; 1951 1952 pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n", 1953 ipmi_addr_src_to_str(new_smi->io.addr_source), 1954 si_to_str[new_smi->io.si_type], 1955 addr_space_to_str[new_smi->io.addr_space], 1956 new_smi->io.addr_data, 1957 new_smi->io.slave_addr, new_smi->io.irq); 1958 1959 switch (new_smi->io.si_type) { 1960 case SI_KCS: 1961 new_smi->handlers = &kcs_smi_handlers; 1962 break; 1963 1964 case SI_SMIC: 1965 new_smi->handlers = &smic_smi_handlers; 1966 break; 1967 1968 case SI_BT: 1969 new_smi->handlers = &bt_smi_handlers; 1970 break; 1971 1972 default: 1973 /* No support for anything else yet. */ 1974 rv = -EIO; 1975 goto out_err; 1976 } 1977 1978 new_smi->si_num = smi_num; 1979 1980 /* Do this early so it's available for logs. */ 1981 if (!new_smi->io.dev) { 1982 pr_err("IPMI interface added with no device\n"); 1983 rv = -EIO; 1984 goto out_err; 1985 } 1986 1987 /* Allocate the state machine's data and initialize it. */ 1988 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); 1989 if (!new_smi->si_sm) { 1990 rv = -ENOMEM; 1991 goto out_err; 1992 } 1993 new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm, 1994 &new_smi->io); 1995 1996 /* Now that we know the I/O size, we can set up the I/O. */ 1997 rv = new_smi->io.io_setup(&new_smi->io); 1998 if (rv) { 1999 dev_err(new_smi->io.dev, "Could not set up I/O space\n"); 2000 goto out_err; 2001 } 2002 2003 /* Do low-level detection first. */ 2004 if (new_smi->handlers->detect(new_smi->si_sm)) { 2005 if (new_smi->io.addr_source) 2006 dev_err(new_smi->io.dev, 2007 "Interface detection failed\n"); 2008 rv = -ENODEV; 2009 goto out_err; 2010 } 2011 2012 /* 2013 * Attempt a get device id command. If it fails, we probably 2014 * don't have a BMC here. 2015 */ 2016 rv = try_get_dev_id(new_smi); 2017 if (rv) { 2018 if (new_smi->io.addr_source) 2019 dev_err(new_smi->io.dev, 2020 "There appears to be no BMC at this location\n"); 2021 goto out_err; 2022 } 2023 2024 setup_oem_data_handler(new_smi); 2025 setup_xaction_handlers(new_smi); 2026 check_for_broken_irqs(new_smi); 2027 2028 new_smi->waiting_msg = NULL; 2029 new_smi->curr_msg = NULL; 2030 atomic_set(&new_smi->req_events, 0); 2031 new_smi->run_to_completion = false; 2032 for (i = 0; i < SI_NUM_STATS; i++) 2033 atomic_set(&new_smi->stats[i], 0); 2034 2035 new_smi->interrupt_disabled = true; 2036 atomic_set(&new_smi->need_watch, 0); 2037 2038 rv = try_enable_event_buffer(new_smi); 2039 if (rv == 0) 2040 new_smi->has_event_buffer = true; 2041 2042 /* 2043 * Start clearing the flags before we enable interrupts or the 2044 * timer to avoid racing with the timer. 2045 */ 2046 start_clear_flags(new_smi); 2047 2048 /* 2049 * IRQ is defined to be set when non-zero. req_events will 2050 * cause a global flags check that will enable interrupts. 2051 */ 2052 if (new_smi->io.irq) { 2053 new_smi->interrupt_disabled = false; 2054 atomic_set(&new_smi->req_events, 1); 2055 } 2056 2057 dev_set_drvdata(new_smi->io.dev, new_smi); 2058 rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group); 2059 if (rv) { 2060 dev_err(new_smi->io.dev, 2061 "Unable to add device attributes: error %d\n", 2062 rv); 2063 goto out_err; 2064 } 2065 new_smi->dev_group_added = true; 2066 2067 rv = ipmi_register_smi(&handlers, 2068 new_smi, 2069 new_smi->io.dev, 2070 new_smi->io.slave_addr); 2071 if (rv) { 2072 dev_err(new_smi->io.dev, 2073 "Unable to register device: error %d\n", 2074 rv); 2075 goto out_err; 2076 } 2077 2078 /* Don't increment till we know we have succeeded. */ 2079 smi_num++; 2080 2081 dev_info(new_smi->io.dev, "IPMI %s interface initialized\n", 2082 si_to_str[new_smi->io.si_type]); 2083 2084 WARN_ON(new_smi->io.dev->init_name != NULL); 2085 2086 out_err: 2087 if (rv && new_smi->io.io_cleanup) { 2088 new_smi->io.io_cleanup(&new_smi->io); 2089 new_smi->io.io_cleanup = NULL; 2090 } 2091 2092 return rv; 2093 } 2094 2095 static int __init init_ipmi_si(void) 2096 { 2097 struct smi_info *e; 2098 enum ipmi_addr_src type = SI_INVALID; 2099 2100 if (initialized) 2101 return 0; 2102 2103 ipmi_hardcode_init(); 2104 2105 pr_info("IPMI System Interface driver\n"); 2106 2107 ipmi_si_platform_init(); 2108 2109 ipmi_si_pci_init(); 2110 2111 ipmi_si_parisc_init(); 2112 2113 /* We prefer devices with interrupts, but in the case of a machine 2114 with multiple BMCs we assume that there will be several instances 2115 of a given type so if we succeed in registering a type then also 2116 try to register everything else of the same type */ 2117 mutex_lock(&smi_infos_lock); 2118 list_for_each_entry(e, &smi_infos, link) { 2119 /* Try to register a device if it has an IRQ and we either 2120 haven't successfully registered a device yet or this 2121 device has the same type as one we successfully registered */ 2122 if (e->io.irq && (!type || e->io.addr_source == type)) { 2123 if (!try_smi_init(e)) { 2124 type = e->io.addr_source; 2125 } 2126 } 2127 } 2128 2129 /* type will only have been set if we successfully registered an si */ 2130 if (type) 2131 goto skip_fallback_noirq; 2132 2133 /* Fall back to the preferred device */ 2134 2135 list_for_each_entry(e, &smi_infos, link) { 2136 if (!e->io.irq && (!type || e->io.addr_source == type)) { 2137 if (!try_smi_init(e)) { 2138 type = e->io.addr_source; 2139 } 2140 } 2141 } 2142 2143 skip_fallback_noirq: 2144 initialized = true; 2145 mutex_unlock(&smi_infos_lock); 2146 2147 if (type) 2148 return 0; 2149 2150 mutex_lock(&smi_infos_lock); 2151 if (unload_when_empty && list_empty(&smi_infos)) { 2152 mutex_unlock(&smi_infos_lock); 2153 cleanup_ipmi_si(); 2154 pr_warn("Unable to find any System Interface(s)\n"); 2155 return -ENODEV; 2156 } else { 2157 mutex_unlock(&smi_infos_lock); 2158 return 0; 2159 } 2160 } 2161 module_init(init_ipmi_si); 2162 2163 static void shutdown_smi(void *send_info) 2164 { 2165 struct smi_info *smi_info = send_info; 2166 2167 if (smi_info->dev_group_added) { 2168 device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group); 2169 smi_info->dev_group_added = false; 2170 } 2171 if (smi_info->io.dev) 2172 dev_set_drvdata(smi_info->io.dev, NULL); 2173 2174 /* 2175 * Make sure that interrupts, the timer and the thread are 2176 * stopped and will not run again. 2177 */ 2178 smi_info->interrupt_disabled = true; 2179 if (smi_info->io.irq_cleanup) { 2180 smi_info->io.irq_cleanup(&smi_info->io); 2181 smi_info->io.irq_cleanup = NULL; 2182 } 2183 stop_timer_and_thread(smi_info); 2184 2185 /* 2186 * Wait until we know that we are out of any interrupt 2187 * handlers might have been running before we freed the 2188 * interrupt. 2189 */ 2190 synchronize_rcu(); 2191 2192 /* 2193 * Timeouts are stopped, now make sure the interrupts are off 2194 * in the BMC. Note that timers and CPU interrupts are off, 2195 * so no need for locks. 2196 */ 2197 while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { 2198 poll(smi_info); 2199 schedule_timeout_uninterruptible(1); 2200 } 2201 if (smi_info->handlers) 2202 disable_si_irq(smi_info); 2203 while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { 2204 poll(smi_info); 2205 schedule_timeout_uninterruptible(1); 2206 } 2207 if (smi_info->handlers) 2208 smi_info->handlers->cleanup(smi_info->si_sm); 2209 2210 if (smi_info->io.addr_source_cleanup) { 2211 smi_info->io.addr_source_cleanup(&smi_info->io); 2212 smi_info->io.addr_source_cleanup = NULL; 2213 } 2214 if (smi_info->io.io_cleanup) { 2215 smi_info->io.io_cleanup(&smi_info->io); 2216 smi_info->io.io_cleanup = NULL; 2217 } 2218 2219 kfree(smi_info->si_sm); 2220 smi_info->si_sm = NULL; 2221 2222 smi_info->intf = NULL; 2223 } 2224 2225 /* 2226 * Must be called with smi_infos_lock held, to serialize the 2227 * smi_info->intf check. 2228 */ 2229 static void cleanup_one_si(struct smi_info *smi_info) 2230 { 2231 if (!smi_info) 2232 return; 2233 2234 list_del(&smi_info->link); 2235 2236 if (smi_info->intf) 2237 ipmi_unregister_smi(smi_info->intf); 2238 2239 kfree(smi_info); 2240 } 2241 2242 int ipmi_si_remove_by_dev(struct device *dev) 2243 { 2244 struct smi_info *e; 2245 int rv = -ENOENT; 2246 2247 mutex_lock(&smi_infos_lock); 2248 list_for_each_entry(e, &smi_infos, link) { 2249 if (e->io.dev == dev) { 2250 cleanup_one_si(e); 2251 rv = 0; 2252 break; 2253 } 2254 } 2255 mutex_unlock(&smi_infos_lock); 2256 2257 return rv; 2258 } 2259 2260 struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type, 2261 unsigned long addr) 2262 { 2263 /* remove */ 2264 struct smi_info *e, *tmp_e; 2265 struct device *dev = NULL; 2266 2267 mutex_lock(&smi_infos_lock); 2268 list_for_each_entry_safe(e, tmp_e, &smi_infos, link) { 2269 if (e->io.addr_space != addr_space) 2270 continue; 2271 if (e->io.si_type != si_type) 2272 continue; 2273 if (e->io.addr_data == addr) { 2274 dev = get_device(e->io.dev); 2275 cleanup_one_si(e); 2276 } 2277 } 2278 mutex_unlock(&smi_infos_lock); 2279 2280 return dev; 2281 } 2282 2283 static void cleanup_ipmi_si(void) 2284 { 2285 struct smi_info *e, *tmp_e; 2286 2287 if (!initialized) 2288 return; 2289 2290 ipmi_si_pci_shutdown(); 2291 2292 ipmi_si_parisc_shutdown(); 2293 2294 ipmi_si_platform_shutdown(); 2295 2296 mutex_lock(&smi_infos_lock); 2297 list_for_each_entry_safe(e, tmp_e, &smi_infos, link) 2298 cleanup_one_si(e); 2299 mutex_unlock(&smi_infos_lock); 2300 2301 ipmi_si_hardcode_exit(); 2302 ipmi_si_hotmod_exit(); 2303 } 2304 module_exit(cleanup_ipmi_si); 2305 2306 MODULE_ALIAS("platform:dmi-ipmi-si"); 2307 MODULE_LICENSE("GPL"); 2308 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 2309 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT" 2310 " system interfaces."); 2311