1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * ipmi_si.c 4 * 5 * The interface to the IPMI driver for the system interfaces (KCS, SMIC, 6 * BT). 7 * 8 * Author: MontaVista Software, Inc. 9 * Corey Minyard <minyard@mvista.com> 10 * source@mvista.com 11 * 12 * Copyright 2002 MontaVista Software Inc. 13 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com> 14 */ 15 16 /* 17 * This file holds the "policy" for the interface to the SMI state 18 * machine. It does the configuration, handles timers and interrupts, 19 * and drives the real SMI state machine. 20 */ 21 22 #include <linux/module.h> 23 #include <linux/moduleparam.h> 24 #include <linux/sched.h> 25 #include <linux/seq_file.h> 26 #include <linux/timer.h> 27 #include <linux/errno.h> 28 #include <linux/spinlock.h> 29 #include <linux/slab.h> 30 #include <linux/delay.h> 31 #include <linux/list.h> 32 #include <linux/notifier.h> 33 #include <linux/mutex.h> 34 #include <linux/kthread.h> 35 #include <asm/irq.h> 36 #include <linux/interrupt.h> 37 #include <linux/rcupdate.h> 38 #include <linux/ipmi.h> 39 #include <linux/ipmi_smi.h> 40 #include "ipmi_si.h" 41 #include <linux/string.h> 42 #include <linux/ctype.h> 43 44 #define PFX "ipmi_si: " 45 46 /* Measure times between events in the driver. */ 47 #undef DEBUG_TIMING 48 49 /* Call every 10 ms. */ 50 #define SI_TIMEOUT_TIME_USEC 10000 51 #define SI_USEC_PER_JIFFY (1000000/HZ) 52 #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) 53 #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a 54 short timeout */ 55 56 enum si_intf_state { 57 SI_NORMAL, 58 SI_GETTING_FLAGS, 59 SI_GETTING_EVENTS, 60 SI_CLEARING_FLAGS, 61 SI_GETTING_MESSAGES, 62 SI_CHECKING_ENABLES, 63 SI_SETTING_ENABLES 64 /* FIXME - add watchdog stuff. */ 65 }; 66 67 /* Some BT-specific defines we need here. */ 68 #define IPMI_BT_INTMASK_REG 2 69 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2 70 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1 71 72 static const char * const si_to_str[] = { "invalid", "kcs", "smic", "bt" }; 73 74 static int initialized; 75 76 /* 77 * Indexes into stats[] in smi_info below. 78 */ 79 enum si_stat_indexes { 80 /* 81 * Number of times the driver requested a timer while an operation 82 * was in progress. 83 */ 84 SI_STAT_short_timeouts = 0, 85 86 /* 87 * Number of times the driver requested a timer while nothing was in 88 * progress. 89 */ 90 SI_STAT_long_timeouts, 91 92 /* Number of times the interface was idle while being polled. */ 93 SI_STAT_idles, 94 95 /* Number of interrupts the driver handled. */ 96 SI_STAT_interrupts, 97 98 /* Number of time the driver got an ATTN from the hardware. */ 99 SI_STAT_attentions, 100 101 /* Number of times the driver requested flags from the hardware. */ 102 SI_STAT_flag_fetches, 103 104 /* Number of times the hardware didn't follow the state machine. */ 105 SI_STAT_hosed_count, 106 107 /* Number of completed messages. */ 108 SI_STAT_complete_transactions, 109 110 /* Number of IPMI events received from the hardware. */ 111 SI_STAT_events, 112 113 /* Number of watchdog pretimeouts. */ 114 SI_STAT_watchdog_pretimeouts, 115 116 /* Number of asynchronous messages received. */ 117 SI_STAT_incoming_messages, 118 119 120 /* This *must* remain last, add new values above this. 
*/ 121 SI_NUM_STATS 122 }; 123 124 struct smi_info { 125 int intf_num; 126 ipmi_smi_t intf; 127 struct si_sm_data *si_sm; 128 const struct si_sm_handlers *handlers; 129 spinlock_t si_lock; 130 struct ipmi_smi_msg *waiting_msg; 131 struct ipmi_smi_msg *curr_msg; 132 enum si_intf_state si_state; 133 134 /* 135 * Used to handle the various types of I/O that can occur with 136 * IPMI 137 */ 138 struct si_sm_io io; 139 140 /* 141 * Per-OEM handler, called from handle_flags(). Returns 1 142 * when handle_flags() needs to be re-run or 0 indicating it 143 * set si_state itself. 144 */ 145 int (*oem_data_avail_handler)(struct smi_info *smi_info); 146 147 /* 148 * Flags from the last GET_MSG_FLAGS command, used when an ATTN 149 * is set to hold the flags until we are done handling everything 150 * from the flags. 151 */ 152 #define RECEIVE_MSG_AVAIL 0x01 153 #define EVENT_MSG_BUFFER_FULL 0x02 154 #define WDT_PRE_TIMEOUT_INT 0x08 155 #define OEM0_DATA_AVAIL 0x20 156 #define OEM1_DATA_AVAIL 0x40 157 #define OEM2_DATA_AVAIL 0x80 158 #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \ 159 OEM1_DATA_AVAIL | \ 160 OEM2_DATA_AVAIL) 161 unsigned char msg_flags; 162 163 /* Does the BMC have an event buffer? */ 164 bool has_event_buffer; 165 166 /* 167 * If set to true, this will request events the next time the 168 * state machine is idle. 169 */ 170 atomic_t req_events; 171 172 /* 173 * If true, run the state machine to completion on every send 174 * call. Generally used after a panic to make sure stuff goes 175 * out. 176 */ 177 bool run_to_completion; 178 179 /* The timer for this si. */ 180 struct timer_list si_timer; 181 182 /* This flag is set, if the timer can be set */ 183 bool timer_can_start; 184 185 /* This flag is set, if the timer is running (timer_pending() isn't enough) */ 186 bool timer_running; 187 188 /* The time (in jiffies) the last timeout occurred at. */ 189 unsigned long last_timeout_jiffies; 190 191 /* Are we waiting for the events, pretimeouts, received msgs? */ 192 atomic_t need_watch; 193 194 /* 195 * The driver will disable interrupts when it gets into a 196 * situation where it cannot handle messages due to lack of 197 * memory. Once that situation clears up, it will re-enable 198 * interrupts. 199 */ 200 bool interrupt_disabled; 201 202 /* 203 * Does the BMC support events? 204 */ 205 bool supports_event_msg_buff; 206 207 /* 208 * Can we disable interrupts the global enables receive irq 209 * bit? There are currently two forms of brokenness, some 210 * systems cannot disable the bit (which is technically within 211 * the spec but a bad idea) and some systems have the bit 212 * forced to zero even though interrupts work (which is 213 * clearly outside the spec). The next bool tells which form 214 * of brokenness is present. 215 */ 216 bool cannot_disable_irq; 217 218 /* 219 * Some systems are broken and cannot set the irq enable 220 * bit, even if they support interrupts. 221 */ 222 bool irq_enable_broken; 223 224 /* 225 * Did we get an attention that we did not handle? 226 */ 227 bool got_attn; 228 229 /* From the get device id response... */ 230 struct ipmi_device_id device_id; 231 232 /* Default driver model device. */ 233 struct platform_device *pdev; 234 235 /* Have we added the device group to the device? */ 236 bool dev_group_added; 237 238 /* Have we added the platform device? */ 239 bool pdev_registered; 240 241 /* Counters and things for the proc filesystem. 
*/ 242 atomic_t stats[SI_NUM_STATS]; 243 244 struct task_struct *thread; 245 246 struct list_head link; 247 }; 248 249 #define smi_inc_stat(smi, stat) \ 250 atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) 251 #define smi_get_stat(smi, stat) \ 252 ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) 253 254 #define IPMI_MAX_INTFS 4 255 static int force_kipmid[IPMI_MAX_INTFS]; 256 static int num_force_kipmid; 257 258 static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS]; 259 static int num_max_busy_us; 260 261 static bool unload_when_empty = true; 262 263 static int try_smi_init(struct smi_info *smi); 264 static void shutdown_one_si(struct smi_info *smi_info); 265 static void cleanup_one_si(struct smi_info *smi_info); 266 static void cleanup_ipmi_si(void); 267 268 #ifdef DEBUG_TIMING 269 void debug_timestamp(char *msg) 270 { 271 struct timespec64 t; 272 273 getnstimeofday64(&t); 274 pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec); 275 } 276 #else 277 #define debug_timestamp(x) 278 #endif 279 280 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); 281 static int register_xaction_notifier(struct notifier_block *nb) 282 { 283 return atomic_notifier_chain_register(&xaction_notifier_list, nb); 284 } 285 286 static void deliver_recv_msg(struct smi_info *smi_info, 287 struct ipmi_smi_msg *msg) 288 { 289 /* Deliver the message to the upper layer. */ 290 if (smi_info->intf) 291 ipmi_smi_msg_received(smi_info->intf, msg); 292 else 293 ipmi_free_smi_msg(msg); 294 } 295 296 static void return_hosed_msg(struct smi_info *smi_info, int cCode) 297 { 298 struct ipmi_smi_msg *msg = smi_info->curr_msg; 299 300 if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED) 301 cCode = IPMI_ERR_UNSPECIFIED; 302 /* else use it as is */ 303 304 /* Make it a response */ 305 msg->rsp[0] = msg->data[0] | 4; 306 msg->rsp[1] = msg->data[1]; 307 msg->rsp[2] = cCode; 308 msg->rsp_size = 3; 309 310 smi_info->curr_msg = NULL; 311 deliver_recv_msg(smi_info, msg); 312 } 313 314 static enum si_sm_result start_next_msg(struct smi_info *smi_info) 315 { 316 int rv; 317 318 if (!smi_info->waiting_msg) { 319 smi_info->curr_msg = NULL; 320 rv = SI_SM_IDLE; 321 } else { 322 int err; 323 324 smi_info->curr_msg = smi_info->waiting_msg; 325 smi_info->waiting_msg = NULL; 326 debug_timestamp("Start2"); 327 err = atomic_notifier_call_chain(&xaction_notifier_list, 328 0, smi_info); 329 if (err & NOTIFY_STOP_MASK) { 330 rv = SI_SM_CALL_WITHOUT_DELAY; 331 goto out; 332 } 333 err = smi_info->handlers->start_transaction( 334 smi_info->si_sm, 335 smi_info->curr_msg->data, 336 smi_info->curr_msg->data_size); 337 if (err) 338 return_hosed_msg(smi_info, err); 339 340 rv = SI_SM_CALL_WITHOUT_DELAY; 341 } 342 out: 343 return rv; 344 } 345 346 static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) 347 { 348 if (!smi_info->timer_can_start) 349 return; 350 smi_info->last_timeout_jiffies = jiffies; 351 mod_timer(&smi_info->si_timer, new_val); 352 smi_info->timer_running = true; 353 } 354 355 /* 356 * Start a new message and (re)start the timer and thread. 
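 *
 * The timer and the kthread are kicked before start_transaction() so
 * that, even without a working interrupt, something keeps driving the
 * state machine until the transaction finishes.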
 */
static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
			  unsigned int size)
{
	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

	if (smi_info->thread)
		wake_up_process(smi_info->thread);

	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
}

static void start_check_enables(struct smi_info *smi_info)
{
	unsigned char msg[2];

	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

	start_new_msg(smi_info, msg, 2);
	smi_info->si_state = SI_CHECKING_ENABLES;
}

static void start_clear_flags(struct smi_info *smi_info)
{
	unsigned char msg[3];

	/* Make sure the watchdog pre-timeout flag is not set at startup. */
	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
	msg[2] = WDT_PRE_TIMEOUT_INT;

	start_new_msg(smi_info, msg, 3);
	smi_info->si_state = SI_CLEARING_FLAGS;
}

static void start_getting_msg_queue(struct smi_info *smi_info)
{
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
	smi_info->curr_msg->data_size = 2;

	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_MESSAGES;
}

static void start_getting_events(struct smi_info *smi_info)
{
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
	smi_info->curr_msg->data_size = 2;

	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_EVENTS;
}

/*
 * When we have a situation where we run out of memory and cannot
 * allocate messages, we just leave them in the BMC and run the system
 * polled until we can allocate some memory.  Once we have some
 * memory, we will re-enable the interrupt.
 *
 * Note that we cannot just use disable_irq(), since the interrupt may
 * be shared.
 */
static inline bool disable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = true;
		start_check_enables(smi_info);
		return true;
	}
	return false;
}

static inline bool enable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = false;
		start_check_enables(smi_info);
		return true;
	}
	return false;
}

/*
 * Allocate a message.  If unable to allocate, start the interrupt
 * disable process and return NULL.  If able to allocate but
 * interrupts are disabled, free the message and return NULL after
 * starting the interrupt enable process.
 */
static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	msg = ipmi_alloc_smi_msg();
	if (!msg) {
		if (!disable_si_irq(smi_info))
			smi_info->si_state = SI_NORMAL;
	} else if (enable_si_irq(smi_info)) {
		ipmi_free_smi_msg(msg);
		msg = NULL;
	}
	return msg;
}

static void handle_flags(struct smi_info *smi_info)
{
retry:
	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
		/* Watchdog pre-timeout */
		smi_inc_stat(smi_info, watchdog_pretimeouts);

		start_clear_flags(smi_info);
		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
		if (smi_info->intf)
			ipmi_smi_watchdog_pretimeout(smi_info->intf);
	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
		/* Messages available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_msg_queue(smi_info);
	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
		/* Events available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_events(smi_info);
	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
		   smi_info->oem_data_avail_handler) {
		if (smi_info->oem_data_avail_handler(smi_info))
			goto retry;
	} else
		smi_info->si_state = SI_NORMAL;
}

/*
 * Global enables we care about.
 */
#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
			     IPMI_BMC_EVT_MSG_INTR)

static u8 current_global_enables(struct smi_info *smi_info, u8 base,
				 bool *irq_on)
{
	u8 enables = 0;

	if (smi_info->supports_event_msg_buff)
		enables |= IPMI_BMC_EVT_MSG_BUFF;

	if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
	     smi_info->cannot_disable_irq) &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_RCV_MSG_INTR;

	if (smi_info->supports_event_msg_buff &&
	    smi_info->io.irq && !smi_info->interrupt_disabled &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_EVT_MSG_INTR;

	*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);

	return enables;
}

static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
{
	u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);

	irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;

	if ((bool)irqstate == irq_on)
		return;

	if (irq_on)
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
	else
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
}

static void handle_transaction_done(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	debug_timestamp("Done");
	switch (smi_info->si_state) {
	case SI_NORMAL:
		if (!smi_info->curr_msg)
			break;

		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		deliver_recv_msg(smi_info, msg);
		break;

	case SI_GETTING_FLAGS:
	{
		unsigned char msg[4];
		unsigned int len;

		/*
		 * We got the flags from the SMI, now handle them.
		 */
		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			/* Error fetching flags, just give up for now. */
			smi_info->si_state = SI_NORMAL;
		} else if (len < 4) {
			/*
			 * Hmm, no flags.  That's technically illegal, but
			 * don't use uninitialized data.
			 */
			smi_info->si_state = SI_NORMAL;
		} else {
			smi_info->msg_flags = msg[3];
			handle_flags(smi_info);
		}
		break;
	}

	case SI_CLEARING_FLAGS:
	{
		unsigned char msg[3];

		/* We cleared the flags. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
		if (msg[2] != 0) {
			/* Error clearing flags */
			dev_warn(smi_info->io.dev,
				 "Error clearing flags: %2.2x\n", msg[2]);
		}
		smi_info->si_state = SI_NORMAL;
		break;
	}

	case SI_GETTING_EVENTS:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the event flag. */
			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, events);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_GETTING_MESSAGES:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting message, probably done. */
			msg->done(msg);

			/* Take off the msg flag. */
			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, incoming_messages);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_CHECKING_ENABLES:
	{
		unsigned char msg[4];
		u8 enables;
		bool irq_on;

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			dev_warn(smi_info->io.dev,
				 "Couldn't get irq info: %x.\n", msg[2]);
			dev_warn(smi_info->io.dev,
				 "Maybe ok, but ipmi might run very slowly.\n");
			smi_info->si_state = SI_NORMAL;
			break;
		}
		enables = current_global_enables(smi_info, 0, &irq_on);
		if (smi_info->io.si_type == SI_BT)
			/* BT has its own interrupt enable bit. */
			check_bt_irq(smi_info, irq_on);
		if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
			/*
			 * Enables are not correct, fix them.
			 */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 3);
			smi_info->si_state = SI_SETTING_ENABLES;
		} else if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}

	case SI_SETTING_ENABLES:
	{
		unsigned char msg[4];

		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0)
			dev_warn(smi_info->io.dev,
				 "Could not set the global enables: 0x%x.\n",
				 msg[2]);

		if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}
	}
}

/*
 * Called on timeouts and events.  Timeouts should pass the elapsed
 * time, interrupts should pass in zero.  Must be called with
 * si_lock held and interrupts disabled.
 */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
					   int time)
{
	enum si_sm_result si_sm_result;

restart:
	/*
	 * There used to be a loop here that waited a little while
	 * (around 25us) before giving up.  That turned out to be
	 * pointless, the minimum delays I was seeing were in the 300us
	 * range, which is far too long to wait in an interrupt.  So
	 * we just run until the state machine tells us something
	 * happened or it needs a delay.
	 */
	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
	time = 0;
	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
		smi_inc_stat(smi_info, complete_transactions);

		handle_transaction_done(smi_info);
		goto restart;
	} else if (si_sm_result == SI_SM_HOSED) {
		smi_inc_stat(smi_info, hosed_count);

		/*
		 * Do this before return_hosed_msg(), because that
		 * releases the lock.
		 */
		smi_info->si_state = SI_NORMAL;
		if (smi_info->curr_msg != NULL) {
			/*
			 * If we were handling a user message, format
			 * a response to send to the upper layer to
			 * tell it about the error.
			 */
			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
		}
		goto restart;
	}

	/*
	 * We prefer handling attn over new messages.  But don't do
	 * this if there is not yet an upper layer to handle anything.
	 */
	if (likely(smi_info->intf) &&
	    (si_sm_result == SI_SM_ATTN || smi_info->got_attn)) {
		unsigned char msg[2];

		if (smi_info->si_state != SI_NORMAL) {
			/*
			 * We got an ATTN, but we are doing something else.
			 * Handle the ATTN later.
			 */
			smi_info->got_attn = true;
		} else {
			smi_info->got_attn = false;
			smi_inc_stat(smi_info, attentions);

			/*
			 * Got an attn, send down a get message flags to see
			 * what's causing it.  It would be better to handle
			 * this in the upper layer, but due to the way
			 * interrupts work with the SMI, that's not really
			 * possible.
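			 * The reply to this Get Message Flags request is
			 * picked up in the SI_GETTING_FLAGS case of
			 * handle_transaction_done() above.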
			 */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_GET_MSG_FLAGS_CMD;

			start_new_msg(smi_info, msg, 2);
			smi_info->si_state = SI_GETTING_FLAGS;
			goto restart;
		}
	}

	/* If we are currently idle, try to start the next message. */
	if (si_sm_result == SI_SM_IDLE) {
		smi_inc_stat(smi_info, idles);

		si_sm_result = start_next_msg(smi_info);
		if (si_sm_result != SI_SM_IDLE)
			goto restart;
	}

	if ((si_sm_result == SI_SM_IDLE)
	    && (atomic_read(&smi_info->req_events))) {
		/*
		 * We are idle and the upper layer requested that I fetch
		 * events, so do so.
		 */
		atomic_set(&smi_info->req_events, 0);

		/*
		 * Take this opportunity to check the interrupt and
		 * message enable state for the BMC.  The BMC can be
		 * asynchronously reset, and may thus get interrupts
		 * and messages disabled.
		 */
		if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
			start_check_enables(smi_info);
		} else {
			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
			if (!smi_info->curr_msg)
				goto out;

			start_getting_events(smi_info);
		}
		goto restart;
	}

	if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
		/* Ok if it fails, the timer will just go off. */
		if (del_timer(&smi_info->si_timer))
			smi_info->timer_running = false;
	}

out:
	return si_sm_result;
}

static void check_start_timer_thread(struct smi_info *smi_info)
{
	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		if (smi_info->thread)
			wake_up_process(smi_info->thread);

		start_next_msg(smi_info);
		smi_event_handler(smi_info, 0);
	}
}

static void flush_messages(void *send_info)
{
	struct smi_info *smi_info = send_info;
	enum si_sm_result result;

	/*
	 * Currently, this function is called only in run-to-completion
	 * mode.  This means we are single-threaded, no need for locks.
	 */
	result = smi_event_handler(smi_info, 0);
	while (result != SI_SM_IDLE) {
		udelay(SI_SHORT_TIMEOUT_USEC);
		result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
	}
}

static void sender(void *send_info,
		   struct ipmi_smi_msg *msg)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;

	debug_timestamp("Enqueue");

	if (smi_info->run_to_completion) {
		/*
		 * If we are running to completion, start it.  Upper
		 * layer will call flush_messages to clear it out.
		 */
		smi_info->waiting_msg = msg;
		return;
	}

	spin_lock_irqsave(&smi_info->si_lock, flags);
	/*
	 * The following two lines don't need to be under the lock for
	 * the lock's sake, but they do need SMP memory barriers to
	 * avoid getting things out of order.  We are already claiming
	 * the lock, anyway, so just do it under the lock to avoid the
	 * ordering problem.
	 */
	BUG_ON(smi_info->waiting_msg);
	smi_info->waiting_msg = msg;
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

static void set_run_to_completion(void *send_info, bool i_run_to_completion)
{
	struct smi_info *smi_info = send_info;

	smi_info->run_to_completion = i_run_to_completion;
	if (i_run_to_completion)
		flush_messages(smi_info);
}

/*
 * Use -1 in the nsec value of the busy waiting timespec to tell that
 * we are spinning in kipmid looking for something and not delaying
 * between checks.
 */
static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
{
	ts->tv_nsec = -1;
}
static inline int ipmi_si_is_busy(struct timespec64 *ts)
{
	return ts->tv_nsec != -1;
}

static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
					const struct smi_info *smi_info,
					struct timespec64 *busy_until)
{
	unsigned int max_busy_us = 0;

	if (smi_info->intf_num < num_max_busy_us)
		max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
		ipmi_si_set_not_busy(busy_until);
	else if (!ipmi_si_is_busy(busy_until)) {
		getnstimeofday64(busy_until);
		timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
	} else {
		struct timespec64 now;

		getnstimeofday64(&now);
		if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
			ipmi_si_set_not_busy(busy_until);
			return 0;
		}
	}
	return 1;
}


/*
 * A busy-waiting loop for speeding up IPMI operation.
 *
 * Lousy hardware makes this hard.  This is only enabled for systems
 * that are not BT and do not have interrupts.  It starts spinning
 * when an operation is complete or until max_busy tells it to stop
 * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
 * Documentation/IPMI.txt for details.
 */
static int ipmi_thread(void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;
	enum si_sm_result smi_result;
	struct timespec64 busy_until;

	ipmi_si_set_not_busy(&busy_until);
	set_user_nice(current, MAX_NICE);
	while (!kthread_should_stop()) {
		int busy_wait;

		spin_lock_irqsave(&(smi_info->si_lock), flags);
		smi_result = smi_event_handler(smi_info, 0);

		/*
		 * If the driver is doing something, there is a possible
		 * race with the timer.  If the timer handler sees idle,
		 * and the thread here sees something else, the timer
		 * handler won't restart the timer even though it is
		 * required.  So start it here if necessary.
		 */
		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
						  &busy_until);
		if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
			; /* do nothing */
		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
			schedule();
		else if (smi_result == SI_SM_IDLE) {
			if (atomic_read(&smi_info->need_watch)) {
				schedule_timeout_interruptible(100);
			} else {
				/* Wait to be woken up when we are needed.
*/ 1031 __set_current_state(TASK_INTERRUPTIBLE); 1032 schedule(); 1033 } 1034 } else 1035 schedule_timeout_interruptible(1); 1036 } 1037 return 0; 1038 } 1039 1040 1041 static void poll(void *send_info) 1042 { 1043 struct smi_info *smi_info = send_info; 1044 unsigned long flags = 0; 1045 bool run_to_completion = smi_info->run_to_completion; 1046 1047 /* 1048 * Make sure there is some delay in the poll loop so we can 1049 * drive time forward and timeout things. 1050 */ 1051 udelay(10); 1052 if (!run_to_completion) 1053 spin_lock_irqsave(&smi_info->si_lock, flags); 1054 smi_event_handler(smi_info, 10); 1055 if (!run_to_completion) 1056 spin_unlock_irqrestore(&smi_info->si_lock, flags); 1057 } 1058 1059 static void request_events(void *send_info) 1060 { 1061 struct smi_info *smi_info = send_info; 1062 1063 if (!smi_info->has_event_buffer) 1064 return; 1065 1066 atomic_set(&smi_info->req_events, 1); 1067 } 1068 1069 static void set_need_watch(void *send_info, bool enable) 1070 { 1071 struct smi_info *smi_info = send_info; 1072 unsigned long flags; 1073 1074 atomic_set(&smi_info->need_watch, enable); 1075 spin_lock_irqsave(&smi_info->si_lock, flags); 1076 check_start_timer_thread(smi_info); 1077 spin_unlock_irqrestore(&smi_info->si_lock, flags); 1078 } 1079 1080 static void smi_timeout(struct timer_list *t) 1081 { 1082 struct smi_info *smi_info = from_timer(smi_info, t, si_timer); 1083 enum si_sm_result smi_result; 1084 unsigned long flags; 1085 unsigned long jiffies_now; 1086 long time_diff; 1087 long timeout; 1088 1089 spin_lock_irqsave(&(smi_info->si_lock), flags); 1090 debug_timestamp("Timer"); 1091 1092 jiffies_now = jiffies; 1093 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) 1094 * SI_USEC_PER_JIFFY); 1095 smi_result = smi_event_handler(smi_info, time_diff); 1096 1097 if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { 1098 /* Running with interrupts, only do long timeouts. */ 1099 timeout = jiffies + SI_TIMEOUT_JIFFIES; 1100 smi_inc_stat(smi_info, long_timeouts); 1101 goto do_mod_timer; 1102 } 1103 1104 /* 1105 * If the state machine asks for a short delay, then shorten 1106 * the timer timeout. 1107 */ 1108 if (smi_result == SI_SM_CALL_WITH_DELAY) { 1109 smi_inc_stat(smi_info, short_timeouts); 1110 timeout = jiffies + 1; 1111 } else { 1112 smi_inc_stat(smi_info, long_timeouts); 1113 timeout = jiffies + SI_TIMEOUT_JIFFIES; 1114 } 1115 1116 do_mod_timer: 1117 if (smi_result != SI_SM_IDLE) 1118 smi_mod_timer(smi_info, timeout); 1119 else 1120 smi_info->timer_running = false; 1121 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 1122 } 1123 1124 irqreturn_t ipmi_si_irq_handler(int irq, void *data) 1125 { 1126 struct smi_info *smi_info = data; 1127 unsigned long flags; 1128 1129 if (smi_info->io.si_type == SI_BT) 1130 /* We need to clear the IRQ flag for the BT interface. */ 1131 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 1132 IPMI_BT_INTMASK_CLEAR_IRQ_BIT 1133 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT); 1134 1135 spin_lock_irqsave(&(smi_info->si_lock), flags); 1136 1137 smi_inc_stat(smi_info, interrupts); 1138 1139 debug_timestamp("Interrupt"); 1140 1141 smi_event_handler(smi_info, 0); 1142 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 1143 return IRQ_HANDLED; 1144 } 1145 1146 static int smi_start_processing(void *send_info, 1147 ipmi_smi_t intf) 1148 { 1149 struct smi_info *new_smi = send_info; 1150 int enable = 0; 1151 1152 new_smi->intf = intf; 1153 1154 /* Set up the timer that drives the interface. 
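	 *
	 * Note that timer_can_start must be set before smi_mod_timer()
	 * will actually arm the timer; it is cleared again by
	 * stop_timer_and_thread() during teardown.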
*/ 1155 timer_setup(&new_smi->si_timer, smi_timeout, 0); 1156 new_smi->timer_can_start = true; 1157 smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); 1158 1159 /* Try to claim any interrupts. */ 1160 if (new_smi->io.irq_setup) { 1161 new_smi->io.irq_handler_data = new_smi; 1162 new_smi->io.irq_setup(&new_smi->io); 1163 } 1164 1165 /* 1166 * Check if the user forcefully enabled the daemon. 1167 */ 1168 if (new_smi->intf_num < num_force_kipmid) 1169 enable = force_kipmid[new_smi->intf_num]; 1170 /* 1171 * The BT interface is efficient enough to not need a thread, 1172 * and there is no need for a thread if we have interrupts. 1173 */ 1174 else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq)) 1175 enable = 1; 1176 1177 if (enable) { 1178 new_smi->thread = kthread_run(ipmi_thread, new_smi, 1179 "kipmi%d", new_smi->intf_num); 1180 if (IS_ERR(new_smi->thread)) { 1181 dev_notice(new_smi->io.dev, "Could not start" 1182 " kernel thread due to error %ld, only using" 1183 " timers to drive the interface\n", 1184 PTR_ERR(new_smi->thread)); 1185 new_smi->thread = NULL; 1186 } 1187 } 1188 1189 return 0; 1190 } 1191 1192 static int get_smi_info(void *send_info, struct ipmi_smi_info *data) 1193 { 1194 struct smi_info *smi = send_info; 1195 1196 data->addr_src = smi->io.addr_source; 1197 data->dev = smi->io.dev; 1198 data->addr_info = smi->io.addr_info; 1199 get_device(smi->io.dev); 1200 1201 return 0; 1202 } 1203 1204 static void set_maintenance_mode(void *send_info, bool enable) 1205 { 1206 struct smi_info *smi_info = send_info; 1207 1208 if (!enable) 1209 atomic_set(&smi_info->req_events, 0); 1210 } 1211 1212 static const struct ipmi_smi_handlers handlers = { 1213 .owner = THIS_MODULE, 1214 .start_processing = smi_start_processing, 1215 .get_smi_info = get_smi_info, 1216 .sender = sender, 1217 .request_events = request_events, 1218 .set_need_watch = set_need_watch, 1219 .set_maintenance_mode = set_maintenance_mode, 1220 .set_run_to_completion = set_run_to_completion, 1221 .flush_messages = flush_messages, 1222 .poll = poll, 1223 }; 1224 1225 static LIST_HEAD(smi_infos); 1226 static DEFINE_MUTEX(smi_infos_lock); 1227 static int smi_num; /* Used to sequence the SMIs */ 1228 1229 static const char * const addr_space_to_str[] = { "i/o", "mem" }; 1230 1231 module_param_array(force_kipmid, int, &num_force_kipmid, 0); 1232 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or" 1233 " disabled(0). Normally the IPMI driver auto-detects" 1234 " this, but the value may be overridden by this parm."); 1235 module_param(unload_when_empty, bool, 0); 1236 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are" 1237 " specified or found, default is 1. Setting to 0" 1238 " is useful for hot add of devices using hotmod."); 1239 module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644); 1240 MODULE_PARM_DESC(kipmid_max_busy_us, 1241 "Max time (in microseconds) to busy-wait for IPMI data before" 1242 " sleeping. 0 (default) means to wait forever. Set to 100-500" 1243 " if kipmid is using up a lot of CPU time."); 1244 1245 void ipmi_irq_finish_setup(struct si_sm_io *io) 1246 { 1247 if (io->si_type == SI_BT) 1248 /* Enable the interrupt in the BT interface. */ 1249 io->outputb(io, IPMI_BT_INTMASK_REG, 1250 IPMI_BT_INTMASK_ENABLE_IRQ_BIT); 1251 } 1252 1253 void ipmi_irq_start_cleanup(struct si_sm_io *io) 1254 { 1255 if (io->si_type == SI_BT) 1256 /* Disable the interrupt in the BT interface. 
*/ 1257 io->outputb(io, IPMI_BT_INTMASK_REG, 0); 1258 } 1259 1260 static void std_irq_cleanup(struct si_sm_io *io) 1261 { 1262 ipmi_irq_start_cleanup(io); 1263 free_irq(io->irq, io->irq_handler_data); 1264 } 1265 1266 int ipmi_std_irq_setup(struct si_sm_io *io) 1267 { 1268 int rv; 1269 1270 if (!io->irq) 1271 return 0; 1272 1273 rv = request_irq(io->irq, 1274 ipmi_si_irq_handler, 1275 IRQF_SHARED, 1276 DEVICE_NAME, 1277 io->irq_handler_data); 1278 if (rv) { 1279 dev_warn(io->dev, "%s unable to claim interrupt %d," 1280 " running polled\n", 1281 DEVICE_NAME, io->irq); 1282 io->irq = 0; 1283 } else { 1284 io->irq_cleanup = std_irq_cleanup; 1285 ipmi_irq_finish_setup(io); 1286 dev_info(io->dev, "Using irq %d\n", io->irq); 1287 } 1288 1289 return rv; 1290 } 1291 1292 static int wait_for_msg_done(struct smi_info *smi_info) 1293 { 1294 enum si_sm_result smi_result; 1295 1296 smi_result = smi_info->handlers->event(smi_info->si_sm, 0); 1297 for (;;) { 1298 if (smi_result == SI_SM_CALL_WITH_DELAY || 1299 smi_result == SI_SM_CALL_WITH_TICK_DELAY) { 1300 schedule_timeout_uninterruptible(1); 1301 smi_result = smi_info->handlers->event( 1302 smi_info->si_sm, jiffies_to_usecs(1)); 1303 } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { 1304 smi_result = smi_info->handlers->event( 1305 smi_info->si_sm, 0); 1306 } else 1307 break; 1308 } 1309 if (smi_result == SI_SM_HOSED) 1310 /* 1311 * We couldn't get the state machine to run, so whatever's at 1312 * the port is probably not an IPMI SMI interface. 1313 */ 1314 return -ENODEV; 1315 1316 return 0; 1317 } 1318 1319 static int try_get_dev_id(struct smi_info *smi_info) 1320 { 1321 unsigned char msg[2]; 1322 unsigned char *resp; 1323 unsigned long resp_len; 1324 int rv = 0; 1325 1326 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); 1327 if (!resp) 1328 return -ENOMEM; 1329 1330 /* 1331 * Do a Get Device ID command, since it comes back with some 1332 * useful info. 1333 */ 1334 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 1335 msg[1] = IPMI_GET_DEVICE_ID_CMD; 1336 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 1337 1338 rv = wait_for_msg_done(smi_info); 1339 if (rv) 1340 goto out; 1341 1342 resp_len = smi_info->handlers->get_result(smi_info->si_sm, 1343 resp, IPMI_MAX_MSG_LENGTH); 1344 1345 /* Check and record info from the get device id, in case we need it. 
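	 * The parsed device_id is what the Dell PowerEdge OEM quirk
	 * handlers further down key off of.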
*/ 1346 rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1], 1347 resp + 2, resp_len - 2, &smi_info->device_id); 1348 1349 out: 1350 kfree(resp); 1351 return rv; 1352 } 1353 1354 static int get_global_enables(struct smi_info *smi_info, u8 *enables) 1355 { 1356 unsigned char msg[3]; 1357 unsigned char *resp; 1358 unsigned long resp_len; 1359 int rv; 1360 1361 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); 1362 if (!resp) 1363 return -ENOMEM; 1364 1365 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 1366 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 1367 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 1368 1369 rv = wait_for_msg_done(smi_info); 1370 if (rv) { 1371 dev_warn(smi_info->io.dev, 1372 "Error getting response from get global enables command: %d\n", 1373 rv); 1374 goto out; 1375 } 1376 1377 resp_len = smi_info->handlers->get_result(smi_info->si_sm, 1378 resp, IPMI_MAX_MSG_LENGTH); 1379 1380 if (resp_len < 4 || 1381 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || 1382 resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || 1383 resp[2] != 0) { 1384 dev_warn(smi_info->io.dev, 1385 "Invalid return from get global enables command: %ld %x %x %x\n", 1386 resp_len, resp[0], resp[1], resp[2]); 1387 rv = -EINVAL; 1388 goto out; 1389 } else { 1390 *enables = resp[3]; 1391 } 1392 1393 out: 1394 kfree(resp); 1395 return rv; 1396 } 1397 1398 /* 1399 * Returns 1 if it gets an error from the command. 1400 */ 1401 static int set_global_enables(struct smi_info *smi_info, u8 enables) 1402 { 1403 unsigned char msg[3]; 1404 unsigned char *resp; 1405 unsigned long resp_len; 1406 int rv; 1407 1408 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); 1409 if (!resp) 1410 return -ENOMEM; 1411 1412 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 1413 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; 1414 msg[2] = enables; 1415 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); 1416 1417 rv = wait_for_msg_done(smi_info); 1418 if (rv) { 1419 dev_warn(smi_info->io.dev, 1420 "Error getting response from set global enables command: %d\n", 1421 rv); 1422 goto out; 1423 } 1424 1425 resp_len = smi_info->handlers->get_result(smi_info->si_sm, 1426 resp, IPMI_MAX_MSG_LENGTH); 1427 1428 if (resp_len < 3 || 1429 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || 1430 resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { 1431 dev_warn(smi_info->io.dev, 1432 "Invalid return from set global enables command: %ld %x %x\n", 1433 resp_len, resp[0], resp[1]); 1434 rv = -EINVAL; 1435 goto out; 1436 } 1437 1438 if (resp[2] != 0) 1439 rv = 1; 1440 1441 out: 1442 kfree(resp); 1443 return rv; 1444 } 1445 1446 /* 1447 * Some BMCs do not support clearing the receive irq bit in the global 1448 * enables (even if they don't support interrupts on the BMC). Check 1449 * for this and handle it properly. 1450 */ 1451 static void check_clr_rcv_irq(struct smi_info *smi_info) 1452 { 1453 u8 enables = 0; 1454 int rv; 1455 1456 rv = get_global_enables(smi_info, &enables); 1457 if (!rv) { 1458 if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0) 1459 /* Already clear, should work ok. */ 1460 return; 1461 1462 enables &= ~IPMI_BMC_RCV_MSG_INTR; 1463 rv = set_global_enables(smi_info, enables); 1464 } 1465 1466 if (rv < 0) { 1467 dev_err(smi_info->io.dev, 1468 "Cannot check clearing the rcv irq: %d\n", rv); 1469 return; 1470 } 1471 1472 if (rv) { 1473 /* 1474 * An error when setting the event buffer bit means 1475 * clearing the bit is not supported. 
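		 * (That is, set_global_enables() returned 1: the BMC
		 * answered the command with a non-zero completion code.)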
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
	}
}

/*
 * Some BMCs do not support setting the interrupt bits in the global
 * enables even if they support interrupts.  Clearly bad, but we can
 * compensate.
 */
static void check_set_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	if (!smi_info->io.irq)
		return;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		enables |= IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check setting the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * An error when setting the receive irq bit means
		 * setting the bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
		smi_info->irq_enable_broken = true;
	}
}

static int try_enable_event_buffer(struct smi_info *smi_info)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn(PFX "Error getting response from get global enables command, the event buffer is not enabled.\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 4 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
	    resp[2] != 0) {
		pr_warn(PFX "Invalid return from get global enables command, cannot enable the event buffer.\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
		/* buffer is already enabled, nothing to do. */
		smi_info->supports_event_msg_buff = true;
		goto out;
	}

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn(PFX "Error getting response from set global enables command, the event buffer is not enabled.\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		pr_warn(PFX "Invalid return from set global enables command, cannot enable the event buffer.\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[2] != 0)
		/*
		 * An error when setting the event buffer bit means
		 * that the event buffer is not supported.
1585 */ 1586 rv = -ENOENT; 1587 else 1588 smi_info->supports_event_msg_buff = true; 1589 1590 out: 1591 kfree(resp); 1592 return rv; 1593 } 1594 1595 #ifdef CONFIG_IPMI_PROC_INTERFACE 1596 static int smi_type_proc_show(struct seq_file *m, void *v) 1597 { 1598 struct smi_info *smi = m->private; 1599 1600 seq_printf(m, "%s\n", si_to_str[smi->io.si_type]); 1601 1602 return 0; 1603 } 1604 1605 static int smi_type_proc_open(struct inode *inode, struct file *file) 1606 { 1607 return single_open(file, smi_type_proc_show, PDE_DATA(inode)); 1608 } 1609 1610 static const struct file_operations smi_type_proc_ops = { 1611 .open = smi_type_proc_open, 1612 .read = seq_read, 1613 .llseek = seq_lseek, 1614 .release = single_release, 1615 }; 1616 1617 static int smi_si_stats_proc_show(struct seq_file *m, void *v) 1618 { 1619 struct smi_info *smi = m->private; 1620 1621 seq_printf(m, "interrupts_enabled: %d\n", 1622 smi->io.irq && !smi->interrupt_disabled); 1623 seq_printf(m, "short_timeouts: %u\n", 1624 smi_get_stat(smi, short_timeouts)); 1625 seq_printf(m, "long_timeouts: %u\n", 1626 smi_get_stat(smi, long_timeouts)); 1627 seq_printf(m, "idles: %u\n", 1628 smi_get_stat(smi, idles)); 1629 seq_printf(m, "interrupts: %u\n", 1630 smi_get_stat(smi, interrupts)); 1631 seq_printf(m, "attentions: %u\n", 1632 smi_get_stat(smi, attentions)); 1633 seq_printf(m, "flag_fetches: %u\n", 1634 smi_get_stat(smi, flag_fetches)); 1635 seq_printf(m, "hosed_count: %u\n", 1636 smi_get_stat(smi, hosed_count)); 1637 seq_printf(m, "complete_transactions: %u\n", 1638 smi_get_stat(smi, complete_transactions)); 1639 seq_printf(m, "events: %u\n", 1640 smi_get_stat(smi, events)); 1641 seq_printf(m, "watchdog_pretimeouts: %u\n", 1642 smi_get_stat(smi, watchdog_pretimeouts)); 1643 seq_printf(m, "incoming_messages: %u\n", 1644 smi_get_stat(smi, incoming_messages)); 1645 return 0; 1646 } 1647 1648 static int smi_si_stats_proc_open(struct inode *inode, struct file *file) 1649 { 1650 return single_open(file, smi_si_stats_proc_show, PDE_DATA(inode)); 1651 } 1652 1653 static const struct file_operations smi_si_stats_proc_ops = { 1654 .open = smi_si_stats_proc_open, 1655 .read = seq_read, 1656 .llseek = seq_lseek, 1657 .release = single_release, 1658 }; 1659 1660 static int smi_params_proc_show(struct seq_file *m, void *v) 1661 { 1662 struct smi_info *smi = m->private; 1663 1664 seq_printf(m, 1665 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", 1666 si_to_str[smi->io.si_type], 1667 addr_space_to_str[smi->io.addr_type], 1668 smi->io.addr_data, 1669 smi->io.regspacing, 1670 smi->io.regsize, 1671 smi->io.regshift, 1672 smi->io.irq, 1673 smi->io.slave_addr); 1674 1675 return 0; 1676 } 1677 1678 static int smi_params_proc_open(struct inode *inode, struct file *file) 1679 { 1680 return single_open(file, smi_params_proc_show, PDE_DATA(inode)); 1681 } 1682 1683 static const struct file_operations smi_params_proc_ops = { 1684 .open = smi_params_proc_open, 1685 .read = seq_read, 1686 .llseek = seq_lseek, 1687 .release = single_release, 1688 }; 1689 #endif 1690 1691 #define IPMI_SI_ATTR(name) \ 1692 static ssize_t ipmi_##name##_show(struct device *dev, \ 1693 struct device_attribute *attr, \ 1694 char *buf) \ 1695 { \ 1696 struct smi_info *smi_info = dev_get_drvdata(dev); \ 1697 \ 1698 return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \ 1699 } \ 1700 static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL) 1701 1702 static ssize_t ipmi_type_show(struct device *dev, 1703 struct device_attribute *attr, 1704 char *buf) 1705 { 1706 
struct smi_info *smi_info = dev_get_drvdata(dev); 1707 1708 return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]); 1709 } 1710 static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL); 1711 1712 static ssize_t ipmi_interrupts_enabled_show(struct device *dev, 1713 struct device_attribute *attr, 1714 char *buf) 1715 { 1716 struct smi_info *smi_info = dev_get_drvdata(dev); 1717 int enabled = smi_info->io.irq && !smi_info->interrupt_disabled; 1718 1719 return snprintf(buf, 10, "%d\n", enabled); 1720 } 1721 static DEVICE_ATTR(interrupts_enabled, S_IRUGO, 1722 ipmi_interrupts_enabled_show, NULL); 1723 1724 IPMI_SI_ATTR(short_timeouts); 1725 IPMI_SI_ATTR(long_timeouts); 1726 IPMI_SI_ATTR(idles); 1727 IPMI_SI_ATTR(interrupts); 1728 IPMI_SI_ATTR(attentions); 1729 IPMI_SI_ATTR(flag_fetches); 1730 IPMI_SI_ATTR(hosed_count); 1731 IPMI_SI_ATTR(complete_transactions); 1732 IPMI_SI_ATTR(events); 1733 IPMI_SI_ATTR(watchdog_pretimeouts); 1734 IPMI_SI_ATTR(incoming_messages); 1735 1736 static ssize_t ipmi_params_show(struct device *dev, 1737 struct device_attribute *attr, 1738 char *buf) 1739 { 1740 struct smi_info *smi_info = dev_get_drvdata(dev); 1741 1742 return snprintf(buf, 200, 1743 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", 1744 si_to_str[smi_info->io.si_type], 1745 addr_space_to_str[smi_info->io.addr_type], 1746 smi_info->io.addr_data, 1747 smi_info->io.regspacing, 1748 smi_info->io.regsize, 1749 smi_info->io.regshift, 1750 smi_info->io.irq, 1751 smi_info->io.slave_addr); 1752 } 1753 static DEVICE_ATTR(params, S_IRUGO, ipmi_params_show, NULL); 1754 1755 static struct attribute *ipmi_si_dev_attrs[] = { 1756 &dev_attr_type.attr, 1757 &dev_attr_interrupts_enabled.attr, 1758 &dev_attr_short_timeouts.attr, 1759 &dev_attr_long_timeouts.attr, 1760 &dev_attr_idles.attr, 1761 &dev_attr_interrupts.attr, 1762 &dev_attr_attentions.attr, 1763 &dev_attr_flag_fetches.attr, 1764 &dev_attr_hosed_count.attr, 1765 &dev_attr_complete_transactions.attr, 1766 &dev_attr_events.attr, 1767 &dev_attr_watchdog_pretimeouts.attr, 1768 &dev_attr_incoming_messages.attr, 1769 &dev_attr_params.attr, 1770 NULL 1771 }; 1772 1773 static const struct attribute_group ipmi_si_dev_attr_group = { 1774 .attrs = ipmi_si_dev_attrs, 1775 }; 1776 1777 /* 1778 * oem_data_avail_to_receive_msg_avail 1779 * @info - smi_info structure with msg_flags set 1780 * 1781 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL 1782 * Returns 1 indicating need to re-run handle_flags(). 1783 */ 1784 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) 1785 { 1786 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | 1787 RECEIVE_MSG_AVAIL); 1788 return 1; 1789 } 1790 1791 /* 1792 * setup_dell_poweredge_oem_data_handler 1793 * @info - smi_info.device_id must be populated 1794 * 1795 * Systems that match, but have firmware version < 1.40 may assert 1796 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that 1797 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL 1798 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags 1799 * as RECEIVE_MSG_AVAIL instead. 1800 * 1801 * As Dell has no plans to release IPMI 1.5 firmware that *ever* 1802 * assert the OEM[012] bits, and if it did, the driver would have to 1803 * change to handle that properly, we don't actually check for the 1804 * firmware version. 
1805 * Device ID = 0x20 BMC on PowerEdge 8G servers 1806 * Device Revision = 0x80 1807 * Firmware Revision1 = 0x01 BMC version 1.40 1808 * Firmware Revision2 = 0x40 BCD encoded 1809 * IPMI Version = 0x51 IPMI 1.5 1810 * Manufacturer ID = A2 02 00 Dell IANA 1811 * 1812 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert 1813 * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL. 1814 * 1815 */ 1816 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 1817 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 1818 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51 1819 #define DELL_IANA_MFR_ID 0x0002a2 1820 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) 1821 { 1822 struct ipmi_device_id *id = &smi_info->device_id; 1823 if (id->manufacturer_id == DELL_IANA_MFR_ID) { 1824 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && 1825 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && 1826 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { 1827 smi_info->oem_data_avail_handler = 1828 oem_data_avail_to_receive_msg_avail; 1829 } else if (ipmi_version_major(id) < 1 || 1830 (ipmi_version_major(id) == 1 && 1831 ipmi_version_minor(id) < 5)) { 1832 smi_info->oem_data_avail_handler = 1833 oem_data_avail_to_receive_msg_avail; 1834 } 1835 } 1836 } 1837 1838 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA 1839 static void return_hosed_msg_badsize(struct smi_info *smi_info) 1840 { 1841 struct ipmi_smi_msg *msg = smi_info->curr_msg; 1842 1843 /* Make it a response */ 1844 msg->rsp[0] = msg->data[0] | 4; 1845 msg->rsp[1] = msg->data[1]; 1846 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH; 1847 msg->rsp_size = 3; 1848 smi_info->curr_msg = NULL; 1849 deliver_recv_msg(smi_info, msg); 1850 } 1851 1852 /* 1853 * dell_poweredge_bt_xaction_handler 1854 * @info - smi_info.device_id must be populated 1855 * 1856 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will 1857 * not respond to a Get SDR command if the length of the data 1858 * requested is exactly 0x3A, which leads to command timeouts and no 1859 * data returned. This intercepts such commands, and causes userspace 1860 * callers to try again with a different-sized buffer, which succeeds. 1861 */ 1862 1863 #define STORAGE_NETFN 0x0A 1864 #define STORAGE_CMD_GET_SDR 0x23 1865 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self, 1866 unsigned long unused, 1867 void *in) 1868 { 1869 struct smi_info *smi_info = in; 1870 unsigned char *data = smi_info->curr_msg->data; 1871 unsigned int size = smi_info->curr_msg->data_size; 1872 if (size >= 8 && 1873 (data[0]>>2) == STORAGE_NETFN && 1874 data[1] == STORAGE_CMD_GET_SDR && 1875 data[7] == 0x3A) { 1876 return_hosed_msg_badsize(smi_info); 1877 return NOTIFY_STOP; 1878 } 1879 return NOTIFY_DONE; 1880 } 1881 1882 static struct notifier_block dell_poweredge_bt_xaction_notifier = { 1883 .notifier_call = dell_poweredge_bt_xaction_handler, 1884 }; 1885 1886 /* 1887 * setup_dell_poweredge_bt_xaction_handler 1888 * @info - smi_info.device_id must be filled in already 1889 * 1890 * Fills in smi_info.device_id.start_transaction_pre_hook 1891 * when we know what function to use there. 
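 *
 * (In this version of the driver that means registering
 * dell_poweredge_bt_xaction_notifier on the transaction notifier
 * chain when the BMC is a Dell one using the BT interface.)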
1892 */ 1893 static void 1894 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) 1895 { 1896 struct ipmi_device_id *id = &smi_info->device_id; 1897 if (id->manufacturer_id == DELL_IANA_MFR_ID && 1898 smi_info->io.si_type == SI_BT) 1899 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); 1900 } 1901 1902 /* 1903 * setup_oem_data_handler 1904 * @info - smi_info.device_id must be filled in already 1905 * 1906 * Fills in smi_info.device_id.oem_data_available_handler 1907 * when we know what function to use there. 1908 */ 1909 1910 static void setup_oem_data_handler(struct smi_info *smi_info) 1911 { 1912 setup_dell_poweredge_oem_data_handler(smi_info); 1913 } 1914 1915 static void setup_xaction_handlers(struct smi_info *smi_info) 1916 { 1917 setup_dell_poweredge_bt_xaction_handler(smi_info); 1918 } 1919 1920 static void check_for_broken_irqs(struct smi_info *smi_info) 1921 { 1922 check_clr_rcv_irq(smi_info); 1923 check_set_rcv_irq(smi_info); 1924 } 1925 1926 static inline void stop_timer_and_thread(struct smi_info *smi_info) 1927 { 1928 if (smi_info->thread != NULL) { 1929 kthread_stop(smi_info->thread); 1930 smi_info->thread = NULL; 1931 } 1932 1933 smi_info->timer_can_start = false; 1934 if (smi_info->timer_running) 1935 del_timer_sync(&smi_info->si_timer); 1936 } 1937 1938 static struct smi_info *find_dup_si(struct smi_info *info) 1939 { 1940 struct smi_info *e; 1941 1942 list_for_each_entry(e, &smi_infos, link) { 1943 if (e->io.addr_type != info->io.addr_type) 1944 continue; 1945 if (e->io.addr_data == info->io.addr_data) { 1946 /* 1947 * This is a cheap hack, ACPI doesn't have a defined 1948 * slave address but SMBIOS does. Pick it up from 1949 * any source that has it available. 1950 */ 1951 if (info->io.slave_addr && !e->io.slave_addr) 1952 e->io.slave_addr = info->io.slave_addr; 1953 return e; 1954 } 1955 } 1956 1957 return NULL; 1958 } 1959 1960 int ipmi_si_add_smi(struct si_sm_io *io) 1961 { 1962 int rv = 0; 1963 struct smi_info *new_smi, *dup; 1964 1965 if (!io->io_setup) { 1966 if (io->addr_type == IPMI_IO_ADDR_SPACE) { 1967 io->io_setup = ipmi_si_port_setup; 1968 } else if (io->addr_type == IPMI_MEM_ADDR_SPACE) { 1969 io->io_setup = ipmi_si_mem_setup; 1970 } else { 1971 return -EINVAL; 1972 } 1973 } 1974 1975 new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL); 1976 if (!new_smi) 1977 return -ENOMEM; 1978 spin_lock_init(&new_smi->si_lock); 1979 1980 new_smi->io = *io; 1981 1982 mutex_lock(&smi_infos_lock); 1983 dup = find_dup_si(new_smi); 1984 if (dup) { 1985 if (new_smi->io.addr_source == SI_ACPI && 1986 dup->io.addr_source == SI_SMBIOS) { 1987 /* We prefer ACPI over SMBIOS. 
int ipmi_si_add_smi(struct si_sm_io *io)
{
        int rv = 0;
        struct smi_info *new_smi, *dup;

        if (!io->io_setup) {
                if (io->addr_type == IPMI_IO_ADDR_SPACE) {
                        io->io_setup = ipmi_si_port_setup;
                } else if (io->addr_type == IPMI_MEM_ADDR_SPACE) {
                        io->io_setup = ipmi_si_mem_setup;
                } else {
                        return -EINVAL;
                }
        }

        new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
        if (!new_smi)
                return -ENOMEM;
        spin_lock_init(&new_smi->si_lock);

        new_smi->io = *io;

        mutex_lock(&smi_infos_lock);
        dup = find_dup_si(new_smi);
        if (dup) {
                if (new_smi->io.addr_source == SI_ACPI &&
                    dup->io.addr_source == SI_SMBIOS) {
                        /* We prefer ACPI over SMBIOS. */
                        dev_info(dup->io.dev,
                                 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
                                 si_to_str[new_smi->io.si_type]);
                        cleanup_one_si(dup);
                } else {
                        dev_info(new_smi->io.dev,
                                 "%s-specified %s state machine: duplicate\n",
                                 ipmi_addr_src_to_str(new_smi->io.addr_source),
                                 si_to_str[new_smi->io.si_type]);
                        rv = -EBUSY;
                        kfree(new_smi);
                        goto out_err;
                }
        }

        pr_info(PFX "Adding %s-specified %s state machine\n",
                ipmi_addr_src_to_str(new_smi->io.addr_source),
                si_to_str[new_smi->io.si_type]);

        list_add_tail(&new_smi->link, &smi_infos);

        if (initialized) {
                rv = try_smi_init(new_smi);
                if (rv) {
                        cleanup_one_si(new_smi);
                        mutex_unlock(&smi_infos_lock);
                        return rv;
                }
        }
out_err:
        mutex_unlock(&smi_infos_lock);
        return rv;
}

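/*
 * A minimal sketch (not part of this file) of how a discovery source might
 * use ipmi_si_add_smi(); the KCS port 0xca2, slave address 0x20 and the
 * SI_HARDCODED source are illustrative values only:
 *
 *      struct si_sm_io io;
 *
 *      memset(&io, 0, sizeof(io));
 *      io.addr_source = SI_HARDCODED;
 *      io.si_type     = SI_KCS;
 *      io.addr_type   = IPMI_IO_ADDR_SPACE;
 *      io.addr_data   = 0xca2;
 *      io.slave_addr  = 0x20;
 *      io.irq         = 0;        (polled, no interrupt)
 *      ipmi_si_add_smi(&io);
 */
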
/*
 * Try to start up an interface.  Must be called with smi_infos_lock
 * held, primarily to keep smi_num consistent; we only want to do
 * these one at a time.
 */
static int try_smi_init(struct smi_info *new_smi)
{
        int rv = 0;
        int i;
        char *init_name = NULL;

        pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
                ipmi_addr_src_to_str(new_smi->io.addr_source),
                si_to_str[new_smi->io.si_type],
                addr_space_to_str[new_smi->io.addr_type],
                new_smi->io.addr_data,
                new_smi->io.slave_addr, new_smi->io.irq);

        switch (new_smi->io.si_type) {
        case SI_KCS:
                new_smi->handlers = &kcs_smi_handlers;
                break;

        case SI_SMIC:
                new_smi->handlers = &smic_smi_handlers;
                break;

        case SI_BT:
                new_smi->handlers = &bt_smi_handlers;
                break;

        default:
                /* No support for anything else yet. */
                rv = -EIO;
                goto out_err;
        }

        new_smi->intf_num = smi_num;

        /* Do this early so it's available for logs. */
        if (!new_smi->io.dev) {
                init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d",
                                      new_smi->intf_num);

                /*
                 * If we don't already have a device from something
                 * else (like PCI), then register a new one.
                 */
                new_smi->pdev = platform_device_alloc("ipmi_si",
                                                      new_smi->intf_num);
                if (!new_smi->pdev) {
                        pr_err(PFX "Unable to allocate platform device\n");
                        rv = -ENOMEM;
                        goto out_err;
                }
                new_smi->io.dev = &new_smi->pdev->dev;
                new_smi->io.dev->driver = &ipmi_platform_driver.driver;
                /* Nulled by device_add() */
                new_smi->io.dev->init_name = init_name;
        }

        /* Allocate the state machine's data and initialize it. */
        new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
        if (!new_smi->si_sm) {
                rv = -ENOMEM;
                goto out_err;
        }
        new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
                                                           &new_smi->io);

        /* Now that we know the I/O size, we can set up the I/O. */
        rv = new_smi->io.io_setup(&new_smi->io);
        if (rv) {
                dev_err(new_smi->io.dev, "Could not set up I/O space\n");
                goto out_err;
        }

        /* Do low-level detection first. */
        if (new_smi->handlers->detect(new_smi->si_sm)) {
                if (new_smi->io.addr_source)
                        dev_err(new_smi->io.dev,
                                "Interface detection failed\n");
                rv = -ENODEV;
                goto out_err;
        }

        /*
         * Attempt a get device id command.  If it fails, we probably
         * don't have a BMC here.
         */
        rv = try_get_dev_id(new_smi);
        if (rv) {
                if (new_smi->io.addr_source)
                        dev_err(new_smi->io.dev,
                                "There appears to be no BMC at this location\n");
                goto out_err;
        }

        setup_oem_data_handler(new_smi);
        setup_xaction_handlers(new_smi);
        check_for_broken_irqs(new_smi);

        new_smi->waiting_msg = NULL;
        new_smi->curr_msg = NULL;
        atomic_set(&new_smi->req_events, 0);
        new_smi->run_to_completion = false;
        for (i = 0; i < SI_NUM_STATS; i++)
                atomic_set(&new_smi->stats[i], 0);

        new_smi->interrupt_disabled = true;
        atomic_set(&new_smi->need_watch, 0);

        rv = try_enable_event_buffer(new_smi);
        if (rv == 0)
                new_smi->has_event_buffer = true;

        /*
         * Start clearing the flags before we enable interrupts or the
         * timer to avoid racing with the timer.
         */
        start_clear_flags(new_smi);

        /*
         * IRQ is defined to be set when non-zero.  req_events will
         * cause a global flags check that will enable interrupts.
         */
        if (new_smi->io.irq) {
                new_smi->interrupt_disabled = false;
                atomic_set(&new_smi->req_events, 1);
        }

        if (new_smi->pdev && !new_smi->pdev_registered) {
                rv = platform_device_add(new_smi->pdev);
                if (rv) {
                        dev_err(new_smi->io.dev,
                                "Unable to register system interface device: %d\n",
                                rv);
                        goto out_err;
                }
                new_smi->pdev_registered = true;
        }

        dev_set_drvdata(new_smi->io.dev, new_smi);
        rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
        if (rv) {
                dev_err(new_smi->io.dev,
                        "Unable to add device attributes: error %d\n",
                        rv);
                goto out_err;
        }
        new_smi->dev_group_added = true;

        rv = ipmi_register_smi(&handlers,
                               new_smi,
                               new_smi->io.dev,
                               new_smi->io.slave_addr);
        if (rv) {
                dev_err(new_smi->io.dev,
                        "Unable to register device: error %d\n",
                        rv);
                goto out_err;
        }

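/*
 * With the (deprecated) proc interface configured in, the entries created
 * below typically appear under /proc/ipmi/<intf_num>/: "type" reports the
 * si_to_str[] name of the interface, "si_stats" dumps the counters kept in
 * smi_info->stats, and "params" reflects the configured address, register
 * layout and IRQ.
 */
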
#ifdef CONFIG_IPMI_PROC_INTERFACE
        rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
                                     &smi_type_proc_ops,
                                     new_smi);
        if (rv) {
                dev_err(new_smi->io.dev,
                        "Unable to create proc entry: %d\n", rv);
                goto out_err;
        }

        rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
                                     &smi_si_stats_proc_ops,
                                     new_smi);
        if (rv) {
                dev_err(new_smi->io.dev,
                        "Unable to create proc entry: %d\n", rv);
                goto out_err;
        }

        rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
                                     &smi_params_proc_ops,
                                     new_smi);
        if (rv) {
                dev_err(new_smi->io.dev,
                        "Unable to create proc entry: %d\n", rv);
                goto out_err;
        }
#endif

        /* Don't increment till we know we have succeeded. */
        smi_num++;

        dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
                 si_to_str[new_smi->io.si_type]);

        WARN_ON(new_smi->io.dev->init_name != NULL);
        kfree(init_name);

        return 0;

out_err:
        shutdown_one_si(new_smi);

        kfree(init_name);

        return rv;
}

static int init_ipmi_si(void)
{
        struct smi_info *e;
        enum ipmi_addr_src type = SI_INVALID;

        if (initialized)
                return 0;

        pr_info("IPMI System Interface driver.\n");

        /* If the user gave us a device, they presumably want us to use it */
        if (!ipmi_si_hardcode_find_bmc())
                goto do_scan;

        ipmi_si_platform_init();

        ipmi_si_pci_init();

        ipmi_si_parisc_init();

        /* We prefer devices with interrupts, but in the case of a machine
           with multiple BMCs we assume that there will be several instances
           of a given type so if we succeed in registering a type then also
           try to register everything else of the same type */
do_scan:
        mutex_lock(&smi_infos_lock);
        list_for_each_entry(e, &smi_infos, link) {
                /* Try to register a device if it has an IRQ and we either
                   haven't successfully registered a device yet or this
                   device has the same type as one we successfully registered */
                if (e->io.irq && (!type || e->io.addr_source == type)) {
                        if (!try_smi_init(e)) {
                                type = e->io.addr_source;
                        }
                }
        }

        /* type will only have been set if we successfully registered an si */
        if (type)
                goto skip_fallback_noirq;

        /* Fall back to the preferred device */

        list_for_each_entry(e, &smi_infos, link) {
                if (!e->io.irq && (!type || e->io.addr_source == type)) {
                        if (!try_smi_init(e)) {
                                type = e->io.addr_source;
                        }
                }
        }

skip_fallback_noirq:
        initialized = 1;
        mutex_unlock(&smi_infos_lock);

        if (type)
                return 0;

        mutex_lock(&smi_infos_lock);
        if (unload_when_empty && list_empty(&smi_infos)) {
                mutex_unlock(&smi_infos_lock);
                cleanup_ipmi_si();
                pr_warn(PFX "Unable to find any System Interface(s)\n");
                return -ENODEV;
        } else {
                mutex_unlock(&smi_infos_lock);
                return 0;
        }
}
module_init(init_ipmi_si);

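/*
 * Teardown mirrors bring-up in reverse: shutdown_one_si() detaches the
 * interface from the message handler, removes the sysfs attribute group,
 * disables and frees the interrupt, stops the timer and kthread, waits out
 * any handlers still running, polls the state machine until the current
 * transaction drains, turns the BMC-side interrupt enable off, and finally
 * releases the state machine data and the I/O region.  cleanup_one_si()
 * additionally drops the entry from smi_infos and unregisters (or releases)
 * the platform device before freeing the smi_info.
 */
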
static void shutdown_one_si(struct smi_info *smi_info)
{
        int rv = 0;

        if (smi_info->intf) {
                ipmi_smi_t intf = smi_info->intf;

                smi_info->intf = NULL;
                rv = ipmi_unregister_smi(intf);
                if (rv) {
                        pr_err(PFX "Unable to unregister device: errno=%d\n",
                               rv);
                }
        }

        if (smi_info->dev_group_added) {
                device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
                smi_info->dev_group_added = false;
        }
        if (smi_info->io.dev)
                dev_set_drvdata(smi_info->io.dev, NULL);

        /*
         * Make sure that interrupts, the timer and the thread are
         * stopped and will not run again.
         */
        smi_info->interrupt_disabled = true;
        if (smi_info->io.irq_cleanup) {
                smi_info->io.irq_cleanup(&smi_info->io);
                smi_info->io.irq_cleanup = NULL;
        }
        stop_timer_and_thread(smi_info);

        /*
         * Wait until we know that we are out of any interrupt
         * handlers that might have been running before we freed the
         * interrupt.
         */
        synchronize_sched();

        /*
         * Timeouts are stopped, now make sure the interrupts are off
         * in the BMC.  Note that timers and CPU interrupts are off,
         * so no need for locks.
         */
        while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
                poll(smi_info);
                schedule_timeout_uninterruptible(1);
        }
        if (smi_info->handlers)
                disable_si_irq(smi_info);
        while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
                poll(smi_info);
                schedule_timeout_uninterruptible(1);
        }
        if (smi_info->handlers)
                smi_info->handlers->cleanup(smi_info->si_sm);

        if (smi_info->io.addr_source_cleanup) {
                smi_info->io.addr_source_cleanup(&smi_info->io);
                smi_info->io.addr_source_cleanup = NULL;
        }
        if (smi_info->io.io_cleanup) {
                smi_info->io.io_cleanup(&smi_info->io);
                smi_info->io.io_cleanup = NULL;
        }

        kfree(smi_info->si_sm);
        smi_info->si_sm = NULL;
}

static void cleanup_one_si(struct smi_info *smi_info)
{
        if (!smi_info)
                return;

        list_del(&smi_info->link);

        shutdown_one_si(smi_info);

        if (smi_info->pdev) {
                if (smi_info->pdev_registered)
                        platform_device_unregister(smi_info->pdev);
                else
                        platform_device_put(smi_info->pdev);
        }

        kfree(smi_info);
}

int ipmi_si_remove_by_dev(struct device *dev)
{
        struct smi_info *e;
        int rv = -ENOENT;

        mutex_lock(&smi_infos_lock);
        list_for_each_entry(e, &smi_infos, link) {
                if (e->io.dev == dev) {
                        cleanup_one_si(e);
                        rv = 0;
                        break;
                }
        }
        mutex_unlock(&smi_infos_lock);

        return rv;
}

void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
                            unsigned long addr)
{
        /* remove */
        struct smi_info *e, *tmp_e;

        mutex_lock(&smi_infos_lock);
        list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
                if (e->io.addr_type != addr_space)
                        continue;
                if (e->io.si_type != si_type)
                        continue;
                if (e->io.addr_data == addr)
                        cleanup_one_si(e);
        }
        mutex_unlock(&smi_infos_lock);
}

static void cleanup_ipmi_si(void)
{
        struct smi_info *e, *tmp_e;

        if (!initialized)
                return;

        ipmi_si_pci_shutdown();

        ipmi_si_parisc_shutdown();

        ipmi_si_platform_shutdown();

        mutex_lock(&smi_infos_lock);
        list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
                cleanup_one_si(e);
        mutex_unlock(&smi_infos_lock);
}
module_exit(cleanup_ipmi_si);

MODULE_ALIAS("platform:dmi-ipmi-si");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
                   " system interfaces.");