// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>

struct ipmi_file_private
{
        struct ipmi_user *user;
        spinlock_t recv_msg_lock;
        struct list_head recv_msgs;
        struct file *file;
        struct fasync_struct *fasync_queue;
        wait_queue_head_t wait;
        struct mutex recv_mutex;
        int default_retries;
        unsigned int default_retry_time_ms;
};

static void file_receive_handler(struct ipmi_recv_msg *msg,
                                 void *handler_data)
{
        struct ipmi_file_private *priv = handler_data;
        int was_empty;
        unsigned long flags;

        spin_lock_irqsave(&priv->recv_msg_lock, flags);
        was_empty = list_empty(&priv->recv_msgs);
        list_add_tail(&msg->link, &priv->recv_msgs);
        spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

        if (was_empty) {
                wake_up_interruptible(&priv->wait);
                kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
        }
}

static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
        struct ipmi_file_private *priv = file->private_data;
        __poll_t mask = 0;
        unsigned long flags;

        poll_wait(file, &priv->wait, wait);

        spin_lock_irqsave(&priv->recv_msg_lock, flags);

        if (!list_empty(&priv->recv_msgs))
                mask |= (EPOLLIN | EPOLLRDNORM);

        spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

        return mask;
}

static int ipmi_fasync(int fd, struct file *file, int on)
{
        struct ipmi_file_private *priv = file->private_data;

        return fasync_helper(fd, file, on, &priv->fasync_queue);
}

static const struct ipmi_user_hndl ipmi_hndlrs =
{
        .ipmi_recv_hndl = file_receive_handler,
};
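
/*
 * Illustrative user-space sketch (not kernel code): instead of sleeping in
 * poll(), a consumer can ask for SIGIO-driven notification.  The receive
 * handler above raises SIGIO whenever the queue goes from empty to
 * non-empty.  The "/dev/ipmi0" path is an assumption for the example; it
 * is the node udev typically creates for the first registered interface.
 *
 *	int fd = open("/dev/ipmi0", O_RDWR);
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *	(on SIGIO, drain the queue with IPMICTL_RECEIVE_MSG_TRUNC)
 */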

static int ipmi_open(struct inode *inode, struct file *file)
{
        int if_num = iminor(inode);
        int rv;
        struct ipmi_file_private *priv;

        priv = kmalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->file = file;

        /*
         * Set up the per-file data before creating the user; the receive
         * handler may run as soon as the user exists.
         */
        spin_lock_init(&priv->recv_msg_lock);
        INIT_LIST_HEAD(&priv->recv_msgs);
        init_waitqueue_head(&priv->wait);
        priv->fasync_queue = NULL;
        mutex_init(&priv->recv_mutex);

        /* Use the low-level defaults. */
        priv->default_retries = -1;
        priv->default_retry_time_ms = 0;

        rv = ipmi_create_user(if_num,
                              &ipmi_hndlrs,
                              priv,
                              &priv->user);
        if (rv) {
                kfree(priv);
                goto out;
        }

        file->private_data = priv;

 out:
        return rv;
}

static int ipmi_release(struct inode *inode, struct file *file)
{
        struct ipmi_file_private *priv = file->private_data;
        int rv;
        struct ipmi_recv_msg *msg, *next;

        rv = ipmi_destroy_user(priv->user);
        if (rv)
                return rv;

        list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
                ipmi_free_recv_msg(msg);

        kfree(priv);

        return 0;
}

static int handle_send_req(struct ipmi_user *user,
                           struct ipmi_req *req,
                           int retries,
                           unsigned int retry_time_ms)
{
        int rv;
        struct ipmi_addr addr;
        struct kernel_ipmi_msg msg;

        if (req->addr_len > sizeof(struct ipmi_addr))
                return -EINVAL;

        if (copy_from_user(&addr, req->addr, req->addr_len))
                return -EFAULT;

        msg.netfn = req->msg.netfn;
        msg.cmd = req->msg.cmd;
        msg.data_len = req->msg.data_len;
        msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
        if (!msg.data)
                return -ENOMEM;

        /* From here out we cannot return directly; we must jump to "out"
           on error exits so msg.data gets freed. */

        rv = ipmi_validate_addr(&addr, req->addr_len);
        if (rv)
                goto out;

        if (req->msg.data != NULL) {
                if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
                        rv = -EMSGSIZE;
                        goto out;
                }

                if (copy_from_user(msg.data,
                                   req->msg.data,
                                   req->msg.data_len)) {
                        rv = -EFAULT;
                        goto out;
                }
        } else {
                msg.data_len = 0;
        }

        rv = ipmi_request_settime(user,
                                  &addr,
                                  req->msgid,
                                  &msg,
                                  NULL,
                                  0,
                                  retries,
                                  retry_time_ms);
 out:
        kfree(msg.data);
        return rv;
}
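
/*
 * Illustrative user-space sketch (not kernel code): sending a Get Device ID
 * command (NetFn 0x06, cmd 0x01) to the BMC through this interface.  The
 * file descriptor fd is assumed to refer to an open /dev/ipmiN node; the
 * structures and constants come from the uapi <linux/ipmi.h>.  msgid is an
 * arbitrary cookie that is echoed back in the matching response.
 *
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel = IPMI_BMC_CHANNEL,
 *		.lun = 0,
 *	};
 *	struct ipmi_req req = {
 *		.addr = (unsigned char *) &si,
 *		.addr_len = sizeof(si),
 *		.msgid = 1,
 *		.msg = { .netfn = 0x06, .cmd = 0x01, .data = NULL, .data_len = 0 },
 *	};
 *
 *	if (ioctl(fd, IPMICTL_SEND_COMMAND, &req) < 0)
 *		perror("IPMICTL_SEND_COMMAND");
 */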

static int handle_recv(struct ipmi_file_private *priv,
                       bool trunc, struct ipmi_recv *rsp,
                       int (*copyout)(struct ipmi_recv *, void __user *),
                       void __user *to)
{
        int addr_len;
        struct list_head *entry;
        struct ipmi_recv_msg *msg;
        unsigned long flags;
        int rv = 0;
        int truncated = 0;

        /* We claim a mutex because we don't want two
           users getting something from the queue at a time.
           Since we have to release the spinlock before we can
           copy the data to the user, it's possible another
           user will grab something from the queue, too.  Then
           the messages might get out of order if something
           fails and the message gets put back onto the
           queue.  This mutex prevents that problem. */
        mutex_lock(&priv->recv_mutex);

        /* Grab the message off the list. */
        spin_lock_irqsave(&priv->recv_msg_lock, flags);
        if (list_empty(&priv->recv_msgs)) {
                spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
                rv = -EAGAIN;
                goto recv_err;
        }
        entry = priv->recv_msgs.next;
        msg = list_entry(entry, struct ipmi_recv_msg, link);
        list_del(entry);
        spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

        addr_len = ipmi_addr_length(msg->addr.addr_type);
        if (rsp->addr_len < addr_len) {
                rv = -EINVAL;
                goto recv_putback_on_err;
        }

        if (copy_to_user(rsp->addr, &msg->addr, addr_len)) {
                rv = -EFAULT;
                goto recv_putback_on_err;
        }
        rsp->addr_len = addr_len;

        rsp->recv_type = msg->recv_type;
        rsp->msgid = msg->msgid;
        rsp->msg.netfn = msg->msg.netfn;
        rsp->msg.cmd = msg->msg.cmd;

        if (msg->msg.data_len > 0) {
                if (rsp->msg.data_len < msg->msg.data_len) {
                        rv = -EMSGSIZE;
                        if (trunc) {
                                /* Deliver what fits, but remember the
                                   overflow so it can still be reported. */
                                truncated = rv;
                                msg->msg.data_len = rsp->msg.data_len;
                        } else {
                                goto recv_putback_on_err;
                        }
                }

                if (copy_to_user(rsp->msg.data,
                                 msg->msg.data,
                                 msg->msg.data_len)) {
                        rv = -EFAULT;
                        goto recv_putback_on_err;
                }
                rsp->msg.data_len = msg->msg.data_len;
        } else {
                rsp->msg.data_len = 0;
        }

        rv = copyout(rsp, to);
        if (rv)
                goto recv_putback_on_err;

        mutex_unlock(&priv->recv_mutex);
        ipmi_free_recv_msg(msg);
        /* A truncated message has still been consumed; return -EMSGSIZE
           in that case so the caller knows its buffer was too small. */
        return truncated;

 recv_putback_on_err:
        /* If we got an error, put the message back onto
           the head of the queue. */
        spin_lock_irqsave(&priv->recv_msg_lock, flags);
        list_add(entry, &priv->recv_msgs);
        spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
 recv_err:
        mutex_unlock(&priv->recv_mutex);
        return rv;
}
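
/*
 * Illustrative user-space sketch (not kernel code) of the receive side,
 * assuming fd refers to an open /dev/ipmiN node and consume() stands in
 * for the application's own handling.  poll() waits for the queue to be
 * non-empty; an empty queue makes the ioctl fail with EAGAIN.  With
 * IPMICTL_RECEIVE_MSG an undersized data buffer leaves the message queued
 * and fails with EMSGSIZE, while IPMICTL_RECEIVE_MSG_TRUNC copies as much
 * as fits and consumes the message.
 *
 *	struct ipmi_addr addr;
 *	unsigned char data[IPMI_MAX_MSG_LENGTH];
 *	struct ipmi_recv recv = {
 *		.addr = (unsigned char *) &addr,
 *		.addr_len = sizeof(addr),
 *		.msg = { .data = data, .data_len = sizeof(data) },
 *	};
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	if (ioctl(fd, IPMICTL_RECEIVE_MSG_TRUNC, &recv) == 0)
 *		consume(&recv);
 */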

static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
{
        return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ?
                        -EFAULT : 0;
}

static long ipmi_ioctl(struct file *file,
                       unsigned int cmd,
                       unsigned long data)
{
        int rv = -EINVAL;
        struct ipmi_file_private *priv = file->private_data;
        void __user *arg = (void __user *)data;

        switch (cmd)
        {
        case IPMICTL_SEND_COMMAND:
        {
                struct ipmi_req req;
                int retries;
                unsigned int retry_time_ms;

                if (copy_from_user(&req, arg, sizeof(req))) {
                        rv = -EFAULT;
                        break;
                }

                mutex_lock(&priv->recv_mutex);
                retries = priv->default_retries;
                retry_time_ms = priv->default_retry_time_ms;
                mutex_unlock(&priv->recv_mutex);

                rv = handle_send_req(priv->user, &req, retries, retry_time_ms);
                break;
        }

        case IPMICTL_SEND_COMMAND_SETTIME:
        {
                struct ipmi_req_settime req;

                if (copy_from_user(&req, arg, sizeof(req))) {
                        rv = -EFAULT;
                        break;
                }

                rv = handle_send_req(priv->user,
                                     &req.req,
                                     req.retries,
                                     req.retry_time_ms);
                break;
        }

        case IPMICTL_RECEIVE_MSG:
        case IPMICTL_RECEIVE_MSG_TRUNC:
        {
                struct ipmi_recv rsp;

                if (copy_from_user(&rsp, arg, sizeof(rsp)))
                        rv = -EFAULT;
                else
                        rv = handle_recv(priv, cmd == IPMICTL_RECEIVE_MSG_TRUNC,
                                         &rsp, copyout_recv, arg);
                break;
        }

        case IPMICTL_REGISTER_FOR_CMD:
        {
                struct ipmi_cmdspec val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
                                           IPMI_CHAN_ALL);
                break;
        }

        case IPMICTL_UNREGISTER_FOR_CMD:
        {
                struct ipmi_cmdspec val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
                                             IPMI_CHAN_ALL);
                break;
        }

        case IPMICTL_REGISTER_FOR_CMD_CHANS:
        {
                struct ipmi_cmdspec_chans val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
                                           val.chans);
                break;
        }

        case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
        {
                struct ipmi_cmdspec_chans val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
                                             val.chans);
                break;
        }

        case IPMICTL_SET_GETS_EVENTS_CMD:
        {
                int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_gets_events(priv->user, val);
                break;
        }

        /* The next four are legacy, not per-channel. */
        case IPMICTL_SET_MY_ADDRESS_CMD:
        {
                unsigned int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_address(priv->user, 0, val);
                break;
        }

        case IPMICTL_GET_MY_ADDRESS_CMD:
        {
                unsigned int val;
                unsigned char rval;

                rv = ipmi_get_my_address(priv->user, 0, &rval);
                if (rv)
                        break;

                val = rval;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_LUN_CMD:
        {
                unsigned int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_LUN(priv->user, 0, val);
                break;
        }

        case IPMICTL_GET_MY_LUN_CMD:
        {
                unsigned int val;
                unsigned char rval;

                rv = ipmi_get_my_LUN(priv->user, 0, &rval);
                if (rv)
                        break;

                val = rval;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_address(priv->user, val.channel, val.value);
                break;
        }

        case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
                if (rv)
                        break;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
                break;
        }

        case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
                if (rv)
                        break;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_TIMING_PARMS_CMD:
        {
                struct ipmi_timing_parms parms;

                if (copy_from_user(&parms, arg, sizeof(parms))) {
                        rv = -EFAULT;
                        break;
                }

                mutex_lock(&priv->recv_mutex);
                priv->default_retries = parms.retries;
                priv->default_retry_time_ms = parms.retry_time_ms;
                mutex_unlock(&priv->recv_mutex);
                rv = 0;
                break;
        }

        case IPMICTL_GET_TIMING_PARMS_CMD:
        {
                struct ipmi_timing_parms parms;

                mutex_lock(&priv->recv_mutex);
                parms.retries = priv->default_retries;
                parms.retry_time_ms = priv->default_retry_time_ms;
                mutex_unlock(&priv->recv_mutex);

                if (copy_to_user(arg, &parms, sizeof(parms))) {
                        rv = -EFAULT;
                        break;
                }

                rv = 0;
                break;
        }

        case IPMICTL_GET_MAINTENANCE_MODE_CMD:
        {
                int mode;

                mode = ipmi_get_maintenance_mode(priv->user);
                if (copy_to_user(arg, &mode, sizeof(mode))) {
                        rv = -EFAULT;
                        break;
                }
                rv = 0;
                break;
        }

        case IPMICTL_SET_MAINTENANCE_MODE_CMD:
        {
                int mode;

                if (copy_from_user(&mode, arg, sizeof(mode))) {
                        rv = -EFAULT;
                        break;
                }
                rv = ipmi_set_maintenance_mode(priv->user, mode);
                break;
        }

        default:
                rv = -ENOTTY;
                break;
        }

        return rv;
}
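
/*
 * Illustrative user-space sketch (not kernel code) of two of the simpler
 * ioctls handled above, assuming fd is an open /dev/ipmiN node.  Enabling
 * event reception makes asynchronous BMC events arrive in the same receive
 * queue; the timing parameters become the per-file defaults used by
 * IPMICTL_SEND_COMMAND.
 *
 *	int enable = 1;
 *	struct ipmi_timing_parms tp = { .retries = 3, .retry_time_ms = 1000 };
 *
 *	ioctl(fd, IPMICTL_SET_GETS_EVENTS_CMD, &enable);
 *	ioctl(fd, IPMICTL_SET_TIMING_PARMS_CMD, &tp);
 */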

#ifdef CONFIG_COMPAT
/*
 * The following code contains code for supporting 32-bit compatible
 * ioctls on 64-bit kernels.  This allows running 32-bit apps on the
 * 64-bit kernel
 */
#define COMPAT_IPMICTL_SEND_COMMAND \
        _IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME \
        _IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG \
        _IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC \
        _IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)
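
/*
 * Note: these request numbers match the native IPMICTL_* values; only the
 * size field that _IOR()/_IOWR() derive from sizeof() differs, because the
 * compat structures carry 32-bit pointers and longs.  That is why the
 * compat handler below can switch on these values directly.
 */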

struct compat_ipmi_msg {
        u8 netfn;
        u8 cmd;
        u16 data_len;
        compat_uptr_t data;
};

struct compat_ipmi_req {
        compat_uptr_t addr;
        compat_uint_t addr_len;
        compat_long_t msgid;
        struct compat_ipmi_msg msg;
};

struct compat_ipmi_recv {
        compat_int_t recv_type;
        compat_uptr_t addr;
        compat_uint_t addr_len;
        compat_long_t msgid;
        struct compat_ipmi_msg msg;
};

struct compat_ipmi_req_settime {
        struct compat_ipmi_req req;
        compat_int_t retries;
        compat_uint_t retry_time_ms;
};

/*
 * Define some helper functions for copying IPMI data
 */
static void get_compat_ipmi_msg(struct ipmi_msg *p64,
                                struct compat_ipmi_msg *p32)
{
        p64->netfn = p32->netfn;
        p64->cmd = p32->cmd;
        p64->data_len = p32->data_len;
        p64->data = compat_ptr(p32->data);
}

static void get_compat_ipmi_req(struct ipmi_req *p64,
                                struct compat_ipmi_req *p32)
{
        p64->addr = compat_ptr(p32->addr);
        p64->addr_len = p32->addr_len;
        p64->msgid = p32->msgid;
        get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
                                        struct compat_ipmi_req_settime *p32)
{
        get_compat_ipmi_req(&p64->req, &p32->req);
        p64->retries = p32->retries;
        p64->retry_time_ms = p32->retry_time_ms;
}

static void get_compat_ipmi_recv(struct ipmi_recv *p64,
                                 struct compat_ipmi_recv *p32)
{
        memset(p64, 0, sizeof(struct ipmi_recv));
        p64->recv_type = p32->recv_type;
        p64->addr = compat_ptr(p32->addr);
        p64->addr_len = p32->addr_len;
        p64->msgid = p32->msgid;
        get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
{
        struct compat_ipmi_recv v32;

        memset(&v32, 0, sizeof(struct compat_ipmi_recv));
        v32.recv_type = p64->recv_type;
        v32.addr = ptr_to_compat(p64->addr);
        v32.addr_len = p64->addr_len;
        v32.msgid = p64->msgid;
        v32.msg.netfn = p64->msg.netfn;
        v32.msg.cmd = p64->msg.cmd;
        v32.msg.data_len = p64->msg.data_len;
        v32.msg.data = ptr_to_compat(p64->msg.data);

        return copy_to_user(to, &v32, sizeof(v32)) ?
                        -EFAULT : 0;
}

/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
                              unsigned long arg)
{
        struct ipmi_file_private *priv = filep->private_data;

        switch (cmd) {
        case COMPAT_IPMICTL_SEND_COMMAND:
        {
                struct ipmi_req rp;
                struct compat_ipmi_req r32;
                int retries;
                unsigned int retry_time_ms;

                if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32)))
                        return -EFAULT;

                get_compat_ipmi_req(&rp, &r32);

                mutex_lock(&priv->recv_mutex);
                retries = priv->default_retries;
                retry_time_ms = priv->default_retry_time_ms;
                mutex_unlock(&priv->recv_mutex);

                return handle_send_req(priv->user, &rp,
                                       retries, retry_time_ms);
        }
        case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
        {
                struct ipmi_req_settime sp;
                struct compat_ipmi_req_settime sp32;

                if (copy_from_user(&sp32, compat_ptr(arg), sizeof(sp32)))
                        return -EFAULT;

                get_compat_ipmi_req_settime(&sp, &sp32);

                return handle_send_req(priv->user, &sp.req,
                                       sp.retries, sp.retry_time_ms);
        }
        case COMPAT_IPMICTL_RECEIVE_MSG:
        case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
        {
                struct ipmi_recv recv64;
                struct compat_ipmi_recv recv32;

                if (copy_from_user(&recv32, compat_ptr(arg), sizeof(recv32)))
                        return -EFAULT;

                get_compat_ipmi_recv(&recv64, &recv32);

                return handle_recv(priv,
                                   cmd == COMPAT_IPMICTL_RECEIVE_MSG_TRUNC,
                                   &recv64, copyout_recv32, compat_ptr(arg));
        }
        default:
                return ipmi_ioctl(filep, cmd, arg);
        }
}
#endif

static const struct file_operations ipmi_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = ipmi_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_ipmi_ioctl,
#endif
        .open           = ipmi_open,
        .release        = ipmi_release,
        .fasync         = ipmi_fasync,
        .poll           = ipmi_poll,
        .llseek         = noop_llseek,
};

#define DEVICE_NAME "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
                 " default, or if you set it to zero, it will choose the next"
                 " available device. Setting it to -1 will disable the"
                 " interface. Other values will set the major device number"
                 " to that value.");
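
/*
 * Typical usage (example only): load the module with the default dynamic
 * major and let udev create the per-interface nodes from the "ipmi%d"
 * class devices registered below, e.g.
 *
 *	modprobe ipmi_devintf
 *	ls /dev/ipmi*
 */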

/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
        dev_t dev;
        struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;

static void ipmi_new_smi(int if_num, struct device *device)
{
        dev_t dev = MKDEV(ipmi_major, if_num);
        struct ipmi_reg_list *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                pr_err("ipmi_devintf: Unable to create the ipmi class device link\n");
                return;
        }
        entry->dev = dev;

        mutex_lock(&reg_list_mutex);
        device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
        list_add(&entry->link, &reg_list);
        mutex_unlock(&reg_list_mutex);
}

static void ipmi_smi_gone(int if_num)
{
        dev_t dev = MKDEV(ipmi_major, if_num);
        struct ipmi_reg_list *entry;

        mutex_lock(&reg_list_mutex);
        list_for_each_entry(entry, &reg_list, link) {
                if (entry->dev == dev) {
                        list_del(&entry->link);
                        kfree(entry);
                        break;
                }
        }
        device_destroy(ipmi_class, dev);
        mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher =
{
        .owner    = THIS_MODULE,
        .new_smi  = ipmi_new_smi,
        .smi_gone = ipmi_smi_gone,
};

static int __init init_ipmi_devintf(void)
{
        int rv;

        if (ipmi_major < 0)
                return -EINVAL;

        pr_info("ipmi device interface\n");

        ipmi_class = class_create(THIS_MODULE, "ipmi");
        if (IS_ERR(ipmi_class)) {
                pr_err("ipmi: can't register device class\n");
                return PTR_ERR(ipmi_class);
        }

        rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
        if (rv < 0) {
                class_destroy(ipmi_class);
                pr_err("ipmi: can't get major %d\n", ipmi_major);
                return rv;
        }

        if (ipmi_major == 0) {
                ipmi_major = rv;
        }

        rv = ipmi_smi_watcher_register(&smi_watcher);
        if (rv) {
                unregister_chrdev(ipmi_major, DEVICE_NAME);
                class_destroy(ipmi_class);
                pr_warn("ipmi: can't register smi watcher\n");
                return rv;
        }

        return 0;
}
module_init(init_ipmi_devintf);

static void __exit cleanup_ipmi(void)
{
        struct ipmi_reg_list *entry, *entry2;

        mutex_lock(&reg_list_mutex);
        list_for_each_entry_safe(entry, entry2, &reg_list, link) {
                list_del(&entry->link);
                device_destroy(ipmi_class, entry->dev);
                kfree(entry);
        }
        mutex_unlock(&reg_list_mutex);
        class_destroy(ipmi_class);
        ipmi_smi_watcher_unregister(&smi_watcher);
        unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");