// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>

struct ipmi_file_private
{
	ipmi_user_t          user;
	spinlock_t           recv_msg_lock;
	struct list_head     recv_msgs;
	struct file          *file;
	struct fasync_struct *fasync_queue;
	wait_queue_head_t    wait;
	struct mutex         recv_mutex;
	int                  default_retries;
	unsigned int         default_retry_time_ms;
};

static DEFINE_MUTEX(ipmi_mutex);

static void file_receive_handler(struct ipmi_recv_msg *msg,
				 void *handler_data)
{
	struct ipmi_file_private *priv = handler_data;
	int was_empty;
	unsigned long flags;

	spin_lock_irqsave(&(priv->recv_msg_lock), flags);

	was_empty = list_empty(&(priv->recv_msgs));
	list_add_tail(&(msg->link), &(priv->recv_msgs));

	if (was_empty) {
		wake_up_interruptible(&priv->wait);
		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
	}

	spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}

static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
	struct ipmi_file_private *priv = file->private_data;
	__poll_t mask = 0;
	unsigned long flags;

	poll_wait(file, &priv->wait, wait);

	spin_lock_irqsave(&priv->recv_msg_lock, flags);

	if (!list_empty(&(priv->recv_msgs)))
		mask |= (EPOLLIN | EPOLLRDNORM);

	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	return mask;
}

static int ipmi_fasync(int fd, struct file *file, int on)
{
	struct ipmi_file_private *priv = file->private_data;
	int result;

	mutex_lock(&ipmi_mutex); /* could race against open() otherwise */
	result = fasync_helper(fd, file, on, &priv->fasync_queue);
	mutex_unlock(&ipmi_mutex);

	return result;
}

static const struct ipmi_user_hndl ipmi_hndlrs =
{
	.ipmi_recv_hndl	= file_receive_handler,
};
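
/*
 * Open a file against one IPMI interface; the minor number of the
 * device node selects the interface.  A per-file ipmi_user is created
 * so that responses and incoming commands for this user are queued to
 * this file via file_receive_handler() above.
 */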
static int ipmi_open(struct inode *inode, struct file *file)
{
	int if_num = iminor(inode);
	int rv;
	struct ipmi_file_private *priv;

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	mutex_lock(&ipmi_mutex);
	priv->file = file;

	rv = ipmi_create_user(if_num,
			      &ipmi_hndlrs,
			      priv,
			      &(priv->user));
	if (rv) {
		kfree(priv);
		goto out;
	}

	file->private_data = priv;

	spin_lock_init(&(priv->recv_msg_lock));
	INIT_LIST_HEAD(&(priv->recv_msgs));
	init_waitqueue_head(&priv->wait);
	priv->fasync_queue = NULL;
	mutex_init(&priv->recv_mutex);

	/* Use the low-level defaults. */
	priv->default_retries = -1;
	priv->default_retry_time_ms = 0;

out:
	mutex_unlock(&ipmi_mutex);
	return rv;
}

static int ipmi_release(struct inode *inode, struct file *file)
{
	struct ipmi_file_private *priv = file->private_data;
	int rv;
	struct ipmi_recv_msg *msg, *next;

	rv = ipmi_destroy_user(priv->user);
	if (rv)
		return rv;

	list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
		ipmi_free_recv_msg(msg);

	kfree(priv);

	return 0;
}

static int handle_send_req(ipmi_user_t user,
			   struct ipmi_req *req,
			   int retries,
			   unsigned int retry_time_ms)
{
	int rv;
	struct ipmi_addr addr;
	struct kernel_ipmi_msg msg;

	if (req->addr_len > sizeof(struct ipmi_addr))
		return -EINVAL;

	if (copy_from_user(&addr, req->addr, req->addr_len))
		return -EFAULT;

	msg.netfn = req->msg.netfn;
	msg.cmd = req->msg.cmd;
	msg.data_len = req->msg.data_len;
	msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!msg.data)
		return -ENOMEM;

	/*
	 * From here on we cannot return directly; error exits must jump
	 * to "out" so that msg.data gets freed.
	 */

	rv = ipmi_validate_addr(&addr, req->addr_len);
	if (rv)
		goto out;

	if (req->msg.data != NULL) {
		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
			rv = -EMSGSIZE;
			goto out;
		}

		if (copy_from_user(msg.data,
				   req->msg.data,
				   req->msg.data_len))
		{
			rv = -EFAULT;
			goto out;
		}
	} else {
		msg.data_len = 0;
	}

	rv = ipmi_request_settime(user,
				  &addr,
				  req->msgid,
				  &msg,
				  NULL,
				  0,
				  retries,
				  retry_time_ms);
out:
	kfree(msg.data);
	return rv;
}
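
/*
 * Pull one message off the receive queue and copy it out to user space.
 * Returns -EAGAIN if the queue is empty.  If "trunc" is set, a message
 * larger than the caller's buffer is truncated and -EMSGSIZE is
 * returned along with the shortened data; otherwise the message is put
 * back on the head of the queue on any error so it is not lost.  The
 * "copyout" callback writes the final struct ipmi_recv back to user
 * space in either the native or the compat layout.
 */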
static int handle_recv(struct ipmi_file_private *priv,
		       bool trunc, struct ipmi_recv *rsp,
		       int (*copyout)(struct ipmi_recv *, void __user *),
		       void __user *to)
{
	int addr_len;
	struct list_head *entry;
	struct ipmi_recv_msg *msg;
	unsigned long flags;
	int rv = 0, rv2;

	/*
	 * We claim a mutex because we don't want two users getting
	 * something from the queue at a time.  Since we have to release
	 * the spinlock before we can copy the data to the user, it's
	 * possible another user will grab something from the queue,
	 * too.  Then the messages might get out of order if something
	 * fails and the message gets put back onto the queue.  This
	 * mutex prevents that problem.
	 */
	mutex_lock(&priv->recv_mutex);

	/* Grab the message off the list. */
	spin_lock_irqsave(&(priv->recv_msg_lock), flags);
	if (list_empty(&(priv->recv_msgs))) {
		spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
		rv = -EAGAIN;
		goto recv_err;
	}
	entry = priv->recv_msgs.next;
	msg = list_entry(entry, struct ipmi_recv_msg, link);
	list_del(entry);
	spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);

	addr_len = ipmi_addr_length(msg->addr.addr_type);
	if (rsp->addr_len < addr_len)
	{
		rv = -EINVAL;
		goto recv_putback_on_err;
	}

	if (copy_to_user(rsp->addr, &(msg->addr), addr_len)) {
		rv = -EFAULT;
		goto recv_putback_on_err;
	}
	rsp->addr_len = addr_len;

	rsp->recv_type = msg->recv_type;
	rsp->msgid = msg->msgid;
	rsp->msg.netfn = msg->msg.netfn;
	rsp->msg.cmd = msg->msg.cmd;

	if (msg->msg.data_len > 0) {
		if (rsp->msg.data_len < msg->msg.data_len) {
			rv = -EMSGSIZE;
			if (trunc)
				msg->msg.data_len = rsp->msg.data_len;
			else
				goto recv_putback_on_err;
		}

		if (copy_to_user(rsp->msg.data,
				 msg->msg.data,
				 msg->msg.data_len))
		{
			rv = -EFAULT;
			goto recv_putback_on_err;
		}
		rsp->msg.data_len = msg->msg.data_len;
	} else {
		rsp->msg.data_len = 0;
	}

	rv2 = copyout(rsp, to);
	if (rv2) {
		rv = rv2;
		goto recv_putback_on_err;
	}

	mutex_unlock(&priv->recv_mutex);
	ipmi_free_recv_msg(msg);
	/* rv still holds -EMSGSIZE here if the data was truncated. */
	return rv;

recv_putback_on_err:
	/*
	 * If we got an error, put the message back onto the head of the
	 * queue.
	 */
	spin_lock_irqsave(&(priv->recv_msg_lock), flags);
	list_add(entry, &(priv->recv_msgs));
	spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
recv_err:
	mutex_unlock(&priv->recv_mutex);
	return rv;
}

static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
{
	return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0;
}
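
/*
 * Main ioctl handler for the device interface.  For illustration only
 * (this is not part of the driver, error handling is omitted and the
 * address setup is only a sketch), a user-space caller might drive the
 * send path roughly like this, using the structures and ioctl numbers
 * from <linux/ipmi.h> against the node created for interface 0
 * (typically /dev/ipmi0):
 *
 *	struct ipmi_system_interface_addr si = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *	};
 *	struct ipmi_req req = {
 *		.addr     = (unsigned char *) &si,
 *		.addr_len = sizeof(si),
 *		.msgid    = 1,
 *		.msg      = { .netfn = 0x06, .cmd = 0x01 },	// Get Device ID
 *	};
 *	int fd = open("/dev/ipmi0", O_RDWR);
 *
 *	ioctl(fd, IPMICTL_SEND_COMMAND, &req);
 *
 * The response is then fetched with IPMICTL_RECEIVE_MSG_TRUNC (after
 * poll() reports the fd readable) into a struct ipmi_recv whose addr
 * and msg.data fields point at caller-supplied buffers.
 */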
static int ipmi_ioctl(struct file *file,
		      unsigned int cmd,
		      unsigned long data)
{
	int rv = -EINVAL;
	struct ipmi_file_private *priv = file->private_data;
	void __user *arg = (void __user *)data;

	switch (cmd)
	{
	case IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req,
				     priv->default_retries,
				     priv->default_retry_time_ms);
		break;
	}

	case IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req.req,
				     req.retries,
				     req.retry_time_ms);
		break;
	}

	case IPMICTL_RECEIVE_MSG:
	case IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv rsp;

		if (copy_from_user(&rsp, arg, sizeof(rsp)))
			rv = -EFAULT;
		else
			rv = handle_recv(priv, cmd == IPMICTL_RECEIVE_MSG_TRUNC,
					 &rsp, copyout_recv, arg);
		break;
	}
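
	/*
	 * Command registration: a user that registers for a netfn/cmd
	 * pair is handed incoming commands matching that pair.  The
	 * plain variants apply to every channel (IPMI_CHAN_ALL); the
	 * _CHANS variants take an explicit channel bitmask in
	 * val.chans.
	 */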
	case IPMICTL_REGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   val.chans);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     val.chans);
		break;
	}

	case IPMICTL_SET_GETS_EVENTS_CMD:
	{
		int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_gets_events(priv->user, val);
		break;
	}

	/* The next four are legacy, not per-channel. */
	case IPMICTL_SET_MY_ADDRESS_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_ADDRESS_CMD:
	{
		unsigned int val;
		unsigned char rval;

		rv = ipmi_get_my_address(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_LUN_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_LUN_CMD:
	{
		unsigned int val;
		unsigned char rval;

		rv = ipmi_get_my_LUN(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		if (copy_from_user(&parms, arg, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		priv->default_retries = parms.retries;
		priv->default_retry_time_ms = parms.retry_time_ms;
		rv = 0;
		break;
	}

	case IPMICTL_GET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		parms.retries = priv->default_retries;
		parms.retry_time_ms = priv->default_retry_time_ms;

		if (copy_to_user(arg, &parms, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		rv = 0;
		break;
	}

	case IPMICTL_GET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		mode = ipmi_get_maintenance_mode(priv->user);
		if (copy_to_user(arg, &mode, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = 0;
		break;
	}

	case IPMICTL_SET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		if (copy_from_user(&mode, arg, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = ipmi_set_maintenance_mode(priv->user, mode);
		break;
	}
	}

	return rv;
}

/*
 * Note: it doesn't make sense to take the BKL here but
 *       not in compat_ipmi_ioctl. -arnd
 */
static long ipmi_unlocked_ioctl(struct file *file,
				unsigned int cmd,
				unsigned long data)
{
	int ret;

	mutex_lock(&ipmi_mutex);
	ret = ipmi_ioctl(file, cmd, data);
	mutex_unlock(&ipmi_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT

/*
 * The following code supports 32-bit compatible ioctls on 64-bit
 * kernels, so that 32-bit applications can run on a 64-bit kernel.
 */
#define COMPAT_IPMICTL_SEND_COMMAND	\
	_IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME	\
	_IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG	\
	_IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC	\
	_IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)

struct compat_ipmi_msg {
	u8		netfn;
	u8		cmd;
	u16		data_len;
	compat_uptr_t	data;
};

struct compat_ipmi_req {
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_recv {
	compat_int_t		recv_type;
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_req_settime {
	struct compat_ipmi_req	req;
	compat_int_t		retries;
	compat_uint_t		retry_time_ms;
};

/*
 * Define some helper functions for copying IPMI data
 */
static void get_compat_ipmi_msg(struct ipmi_msg *p64,
				struct compat_ipmi_msg *p32)
{
	p64->netfn = p32->netfn;
	p64->cmd = p32->cmd;
	p64->data_len = p32->data_len;
	p64->data = compat_ptr(p32->data);
}

static void get_compat_ipmi_req(struct ipmi_req *p64,
				struct compat_ipmi_req *p32)
{
	p64->addr = compat_ptr(p32->addr);
	p64->addr_len = p32->addr_len;
	p64->msgid = p32->msgid;
	get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
					struct compat_ipmi_req_settime *p32)
{
	get_compat_ipmi_req(&p64->req, &p32->req);
	p64->retries = p32->retries;
	p64->retry_time_ms = p32->retry_time_ms;
}

static void get_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv *p32)
{
	memset(p64, 0, sizeof(struct ipmi_recv));
	p64->recv_type = p32->recv_type;
	p64->addr = compat_ptr(p32->addr);
	p64->addr_len = p32->addr_len;
	p64->msgid = p32->msgid;
	get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
{
	struct compat_ipmi_recv v32;

	memset(&v32, 0, sizeof(struct compat_ipmi_recv));
	v32.recv_type = p64->recv_type;
	v32.addr = ptr_to_compat(p64->addr);
	v32.addr_len = p64->addr_len;
	v32.msgid = p64->msgid;
	v32.msg.netfn = p64->msg.netfn;
	v32.msg.cmd = p64->msg.cmd;
	v32.msg.data_len = p64->msg.data_len;
	v32.msg.data = ptr_to_compat(p64->msg.data);

	return copy_to_user(to, &v32, sizeof(v32)) ? -EFAULT : 0;
}

/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
			      unsigned long arg)
{
	struct ipmi_file_private *priv = filep->private_data;

	switch (cmd) {
	case COMPAT_IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req rp;
		struct compat_ipmi_req r32;

		if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32)))
			return -EFAULT;

		get_compat_ipmi_req(&rp, &r32);

		return handle_send_req(priv->user, &rp,
				       priv->default_retries,
				       priv->default_retry_time_ms);
	}
	case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime sp;
		struct compat_ipmi_req_settime sp32;

		if (copy_from_user(&sp32, compat_ptr(arg), sizeof(sp32)))
			return -EFAULT;

		get_compat_ipmi_req_settime(&sp, &sp32);

		return handle_send_req(priv->user, &sp.req,
				       sp.retries, sp.retry_time_ms);
	}
	case COMPAT_IPMICTL_RECEIVE_MSG:
	case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv recv64;
		struct compat_ipmi_recv recv32;

		if (copy_from_user(&recv32, compat_ptr(arg), sizeof(recv32)))
			return -EFAULT;

		get_compat_ipmi_recv(&recv64, &recv32);

		return handle_recv(priv,
				   cmd == COMPAT_IPMICTL_RECEIVE_MSG_TRUNC,
				   &recv64, copyout_recv32, compat_ptr(arg));
	}
	default:
		return ipmi_ioctl(filep, cmd, arg);
	}
}

static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
				       unsigned long arg)
{
	int ret;

	mutex_lock(&ipmi_mutex);
	ret = compat_ipmi_ioctl(filep, cmd, arg);
	mutex_unlock(&ipmi_mutex);

	return ret;
}
#endif
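
/*
 * File operations for the /dev/ipmiN character devices.  The open,
 * fasync and ioctl paths all serialize on ipmi_mutex (the ioctls via
 * the *unlocked* wrappers above); poll only takes the receive-queue
 * spinlock.
 */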
static const struct file_operations ipmi_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= ipmi_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= unlocked_compat_ipmi_ioctl,
#endif
	.open		= ipmi_open,
	.release	= ipmi_release,
	.fasync		= ipmi_fasync,
	.poll		= ipmi_poll,
	.llseek		= noop_llseek,
};

#define DEVICE_NAME "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
		 " default, or if you set it to zero, an available major"
		 " number will be chosen dynamically. Setting it to -1 will"
		 " disable the interface. Other values will set the major"
		 " device number to that value.");

/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
	dev_t            dev;
	struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;

static void ipmi_new_smi(int if_num, struct device *device)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		printk(KERN_ERR "ipmi_devintf: Unable to create the ipmi class device link\n");
		return;
	}
	entry->dev = dev;

	mutex_lock(&reg_list_mutex);
	device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
	list_add(&entry->link, &reg_list);
	mutex_unlock(&reg_list_mutex);
}

static void ipmi_smi_gone(int if_num)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry(entry, &reg_list, link) {
		if (entry->dev == dev) {
			list_del(&entry->link);
			kfree(entry);
			break;
		}
	}
	device_destroy(ipmi_class, dev);
	mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher =
{
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_new_smi,
	.smi_gone = ipmi_smi_gone,
};

static int __init init_ipmi_devintf(void)
{
	int rv;

	if (ipmi_major < 0)
		return -EINVAL;

	printk(KERN_INFO "ipmi device interface\n");

	ipmi_class = class_create(THIS_MODULE, "ipmi");
	if (IS_ERR(ipmi_class)) {
		printk(KERN_ERR "ipmi: can't register device class\n");
		return PTR_ERR(ipmi_class);
	}

	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
	if (rv < 0) {
		class_destroy(ipmi_class);
		printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
		return rv;
	}

	if (ipmi_major == 0)
		ipmi_major = rv;

	rv = ipmi_smi_watcher_register(&smi_watcher);
	if (rv) {
		unregister_chrdev(ipmi_major, DEVICE_NAME);
		class_destroy(ipmi_class);
		printk(KERN_WARNING "ipmi: can't register smi watcher\n");
		return rv;
	}

	return 0;
}
module_init(init_ipmi_devintf);

static void __exit cleanup_ipmi(void)
{
	struct ipmi_reg_list *entry, *entry2;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry_safe(entry, entry2, &reg_list, link) {
		list_del(&entry->link);
		device_destroy(ipmi_class, entry->dev);
		kfree(entry);
	}
	mutex_unlock(&reg_list_mutex);
	class_destroy(ipmi_class);
	ipmi_smi_watcher_unregister(&smi_watcher);
	unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");