/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>

struct ipmi_file_private
{
	ipmi_user_t          user;
	spinlock_t           recv_msg_lock;
	struct list_head     recv_msgs;
	struct file          *file;
	struct fasync_struct *fasync_queue;
	wait_queue_head_t    wait;
	struct mutex         recv_mutex;
	int                  default_retries;
	unsigned int         default_retry_time_ms;
};

static DEFINE_MUTEX(ipmi_mutex);

static void file_receive_handler(struct ipmi_recv_msg *msg,
				 void                 *handler_data)
{
	struct ipmi_file_private *priv = handler_data;
	int                      was_empty;
	unsigned long            flags;

	spin_lock_irqsave(&(priv->recv_msg_lock), flags);

	was_empty = list_empty(&(priv->recv_msgs));
	list_add_tail(&(msg->link), &(priv->recv_msgs));

	if (was_empty) {
		wake_up_interruptible(&priv->wait);
		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
	}

	spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}

static unsigned int ipmi_poll(struct file *file, poll_table *wait)
{
	struct ipmi_file_private *priv = file->private_data;
	unsigned int             mask = 0;
	unsigned long            flags;

	poll_wait(file, &priv->wait, wait);

	spin_lock_irqsave(&priv->recv_msg_lock, flags);

	if (!list_empty(&(priv->recv_msgs)))
		mask |= (POLLIN | POLLRDNORM);

	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	return mask;
}

static int ipmi_fasync(int fd, struct file *file, int on)
{
	struct ipmi_file_private *priv = file->private_data;
	int                      result;

	mutex_lock(&ipmi_mutex); /* could race against open() otherwise */
	result = fasync_helper(fd, file, on, &priv->fasync_queue);
	mutex_unlock(&ipmi_mutex);

	return (result);
}
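
/*
 * Illustrative note (a usage sketch, not part of the driver; the device
 * path is an example): a userspace reader can wait for queued responses
 * either with poll()/select() on the device fd (serviced by ipmi_poll()
 * above), or by requesting SIGIO delivery, which file_receive_handler()
 * triggers through kill_fasync() when the queue goes non-empty:
 *
 *	int fd = open("/dev/ipmi0", O_RDWR);
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *	// a SIGIO handler can now issue IPMICTL_RECEIVE_MSG
 */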

static struct ipmi_user_hndl ipmi_hndlrs =
{
	.ipmi_recv_hndl	= file_receive_handler,
};

static int ipmi_open(struct inode *inode, struct file *file)
{
	int                      if_num = iminor(inode);
	int                      rv;
	struct ipmi_file_private *priv;

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	mutex_lock(&ipmi_mutex);
	priv->file = file;

	rv = ipmi_create_user(if_num,
			      &ipmi_hndlrs,
			      priv,
			      &(priv->user));
	if (rv) {
		kfree(priv);
		goto out;
	}

	file->private_data = priv;

	spin_lock_init(&(priv->recv_msg_lock));
	INIT_LIST_HEAD(&(priv->recv_msgs));
	init_waitqueue_head(&priv->wait);
	priv->fasync_queue = NULL;
	mutex_init(&priv->recv_mutex);

	/* Use the low-level defaults. */
	priv->default_retries = -1;
	priv->default_retry_time_ms = 0;

out:
	mutex_unlock(&ipmi_mutex);
	return rv;
}

static int ipmi_release(struct inode *inode, struct file *file)
{
	struct ipmi_file_private *priv = file->private_data;
	int                      rv;
	struct ipmi_recv_msg     *msg, *next;

	rv = ipmi_destroy_user(priv->user);
	if (rv)
		return rv;

	list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
		ipmi_free_recv_msg(msg);

	kfree(priv);

	return 0;
}
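
/*
 * Illustrative note (a sketch, not part of the driver; values are examples):
 * handle_send_req() below expects a struct ipmi_req as filled in by
 * userspace, e.g. a Get Device ID request (App netfn 0x06, cmd 0x01)
 * addressed to the local BMC over the system interface:
 *
 *	struct ipmi_system_interface_addr bmc = {
 *		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *		.channel   = IPMI_BMC_CHANNEL,
 *		.lun       = 0,
 *	};
 *	struct ipmi_req req = {
 *		.addr     = (unsigned char *) &bmc,
 *		.addr_len = sizeof(bmc),
 *		.msgid    = 1,	// echoed back in the matching response
 *		.msg      = { .netfn = 0x06, .cmd = 0x01, .data_len = 0 },
 *	};
 *	ioctl(fd, IPMICTL_SEND_COMMAND, &req);
 */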

static int handle_send_req(ipmi_user_t     user,
			   struct ipmi_req *req,
			   int             retries,
			   unsigned int    retry_time_ms)
{
	int                    rv;
	struct ipmi_addr       addr;
	struct kernel_ipmi_msg msg;

	if (req->addr_len > sizeof(struct ipmi_addr))
		return -EINVAL;

	if (copy_from_user(&addr, req->addr, req->addr_len))
		return -EFAULT;

	msg.netfn = req->msg.netfn;
	msg.cmd = req->msg.cmd;
	msg.data_len = req->msg.data_len;
	msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!msg.data)
		return -ENOMEM;

	/* From here out we cannot return, we must jump to "out" for
	   error exits to free msgdata. */

	rv = ipmi_validate_addr(&addr, req->addr_len);
	if (rv)
		goto out;

	if (req->msg.data != NULL) {
		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
			rv = -EMSGSIZE;
			goto out;
		}

		if (copy_from_user(msg.data,
				   req->msg.data,
				   req->msg.data_len))
		{
			rv = -EFAULT;
			goto out;
		}
	} else {
		msg.data_len = 0;
	}

	rv = ipmi_request_settime(user,
				  &addr,
				  req->msgid,
				  &msg,
				  NULL,
				  0,
				  retries,
				  retry_time_ms);
 out:
	kfree(msg.data);
	return rv;
}

static int ipmi_ioctl(struct file   *file,
		      unsigned int  cmd,
		      unsigned long data)
{
	int                      rv = -EINVAL;
	struct ipmi_file_private *priv = file->private_data;
	void __user              *arg = (void __user *)data;

	switch (cmd)
	{
	case IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req,
				     priv->default_retries,
				     priv->default_retry_time_ms);
		break;
	}

	case IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req.req,
				     req.retries,
				     req.retry_time_ms);
		break;
	}

	case IPMICTL_RECEIVE_MSG:
	case IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv     rsp;
		int                  addr_len;
		struct list_head     *entry;
		struct ipmi_recv_msg *msg;
		unsigned long        flags;

		rv = 0;
		if (copy_from_user(&rsp, arg, sizeof(rsp))) {
			rv = -EFAULT;
			break;
		}

		/* We claim a mutex because we don't want two
		   users getting something from the queue at a time.
		   Since we have to release the spinlock before we can
		   copy the data to the user, it's possible another
		   user will grab something from the queue, too.  Then
		   the messages might get out of order if something
		   fails and the message gets put back onto the
		   queue.  This mutex prevents that problem. */
		mutex_lock(&priv->recv_mutex);
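
		/*
		 * Illustrative note (worked example of the race the comment
		 * above describes, not original source text): without
		 * recv_mutex, reader A could dequeue message 1 and reader B
		 * message 2; if A's copy_to_user() then failed, message 1
		 * would go back on the queue while B had already consumed
		 * message 2, so the messages would reach userspace out of
		 * order.
		 */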

		/* Grab the message off the list. */
		spin_lock_irqsave(&(priv->recv_msg_lock), flags);
		if (list_empty(&(priv->recv_msgs))) {
			spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
			rv = -EAGAIN;
			goto recv_err;
		}
		entry = priv->recv_msgs.next;
		msg = list_entry(entry, struct ipmi_recv_msg, link);
		list_del(entry);
		spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);

		addr_len = ipmi_addr_length(msg->addr.addr_type);
		if (rsp.addr_len < addr_len)
		{
			rv = -EINVAL;
			goto recv_putback_on_err;
		}

		if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}
		rsp.addr_len = addr_len;

		rsp.recv_type = msg->recv_type;
		rsp.msgid = msg->msgid;
		rsp.msg.netfn = msg->msg.netfn;
		rsp.msg.cmd = msg->msg.cmd;

		if (msg->msg.data_len > 0) {
			if (rsp.msg.data_len < msg->msg.data_len) {
				rv = -EMSGSIZE;
				if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
					msg->msg.data_len = rsp.msg.data_len;
				} else {
					goto recv_putback_on_err;
				}
			}

			if (copy_to_user(rsp.msg.data,
					 msg->msg.data,
					 msg->msg.data_len))
			{
				rv = -EFAULT;
				goto recv_putback_on_err;
			}
			rsp.msg.data_len = msg->msg.data_len;
		} else {
			rsp.msg.data_len = 0;
		}

		if (copy_to_user(arg, &rsp, sizeof(rsp))) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}

		mutex_unlock(&priv->recv_mutex);
		ipmi_free_recv_msg(msg);
		break;

	recv_putback_on_err:
		/* If we got an error, put the message back onto
		   the head of the queue. */
		spin_lock_irqsave(&(priv->recv_msg_lock), flags);
		list_add(entry, &(priv->recv_msgs));
		spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
		mutex_unlock(&priv->recv_mutex);
		break;

	recv_err:
		mutex_unlock(&priv->recv_mutex);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   val.chans);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     val.chans);
		break;
	}

	case IPMICTL_SET_GETS_EVENTS_CMD:
	{
		int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_gets_events(priv->user, val);
		break;
	}
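
	/*
	 * Illustrative note (usage sketch, not original source text): once
	 * events are enabled with IPMICTL_SET_GETS_EVENTS_CMD, asynchronous
	 * BMC events are queued like any other message and arrive through
	 * IPMICTL_RECEIVE_MSG with recv_type IPMI_ASYNC_EVENT_RECV_TYPE:
	 *
	 *	int on = 1;
	 *	ioctl(fd, IPMICTL_SET_GETS_EVENTS_CMD, &on);
	 */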

	/* The next four are legacy, not per-channel. */
	case IPMICTL_SET_MY_ADDRESS_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_ADDRESS_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_address(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_LUN_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_LUN_CMD:
	{
		unsigned int  val;
		unsigned char rval;

		rv = ipmi_get_my_LUN(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		if (copy_from_user(&parms, arg, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		priv->default_retries = parms.retries;
		priv->default_retry_time_ms = parms.retry_time_ms;
		rv = 0;
		break;
	}

	case IPMICTL_GET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		parms.retries = priv->default_retries;
		parms.retry_time_ms = priv->default_retry_time_ms;

		if (copy_to_user(arg, &parms, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		rv = 0;
		break;
	}

	case IPMICTL_GET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		mode = ipmi_get_maintenance_mode(priv->user);
		if (copy_to_user(arg, &mode, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = 0;
		break;
	}

	case IPMICTL_SET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		if (copy_from_user(&mode, arg, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = ipmi_set_maintenance_mode(priv->user, mode);
		break;
	}
	}

	return rv;
}
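
/*
 * Illustrative sketch of the ioctl interface above (not part of the driver;
 * the device path and buffer names are examples): a synchronous send/receive
 * round trip from userspace typically looks like
 *
 *	int fd = open("/dev/ipmi0", O_RDWR);
 *	ioctl(fd, IPMICTL_SEND_COMMAND, &req);	// struct ipmi_req, see above
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);			// wait for the response
 *
 *	struct ipmi_addr addr;
 *	unsigned char data_buf[IPMI_MAX_MSG_LENGTH];
 *	struct ipmi_recv recv = {
 *		.addr         = (unsigned char *) &addr,
 *		.addr_len     = sizeof(addr),
 *		.msg.data     = data_buf,
 *		.msg.data_len = sizeof(data_buf),
 *	};
 *	ioctl(fd, IPMICTL_RECEIVE_MSG_TRUNC, &recv);
 *
 * IPMICTL_RECEIVE_MSG_TRUNC truncates an oversized payload (the ioctl still
 * returns -EMSGSIZE), whereas IPMICTL_RECEIVE_MSG puts the message back on
 * the queue in that case.
 */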

/*
 * Note: it doesn't make sense to take the BKL here but
 *       not in compat_ipmi_ioctl. -arnd
 */
static long ipmi_unlocked_ioctl(struct file   *file,
				unsigned int  cmd,
				unsigned long data)
{
	int ret;

	mutex_lock(&ipmi_mutex);
	ret = ipmi_ioctl(file, cmd, data);
	mutex_unlock(&ipmi_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT

/*
 * The following code contains code for supporting 32-bit compatible
 * ioctls on 64-bit kernels.  This allows running 32-bit apps on the
 * 64-bit kernel
 */
#define COMPAT_IPMICTL_SEND_COMMAND	\
	_IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME	\
	_IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG	\
	_IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC	\
	_IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)

struct compat_ipmi_msg {
	u8		netfn;
	u8		cmd;
	u16		data_len;
	compat_uptr_t	data;
};

struct compat_ipmi_req {
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_recv {
	compat_int_t		recv_type;
	compat_uptr_t		addr;
	compat_uint_t		addr_len;
	compat_long_t		msgid;
	struct compat_ipmi_msg	msg;
};

struct compat_ipmi_req_settime {
	struct compat_ipmi_req	req;
	compat_int_t		retries;
	compat_uint_t		retry_time_ms;
};

/*
 * Define some helper functions for copying IPMI data
 */
static long get_compat_ipmi_msg(struct ipmi_msg *p64,
				struct compat_ipmi_msg __user *p32)
{
	compat_uptr_t tmp;

	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
			__get_user(p64->netfn, &p32->netfn) ||
			__get_user(p64->cmd, &p32->cmd) ||
			__get_user(p64->data_len, &p32->data_len) ||
			__get_user(tmp, &p32->data))
		return -EFAULT;
	p64->data = compat_ptr(tmp);
	return 0;
}

static long put_compat_ipmi_msg(struct ipmi_msg *p64,
				struct compat_ipmi_msg __user *p32)
{
	if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
			__put_user(p64->netfn, &p32->netfn) ||
			__put_user(p64->cmd, &p32->cmd) ||
			__put_user(p64->data_len, &p32->data_len))
		return -EFAULT;
	return 0;
}

static long get_compat_ipmi_req(struct ipmi_req *p64,
				struct compat_ipmi_req __user *p32)
{
	compat_uptr_t tmp;

	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
			__get_user(tmp, &p32->addr) ||
			__get_user(p64->addr_len, &p32->addr_len) ||
			__get_user(p64->msgid, &p32->msgid) ||
			get_compat_ipmi_msg(&p64->msg, &p32->msg))
		return -EFAULT;
	p64->addr = compat_ptr(tmp);
	return 0;
}

static long get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
		struct compat_ipmi_req_settime __user *p32)
{
	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
			get_compat_ipmi_req(&p64->req, &p32->req) ||
			__get_user(p64->retries, &p32->retries) ||
			__get_user(p64->retry_time_ms, &p32->retry_time_ms))
		return -EFAULT;
	return 0;
}
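
/*
 * Illustrative note (sizes assume a typical LP64 kernel, not taken from the
 * original source): these helpers exist because the user-visible structs
 * embed pointers, so their 32-bit and 64-bit layouts differ.  For example,
 * struct compat_ipmi_msg packs netfn/cmd/data_len plus a 32-bit
 * compat_uptr_t into 8 bytes, while the native struct ipmi_msg is 16 bytes
 * once the 64-bit data pointer and its alignment padding are counted.  Each
 * get_* helper therefore widens the compat pointer with compat_ptr() before
 * the native ioctl paths use it.
 */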

static long get_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv __user *p32)
{
	compat_uptr_t tmp;

	if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
			__get_user(p64->recv_type, &p32->recv_type) ||
			__get_user(tmp, &p32->addr) ||
			__get_user(p64->addr_len, &p32->addr_len) ||
			__get_user(p64->msgid, &p32->msgid) ||
			get_compat_ipmi_msg(&p64->msg, &p32->msg))
		return -EFAULT;
	p64->addr = compat_ptr(tmp);
	return 0;
}

static long put_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv __user *p32)
{
	if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
			__put_user(p64->recv_type, &p32->recv_type) ||
			__put_user(p64->addr_len, &p32->addr_len) ||
			__put_user(p64->msgid, &p32->msgid) ||
			put_compat_ipmi_msg(&p64->msg, &p32->msg))
		return -EFAULT;
	return 0;
}

/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
			      unsigned long arg)
{
	int rc;
	struct ipmi_file_private *priv = filep->private_data;

	switch(cmd) {
	case COMPAT_IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req	rp;

		if (get_compat_ipmi_req(&rp, compat_ptr(arg)))
			return -EFAULT;

		return handle_send_req(priv->user, &rp,
				priv->default_retries,
				priv->default_retry_time_ms);
	}
	case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime	sp;

		if (get_compat_ipmi_req_settime(&sp, compat_ptr(arg)))
			return -EFAULT;

		return handle_send_req(priv->user, &sp.req,
				sp.retries, sp.retry_time_ms);
	}
	case COMPAT_IPMICTL_RECEIVE_MSG:
	case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv __user	*precv64;
		struct ipmi_recv	recv64;

		memset(&recv64, 0, sizeof(recv64));
		if (get_compat_ipmi_recv(&recv64, compat_ptr(arg)))
			return -EFAULT;

		precv64 = compat_alloc_user_space(sizeof(recv64));
		if (copy_to_user(precv64, &recv64, sizeof(recv64)))
			return -EFAULT;

		rc = ipmi_ioctl(filep,
				((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
				 ? IPMICTL_RECEIVE_MSG
				 : IPMICTL_RECEIVE_MSG_TRUNC),
				(unsigned long) precv64);
		if (rc != 0)
			return rc;

		if (copy_from_user(&recv64, precv64, sizeof(recv64)))
			return -EFAULT;

		if (put_compat_ipmi_recv(&recv64, compat_ptr(arg)))
			return -EFAULT;

		return rc;
	}
	default:
		return ipmi_ioctl(filep, cmd, arg);
	}
}

static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
				       unsigned long arg)
{
	int ret;

	mutex_lock(&ipmi_mutex);
	ret = compat_ipmi_ioctl(filep, cmd, arg);
	mutex_unlock(&ipmi_mutex);

	return ret;
}
#endif

static const struct file_operations ipmi_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= ipmi_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= unlocked_compat_ipmi_ioctl,
#endif
	.open		= ipmi_open,
	.release	= ipmi_release,
	.fasync		= ipmi_fasync,
	.poll		= ipmi_poll,
	.llseek		= noop_llseek,
};

#define DEVICE_NAME     "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device.  By"
		 " default, or if you set it to zero, it will choose the next"
		 " available device.  Setting it to -1 will disable the"
		 " interface.  Other values will set the major device number"
		 " to that value.");
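
/*
 * Usage sketch for the parameter above (illustrative; assumes the module is
 * built and installed as ipmi_devintf):
 *
 *	modprobe ipmi_devintf			# pick the next free major
 *	modprobe ipmi_devintf ipmi_major=-1	# disable the interface
 */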

/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
	dev_t            dev;
	struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;

static void ipmi_new_smi(int if_num, struct device *device)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		printk(KERN_ERR "ipmi_devintf: Unable to create the"
		       " ipmi class device link\n");
		return;
	}
	entry->dev = dev;

	mutex_lock(&reg_list_mutex);
	device_create(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
	list_add(&entry->link, &reg_list);
	mutex_unlock(&reg_list_mutex);
}

static void ipmi_smi_gone(int if_num)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry(entry, &reg_list, link) {
		if (entry->dev == dev) {
			list_del(&entry->link);
			kfree(entry);
			break;
		}
	}
	device_destroy(ipmi_class, dev);
	mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher =
{
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_new_smi,
	.smi_gone = ipmi_smi_gone,
};

static int __init init_ipmi_devintf(void)
{
	int rv;

	if (ipmi_major < 0)
		return -EINVAL;

	printk(KERN_INFO "ipmi device interface\n");

	ipmi_class = class_create(THIS_MODULE, "ipmi");
	if (IS_ERR(ipmi_class)) {
		printk(KERN_ERR "ipmi: can't register device class\n");
		return PTR_ERR(ipmi_class);
	}

	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
	if (rv < 0) {
		class_destroy(ipmi_class);
		printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
		return rv;
	}

	if (ipmi_major == 0) {
		ipmi_major = rv;
	}

	rv = ipmi_smi_watcher_register(&smi_watcher);
	if (rv) {
		unregister_chrdev(ipmi_major, DEVICE_NAME);
		class_destroy(ipmi_class);
		printk(KERN_WARNING "ipmi: can't register smi watcher\n");
		return rv;
	}

	return 0;
}
module_init(init_ipmi_devintf);

static void __exit cleanup_ipmi(void)
{
	struct ipmi_reg_list *entry, *entry2;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry_safe(entry, entry2, &reg_list, link) {
		list_del(&entry->link);
		device_destroy(ipmi_class, entry->dev);
		kfree(entry);
	}
	mutex_unlock(&reg_list_mutex);
	class_destroy(ipmi_class);
	ipmi_smi_watcher_unregister(&smi_watcher);
	unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
MODULE_ALIAS("platform:ipmi_si");