/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>
#include <linux/smp_lock.h>

struct ipmi_file_private
{
        ipmi_user_t          user;
        spinlock_t           recv_msg_lock;
        struct list_head     recv_msgs;
        struct file          *file;
        struct fasync_struct *fasync_queue;
        wait_queue_head_t    wait;
        struct mutex         recv_mutex;
        int                  default_retries;
        unsigned int         default_retry_time_ms;
};

static void file_receive_handler(struct ipmi_recv_msg *msg,
                                 void                 *handler_data)
{
        struct ipmi_file_private *priv = handler_data;
        int                      was_empty;
        unsigned long            flags;

        spin_lock_irqsave(&(priv->recv_msg_lock), flags);

        was_empty = list_empty(&(priv->recv_msgs));
        list_add_tail(&(msg->link), &(priv->recv_msgs));

        if (was_empty) {
                wake_up_interruptible(&priv->wait);
                kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
        }

        spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}

static unsigned int ipmi_poll(struct file *file, poll_table *wait)
{
        struct ipmi_file_private *priv = file->private_data;
        unsigned int             mask = 0;
        unsigned long            flags;

        poll_wait(file, &priv->wait, wait);

        spin_lock_irqsave(&priv->recv_msg_lock, flags);

        if (!list_empty(&(priv->recv_msgs)))
                mask |= (POLLIN | POLLRDNORM);

        spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

        return mask;
}

static int ipmi_fasync(int fd, struct file *file, int on)
{
        struct ipmi_file_private *priv = file->private_data;
        int                      result;

        lock_kernel(); /* could race against open() otherwise */
        result = fasync_helper(fd, file, on, &priv->fasync_queue);
        unlock_kernel();

        return (result);
}

static struct ipmi_user_hndl ipmi_hndlrs =
{
        .ipmi_recv_hndl = file_receive_handler,
};
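/*
 * Illustrative only, not part of this driver: a minimal userspace
 * sketch of how a process might wait for queued messages before
 * issuing a receive ioctl.  The device path and the helper name are
 * assumptions made for the example.
 *
 *      #include <fcntl.h>
 *      #include <poll.h>
 *
 *      int wait_for_ipmi_msg(int fd)   // fd from open("/dev/ipmi0", O_RDWR)
 *      {
 *              struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *              // Blocks until file_receive_handler() queues a message and
 *              // wakes priv->wait; after that a receive ioctl should not
 *              // return -EAGAIN.
 *              return poll(&pfd, 1, -1);
 *      }
 */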
static int ipmi_open(struct inode *inode, struct file *file)
{
        int                      if_num = iminor(inode);
        int                      rv;
        struct ipmi_file_private *priv;

        priv = kmalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        lock_kernel();
        priv->file = file;

        rv = ipmi_create_user(if_num,
                              &ipmi_hndlrs,
                              priv,
                              &(priv->user));
        if (rv) {
                kfree(priv);
                goto out;
        }

        file->private_data = priv;

        spin_lock_init(&(priv->recv_msg_lock));
        INIT_LIST_HEAD(&(priv->recv_msgs));
        init_waitqueue_head(&priv->wait);
        priv->fasync_queue = NULL;
        mutex_init(&priv->recv_mutex);

        /* Use the low-level defaults. */
        priv->default_retries = -1;
        priv->default_retry_time_ms = 0;

 out:
        unlock_kernel();
        return rv;
}

static int ipmi_release(struct inode *inode, struct file *file)
{
        struct ipmi_file_private *priv = file->private_data;
        struct ipmi_recv_msg     *msg, *next;
        int                      rv;

        rv = ipmi_destroy_user(priv->user);
        if (rv)
                return rv;

        ipmi_fasync(-1, file, 0);

        /* Free any messages left on the receive queue. */
        list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
                ipmi_free_recv_msg(msg);

        kfree(priv);

        return 0;
}

static int handle_send_req(ipmi_user_t     user,
                           struct ipmi_req *req,
                           int             retries,
                           unsigned int    retry_time_ms)
{
        int              rv;
        struct ipmi_addr addr;
        struct kernel_ipmi_msg msg;

        if (req->addr_len > sizeof(struct ipmi_addr))
                return -EINVAL;

        if (copy_from_user(&addr, req->addr, req->addr_len))
                return -EFAULT;

        msg.netfn = req->msg.netfn;
        msg.cmd = req->msg.cmd;
        msg.data_len = req->msg.data_len;
        msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
        if (!msg.data)
                return -ENOMEM;

        /* From here out we cannot return, we must jump to "out" for
           error exits to free msg.data. */

        rv = ipmi_validate_addr(&addr, req->addr_len);
        if (rv)
                goto out;

        if (req->msg.data != NULL) {
                if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
                        rv = -EMSGSIZE;
                        goto out;
                }

                if (copy_from_user(msg.data,
                                   req->msg.data,
                                   req->msg.data_len))
                {
                        rv = -EFAULT;
                        goto out;
                }
        } else {
                msg.data_len = 0;
        }

        rv = ipmi_request_settime(user,
                                  &addr,
                                  req->msgid,
                                  &msg,
                                  NULL,
                                  0,
                                  retries,
                                  retry_time_ms);
 out:
        kfree(msg.data);
        return rv;
}

static int ipmi_ioctl(struct inode  *inode,
                      struct file   *file,
                      unsigned int  cmd,
                      unsigned long data)
{
        int rv = -EINVAL;
        struct ipmi_file_private *priv = file->private_data;
        void __user *arg = (void __user *)data;

        switch (cmd)
        {
        case IPMICTL_SEND_COMMAND:
        {
                struct ipmi_req req;

                if (copy_from_user(&req, arg, sizeof(req))) {
                        rv = -EFAULT;
                        break;
                }

                rv = handle_send_req(priv->user,
                                     &req,
                                     priv->default_retries,
                                     priv->default_retry_time_ms);
                break;
        }

        case IPMICTL_SEND_COMMAND_SETTIME:
        {
                struct ipmi_req_settime req;

                if (copy_from_user(&req, arg, sizeof(req))) {
                        rv = -EFAULT;
                        break;
                }

                rv = handle_send_req(priv->user,
                                     &req.req,
                                     req.retries,
                                     req.retry_time_ms);
                break;
        }

        case IPMICTL_RECEIVE_MSG:
        case IPMICTL_RECEIVE_MSG_TRUNC:
        {
                struct ipmi_recv     rsp;
                int                  addr_len;
                struct list_head     *entry;
                struct ipmi_recv_msg *msg;
                unsigned long        flags;

                rv = 0;
                if (copy_from_user(&rsp, arg, sizeof(rsp))) {
                        rv = -EFAULT;
                        break;
                }

                /* We claim a mutex because we don't want two
                   users getting something from the queue at a time.
                   Since we have to release the spinlock before we can
                   copy the data to the user, it's possible another
                   user will grab something from the queue, too.  Then
                   the messages might get out of order if something
                   fails and the message gets put back onto the
                   queue.  This mutex prevents that problem. */
                mutex_lock(&priv->recv_mutex);

                /* Grab the message off the list. */
                spin_lock_irqsave(&(priv->recv_msg_lock), flags);
                if (list_empty(&(priv->recv_msgs))) {
                        spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
                        rv = -EAGAIN;
                        goto recv_err;
                }
                entry = priv->recv_msgs.next;
                msg = list_entry(entry, struct ipmi_recv_msg, link);
                list_del(entry);
                spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);

                addr_len = ipmi_addr_length(msg->addr.addr_type);
                if (rsp.addr_len < addr_len)
                {
                        rv = -EINVAL;
                        goto recv_putback_on_err;
                }

                if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
                        rv = -EFAULT;
                        goto recv_putback_on_err;
                }
                rsp.addr_len = addr_len;

                rsp.recv_type = msg->recv_type;
                rsp.msgid = msg->msgid;
                rsp.msg.netfn = msg->msg.netfn;
                rsp.msg.cmd = msg->msg.cmd;

                if (msg->msg.data_len > 0) {
                        if (rsp.msg.data_len < msg->msg.data_len) {
                                rv = -EMSGSIZE;
                                if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
                                        msg->msg.data_len = rsp.msg.data_len;
                                } else {
                                        goto recv_putback_on_err;
                                }
                        }

                        if (copy_to_user(rsp.msg.data,
                                         msg->msg.data,
                                         msg->msg.data_len))
                        {
                                rv = -EFAULT;
                                goto recv_putback_on_err;
                        }
                        rsp.msg.data_len = msg->msg.data_len;
                } else {
                        rsp.msg.data_len = 0;
                }

                if (copy_to_user(arg, &rsp, sizeof(rsp))) {
                        rv = -EFAULT;
                        goto recv_putback_on_err;
                }

                mutex_unlock(&priv->recv_mutex);
                ipmi_free_recv_msg(msg);
                break;

        recv_putback_on_err:
                /* If we got an error, put the message back onto
                   the head of the queue. */
                spin_lock_irqsave(&(priv->recv_msg_lock), flags);
                list_add(entry, &(priv->recv_msgs));
                spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
                mutex_unlock(&priv->recv_mutex);
                break;

        recv_err:
                mutex_unlock(&priv->recv_mutex);
                break;
        }

        case IPMICTL_REGISTER_FOR_CMD:
        {
                struct ipmi_cmdspec val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
                                           IPMI_CHAN_ALL);
                break;
        }

        case IPMICTL_UNREGISTER_FOR_CMD:
        {
                struct ipmi_cmdspec val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
                                             IPMI_CHAN_ALL);
                break;
        }

        case IPMICTL_REGISTER_FOR_CMD_CHANS:
        {
                struct ipmi_cmdspec_chans val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
                                           val.chans);
                break;
        }

        case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
        {
                struct ipmi_cmdspec_chans val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
                                             val.chans);
                break;
        }

        case IPMICTL_SET_GETS_EVENTS_CMD:
        {
                int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_gets_events(priv->user, val);
                break;
        }

        /* The next four are legacy, not per-channel. */
        case IPMICTL_SET_MY_ADDRESS_CMD:
        {
                unsigned int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_address(priv->user, 0, val);
                break;
        }

        case IPMICTL_GET_MY_ADDRESS_CMD:
        {
                unsigned int  val;
                unsigned char rval;

                rv = ipmi_get_my_address(priv->user, 0, &rval);
                if (rv)
                        break;

                val = rval;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_LUN_CMD:
        {
                unsigned int val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_LUN(priv->user, 0, val);
                break;
        }

        case IPMICTL_GET_MY_LUN_CMD:
        {
                unsigned int  val;
                unsigned char rval;

                rv = ipmi_get_my_LUN(priv->user, 0, &rval);
                if (rv)
                        break;

                val = rval;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_address(priv->user, val.channel, val.value);
                break;
        }

        case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
                if (rv)
                        break;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
                break;
        }

        case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
        {
                struct ipmi_channel_lun_address_set val;

                if (copy_from_user(&val, arg, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }

                rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
                if (rv)
                        break;

                if (copy_to_user(arg, &val, sizeof(val))) {
                        rv = -EFAULT;
                        break;
                }
                break;
        }

        case IPMICTL_SET_TIMING_PARMS_CMD:
        {
                struct ipmi_timing_parms parms;

                if (copy_from_user(&parms, arg, sizeof(parms))) {
                        rv = -EFAULT;
                        break;
                }

                priv->default_retries = parms.retries;
                priv->default_retry_time_ms = parms.retry_time_ms;
                rv = 0;
                break;
        }

        case IPMICTL_GET_TIMING_PARMS_CMD:
        {
                struct ipmi_timing_parms parms;

                parms.retries = priv->default_retries;
                parms.retry_time_ms = priv->default_retry_time_ms;

                if (copy_to_user(arg, &parms, sizeof(parms))) {
                        rv = -EFAULT;
                        break;
                }

                rv = 0;
                break;
        }

        case IPMICTL_GET_MAINTENANCE_MODE_CMD:
        {
                int mode;

                mode = ipmi_get_maintenance_mode(priv->user);
                if (copy_to_user(arg, &mode, sizeof(mode))) {
                        rv = -EFAULT;
                        break;
                }
                rv = 0;
                break;
        }

        case IPMICTL_SET_MAINTENANCE_MODE_CMD:
        {
                int mode;

                if (copy_from_user(&mode, arg, sizeof(mode))) {
                        rv = -EFAULT;
                        break;
                }
                rv = ipmi_set_maintenance_mode(priv->user, mode);
                break;
        }
        }

        return rv;
}
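/*
 * Illustrative only, not part of this driver: a minimal userspace
 * sketch of the send/receive ioctl pair handled above, sending
 * Get Device ID (NetFn 0x06, cmd 0x01) to the local BMC over the
 * system interface.  The function name, msgid value and buffer
 * handling are assumptions made for the example.
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/ipmi.h>
 *
 *      int get_device_id(int fd, unsigned char *buf, unsigned short len)
 *      {
 *              struct ipmi_system_interface_addr si = {
 *                      .addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
 *                      .channel   = IPMI_BMC_CHANNEL,
 *              };
 *              struct ipmi_addr rsp_addr;
 *              struct ipmi_req req = {
 *                      .addr     = (unsigned char *) &si,
 *                      .addr_len = sizeof(si),
 *                      .msgid    = 1,
 *                      .msg      = { .netfn = 0x06, .cmd = 0x01 },
 *              };
 *              struct ipmi_recv recv = {
 *                      .addr         = (unsigned char *) &rsp_addr,
 *                      .addr_len     = sizeof(rsp_addr),
 *                      .msg.data     = buf,
 *                      .msg.data_len = len,
 *              };
 *
 *              if (ioctl(fd, IPMICTL_SEND_COMMAND, &req) < 0)
 *                      return -1;
 *              // Wait (poll/select) for the reply, then fetch it; the
 *              // TRUNC variant tolerates a reply larger than the buffer.
 *              return ioctl(fd, IPMICTL_RECEIVE_MSG_TRUNC, &recv);
 *      }
 */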
#ifdef CONFIG_COMPAT

/*
 * The following code contains code for supporting 32-bit compatible
 * ioctls on 64-bit kernels.  This allows running 32-bit apps on the
 * 64-bit kernel
 */
#define COMPAT_IPMICTL_SEND_COMMAND \
        _IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME \
        _IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG \
        _IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC \
        _IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)

struct compat_ipmi_msg {
        u8            netfn;
        u8            cmd;
        u16           data_len;
        compat_uptr_t data;
};

struct compat_ipmi_req {
        compat_uptr_t          addr;
        compat_uint_t          addr_len;
        compat_long_t          msgid;
        struct compat_ipmi_msg msg;
};

struct compat_ipmi_recv {
        compat_int_t           recv_type;
        compat_uptr_t          addr;
        compat_uint_t          addr_len;
        compat_long_t          msgid;
        struct compat_ipmi_msg msg;
};

struct compat_ipmi_req_settime {
        struct compat_ipmi_req req;
        compat_int_t           retries;
        compat_uint_t          retry_time_ms;
};

/*
 * Define some helper functions for copying IPMI data
 */
static long get_compat_ipmi_msg(struct ipmi_msg *p64,
                                struct compat_ipmi_msg __user *p32)
{
        compat_uptr_t tmp;

        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            __get_user(p64->netfn, &p32->netfn) ||
            __get_user(p64->cmd, &p32->cmd) ||
            __get_user(p64->data_len, &p32->data_len) ||
            __get_user(tmp, &p32->data))
                return -EFAULT;
        p64->data = compat_ptr(tmp);
        return 0;
}

static long put_compat_ipmi_msg(struct ipmi_msg *p64,
                                struct compat_ipmi_msg __user *p32)
{
        if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
            __put_user(p64->netfn, &p32->netfn) ||
            __put_user(p64->cmd, &p32->cmd) ||
            __put_user(p64->data_len, &p32->data_len))
                return -EFAULT;
        return 0;
}

static long get_compat_ipmi_req(struct ipmi_req *p64,
                                struct compat_ipmi_req __user *p32)
{
        compat_uptr_t tmp;

        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            __get_user(tmp, &p32->addr) ||
            __get_user(p64->addr_len, &p32->addr_len) ||
            __get_user(p64->msgid, &p32->msgid) ||
            get_compat_ipmi_msg(&p64->msg, &p32->msg))
                return -EFAULT;
        p64->addr = compat_ptr(tmp);
        return 0;
}

static long get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
                                        struct compat_ipmi_req_settime __user *p32)
{
        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            get_compat_ipmi_req(&p64->req, &p32->req) ||
            __get_user(p64->retries, &p32->retries) ||
            __get_user(p64->retry_time_ms, &p32->retry_time_ms))
                return -EFAULT;
        return 0;
}

static long get_compat_ipmi_recv(struct ipmi_recv *p64,
                                 struct compat_ipmi_recv __user *p32)
{
        compat_uptr_t tmp;

        if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
            __get_user(p64->recv_type, &p32->recv_type) ||
            __get_user(tmp, &p32->addr) ||
            __get_user(p64->addr_len, &p32->addr_len) ||
            __get_user(p64->msgid, &p32->msgid) ||
            get_compat_ipmi_msg(&p64->msg, &p32->msg))
                return -EFAULT;
        p64->addr = compat_ptr(tmp);
        return 0;
}

static long put_compat_ipmi_recv(struct ipmi_recv *p64,
                                 struct compat_ipmi_recv __user *p32)
{
        if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
            __put_user(p64->recv_type, &p32->recv_type) ||
            __put_user(p64->addr_len, &p32->addr_len) ||
            __put_user(p64->msgid, &p32->msgid) ||
            put_compat_ipmi_msg(&p64->msg, &p32->msg))
                return -EFAULT;
        return 0;
}

/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
                              unsigned long arg)
{
        int rc;
        struct ipmi_file_private *priv = filep->private_data;

        switch(cmd) {
        case COMPAT_IPMICTL_SEND_COMMAND:
        {
                struct ipmi_req rp;

                if (get_compat_ipmi_req(&rp, compat_ptr(arg)))
                        return -EFAULT;

                return handle_send_req(priv->user, &rp,
                                       priv->default_retries,
                                       priv->default_retry_time_ms);
        }
        case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
        {
                struct ipmi_req_settime sp;

                if (get_compat_ipmi_req_settime(&sp, compat_ptr(arg)))
                        return -EFAULT;

                return handle_send_req(priv->user, &sp.req,
                                       sp.retries, sp.retry_time_ms);
        }
        case COMPAT_IPMICTL_RECEIVE_MSG:
        case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
        {
                struct ipmi_recv __user *precv64;
                struct ipmi_recv        recv64;

                if (get_compat_ipmi_recv(&recv64, compat_ptr(arg)))
                        return -EFAULT;

                precv64 = compat_alloc_user_space(sizeof(recv64));
                if (copy_to_user(precv64, &recv64, sizeof(recv64)))
                        return -EFAULT;

                rc = ipmi_ioctl(filep->f_path.dentry->d_inode, filep,
                                ((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
                                 ? IPMICTL_RECEIVE_MSG
                                 : IPMICTL_RECEIVE_MSG_TRUNC),
                                (unsigned long) precv64);
                if (rc != 0)
                        return rc;

                if (copy_from_user(&recv64, precv64, sizeof(recv64)))
                        return -EFAULT;

                if (put_compat_ipmi_recv(&recv64, compat_ptr(arg)))
                        return -EFAULT;

                return rc;
        }
        default:
                return ipmi_ioctl(filep->f_path.dentry->d_inode, filep, cmd, arg);
        }
}
#endif

static const struct file_operations ipmi_fops = {
        .owner          = THIS_MODULE,
        .ioctl          = ipmi_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_ipmi_ioctl,
#endif
        .open           = ipmi_open,
        .release        = ipmi_release,
        .fasync         = ipmi_fasync,
        .poll           = ipmi_poll,
};

#define DEVICE_NAME "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device.  By"
                 " default, or if you set it to zero, it will choose the next"
                 " available device.  Setting it to -1 will disable the"
                 " interface.  Other values will set the major device number"
                 " to that value.");

/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
        dev_t            dev;
        struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static struct class *ipmi_class;

static void ipmi_new_smi(int if_num, struct device *device)
{
        dev_t dev = MKDEV(ipmi_major, if_num);
        struct ipmi_reg_list *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
                printk(KERN_ERR "ipmi_devintf: Unable to create the"
                       " ipmi class device link\n");
                return;
        }
        entry->dev = dev;

        mutex_lock(&reg_list_mutex);
        device_create_drvdata(ipmi_class, device, dev, NULL, "ipmi%d", if_num);
        list_add(&entry->link, &reg_list);
        mutex_unlock(&reg_list_mutex);
}

static void ipmi_smi_gone(int if_num)
{
        dev_t dev = MKDEV(ipmi_major, if_num);
        struct ipmi_reg_list *entry;

        mutex_lock(&reg_list_mutex);
        list_for_each_entry(entry, &reg_list, link) {
                if (entry->dev == dev) {
                        list_del(&entry->link);
                        kfree(entry);
                        break;
                }
        }
        device_destroy(ipmi_class, dev);
        mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher =
{
        .owner    = THIS_MODULE,
        .new_smi  = ipmi_new_smi,
        .smi_gone = ipmi_smi_gone,
};

static __init int init_ipmi_devintf(void)
{
        int rv;

        if (ipmi_major < 0)
                return -EINVAL;

        printk(KERN_INFO "ipmi device interface\n");

        ipmi_class = class_create(THIS_MODULE, "ipmi");
        if (IS_ERR(ipmi_class)) {
                printk(KERN_ERR "ipmi: can't register device class\n");
                return PTR_ERR(ipmi_class);
        }

        rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
        if (rv < 0) {
                class_destroy(ipmi_class);
                printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
                return rv;
        }

        if (ipmi_major == 0) {
                ipmi_major = rv;
        }

        rv = ipmi_smi_watcher_register(&smi_watcher);
        if (rv) {
                unregister_chrdev(ipmi_major, DEVICE_NAME);
                class_destroy(ipmi_class);
                printk(KERN_WARNING "ipmi: can't register smi watcher\n");
                return rv;
        }

        return 0;
}
module_init(init_ipmi_devintf);

static __exit void cleanup_ipmi(void)
{
        struct ipmi_reg_list *entry, *entry2;
        mutex_lock(&reg_list_mutex);
        list_for_each_entry_safe(entry, entry2, &reg_list, link) {
                list_del(&entry->link);
                device_destroy(ipmi_class, entry->dev);
                kfree(entry);
        }
        mutex_unlock(&reg_list_mutex);
        class_destroy(ipmi_class);
        ipmi_smi_watcher_unregister(&smi_watcher);
        unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
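/*
 * Illustrative only, not part of this driver: a minimal userspace
 * sketch of the command-registration and event ioctls handled in
 * ipmi_ioctl() above.  The function name is an assumption made for
 * the example.
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/ipmi.h>
 *
 *      int listen_on_all_channels(int fd, unsigned char netfn,
 *                                 unsigned char cmd)
 *      {
 *              struct ipmi_cmdspec spec = { .netfn = netfn, .cmd = cmd };
 *              int enable = 1;
 *
 *              // Matching incoming commands will be queued on this
 *              // file's receive list (the driver uses IPMI_CHAN_ALL).
 *              if (ioctl(fd, IPMICTL_REGISTER_FOR_CMD, &spec) < 0)
 *                      return -1;
 *              // Also deliver asynchronous BMC events to this user.
 *              return ioctl(fd, IPMICTL_SET_GETS_EVENTS_CMD, &enable);
 *      }
 */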