/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl *cl;

	int err;

	dev = container_of(inode->i_cdev, struct mei_device, cdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	cl = NULL;

	err = -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		goto err_unlock;
	}

	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto err_unlock;

	/* open_handle_count check is handled in the mei_cl_link */
	err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
	if (err)
		goto err_unlock;

	file->private_data = cl;

	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

err_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
	return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_release(dev, file);
		goto out;
	}
	if (cl->state == MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTING;
		cl_dbg(dev, cl, "disconnecting\n");
		rets = mei_cl_disconnect(cl);
	}
	mei_cl_flush_queues(cl);
	cl_dbg(dev, cl, "removing\n");

	mei_cl_unlink(cl);

	/* free read cb */
	cb = NULL;
	if (cl->read_cb) {
		cb = mei_cl_find_read_cb(cl);
		/* Remove entry from read list */
		if (cb)
			list_del(&cb->list);

		cb = cl->read_cb;
		cl->read_cb = NULL;
	}

	file->private_data = NULL;

	mei_io_cb_free(cb);

	kfree(cl);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int rets;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_read(dev, file, ubuf, length, offset);
		goto out;
	}

	if (cl->read_cb) {
		cb = cl->read_cb;
		/* read what is left from the previous buffer */
		if (cb->buf_idx > *offset)
			goto copy_buffer;
		/* offset is beyond buf_idx, we have no more data: return 0 */
		if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
			rets = 0;
			goto free;
		}
		/* Offset needs to be cleaned for contiguous reads */
		if (cb->buf_idx == 0 && *offset > 0)
			*offset = 0;
	} else if (*offset > 0) {
		*offset = 0;
	}

	err = mei_cl_read_start(cl, length);
	if (err && err != -EBUSY) {
		dev_dbg(dev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
	    !waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
				MEI_READ_COMPLETE == cl->reading_state ||
				mei_cl_is_transitioning(cl))) {

			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (mei_cl_is_transitioning(cl)) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(dev->dev, "buf.size = %d buf.idx= %ld\n",
		cb->response_buffer.size, cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		dev_dbg(dev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = mei_cl_find_read_cb(cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->list);
	mei_io_cb_free(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
out:
	dev_dbg(dev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
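
/*
 * Read-side usage sketch (userspace, illustrative only; MAX_MSG_LEN stands
 * for the max_msg_length reported in out_client_properties by
 * IOCTL_MEI_CONNECT_CLIENT):
 *
 *	unsigned char buf[MAX_MSG_LEN];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * A buffer of max_msg_length bytes is large enough for any single firmware
 * message, so a blocking read() returns one complete message; with
 * O_NONBLOCK the call fails with EAGAIN while no message is pending.
 */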

/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_me_client *me_cl;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
	if (!me_cl) {
		rets = -ENOTTY;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (length > me_cl->props.max_msg_length) {
		rets = -EFBIG;
		goto out;
	}

	if (cl->state != MEI_FILE_CONNECTED) {
		dev_err(dev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id, cl->me_client_id);
		rets = -ENODEV;
		goto out;
	}
	if (cl == &dev->iamthif_cl) {
		write_cb = mei_amthif_find_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
				mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->list);
				mei_io_cb_free(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = mei_cl_find_read_cb(cl);
		if (write_cb) {
			list_del(&write_cb->list);
			mei_io_cb_free(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
		}
	} else if (cl->reading_state == MEI_IDLE)
		*offset = 0;

	write_cb = mei_io_cb_init(cl, file);
	if (!write_cb) {
		rets = -ENOMEM;
		goto out;
	}
	rets = mei_io_cb_alloc_req_buf(write_cb, length);
	if (rets)
		goto out;

	rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
	if (rets) {
		dev_dbg(dev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_write(dev, write_cb);

		if (rets) {
			dev_err(dev->dev,
				"amthif write failed with status = %d\n", rets);
			goto out;
		}
		mutex_unlock(&dev->device_lock);
		return length;
	}

	rets = mei_cl_write(cl, write_cb, false);
out:
	mutex_unlock(&dev->device_lock);
	if (rets < 0)
		mei_io_cb_free(write_cb);
	return rets;
}

/**
 * mei_ioctl_connect_client - the connect to fw client IOCTL function
 *
 * @file: private data of the file object
 * @data: IOCTL connect data, input and output parameters
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_ioctl_connect_client(struct file *file,
			struct mei_connect_client_data *data)
{
	struct mei_device *dev;
	struct mei_client *client;
	struct mei_me_client *me_cl;
	struct mei_cl *cl;
	int rets;

	cl = file->private_data;
	dev = cl->dev;

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto end;
	}

	if (cl->state != MEI_FILE_INITIALIZING &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		rets = -EBUSY;
		goto end;
	}

	/* find ME client we're trying to connect to */
	me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
	if (!me_cl || me_cl->props.fixed_address) {
		dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
			&data->in_client_uuid);
		rets = -ENOTTY;
		goto end;
	}

	cl->me_client_id = me_cl->client_id;
	cl->cl_uuid = me_cl->props.protocol_name;

	dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
			cl->me_client_id);
	dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n",
			me_cl->props.protocol_version);
	dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
			me_cl->props.max_msg_length);

	/* if we're connecting to amthif client then we will use the
	 * existing connection
	 */
	if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
		dev_dbg(dev->dev, "FW Client is amthi\n");
		if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
			rets = -ENODEV;
			goto end;
		}
		mei_cl_unlink(cl);

		kfree(cl);
		cl = NULL;
		dev->iamthif_open_count++;
		file->private_data = &dev->iamthif_cl;

		client = &data->out_client_properties;
		client->max_msg_length = me_cl->props.max_msg_length;
		client->protocol_version = me_cl->props.protocol_version;
		rets = dev->iamthif_cl.status;

		goto end;
	}

	/* prepare the output buffer */
	client = &data->out_client_properties;
	client->max_msg_length = me_cl->props.max_msg_length;
	client->protocol_version = me_cl->props.protocol_version;
	dev_dbg(dev->dev, "Can connect?\n");

	rets = mei_cl_connect(cl, file);

end:
	return rets;
}
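
/*
 * Connection usage sketch (userspace, illustrative only; MY_CLIENT_UUID,
 * req, req_len and rsp are placeholders supplied by the caller):
 *
 *	int fd = open("/dev/mei0", O_RDWR);
 *	struct mei_connect_client_data conn = {
 *		.in_client_uuid = MY_CLIENT_UUID,
 *	};
 *	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &conn) == 0) {
 *		size_t max_len = conn.out_client_properties.max_msg_length;
 *		write(fd, req, req_len);
 *		read(fd, rsp, max_len);
 *	}
 *	close(fd);
 *
 * On success the out_client_properties part of the structure carries the
 * client's max_msg_length and protocol_version as filled in by
 * mei_ioctl_connect_client() above; writes larger than max_msg_length are
 * rejected with EFBIG by mei_write().
 */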

/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data connect_data;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	switch (cmd) {
	case IOCTL_MEI_CONNECT_CLIENT:
		dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
		if (copy_from_user(&connect_data, (char __user *)data,
				sizeof(struct mei_connect_client_data))) {
			dev_dbg(dev->dev, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}

		rets = mei_ioctl_connect_client(file, &connect_data);
		if (rets)
			goto out;

		/* if all is OK, copy the data back to user space */
		if (copy_to_user((char __user *)data, &connect_data,
				sizeof(struct mei_connect_client_data))) {
			dev_dbg(dev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;
		}

		break;

	default:
		dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd);
		rets = -ENOIOCTLCMD;
	}

out:
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif

/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * Return: poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return POLLERR;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		mask = POLLERR;
		goto out;
	}

	mutex_unlock(&dev->device_lock);

	if (cl == &dev->iamthif_cl)
		return mei_amthif_poll(dev, file, wait);

	poll_wait(file, &cl->tx_wait, wait);

	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		mask = POLLERR;
		goto out;
	}

	mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}
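
/*
 * Note on poll (summary of mei_poll() above): callers may wait with a
 * standard poll()/select() loop, e.g.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, timeout_ms);
 *
 * For the amthif client the decision is delegated to mei_amthif_poll();
 * for other clients the mask is POLLERR once the client is no longer
 * connected and POLLIN | POLLRDNORM otherwise.
 */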

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};

static struct class *mei_class;
static dev_t mei_devt;
#define MEI_MAX_DEVS  MINORMASK
static DEFINE_MUTEX(mei_minor_lock);
static DEFINE_IDR(mei_idr);

/**
 * mei_minor_get - obtain next free device minor number
 *
 * @dev: device pointer
 *
 * Return: allocated minor, or -ENOSPC if no free minor left
 */
static int mei_minor_get(struct mei_device *dev)
{
	int ret;

	mutex_lock(&mei_minor_lock);
	ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
	if (ret >= 0)
		dev->minor = ret;
	else if (ret == -ENOSPC)
		dev_err(dev->dev, "too many mei devices\n");

	mutex_unlock(&mei_minor_lock);
	return ret;
}

/**
 * mei_minor_free - mark device minor number as free
 *
 * @dev: device pointer
 */
static void mei_minor_free(struct mei_device *dev)
{
	mutex_lock(&mei_minor_lock);
	idr_remove(&mei_idr, dev->minor);
	mutex_unlock(&mei_minor_lock);
}

int mei_register(struct mei_device *dev, struct device *parent)
{
	struct device *clsdev; /* class device */
	int ret, devno;

	ret = mei_minor_get(dev);
	if (ret < 0)
		return ret;

	/* Fill in the data structures */
	devno = MKDEV(MAJOR(mei_devt), dev->minor);
	cdev_init(&dev->cdev, &mei_fops);
	dev->cdev.owner = mei_fops.owner;

	/* Add the device */
	ret = cdev_add(&dev->cdev, devno, 1);
	if (ret) {
		dev_err(parent, "unable to add device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		goto err_dev_add;
	}

	clsdev = device_create(mei_class, parent, devno,
			 NULL, "mei%d", dev->minor);

	if (IS_ERR(clsdev)) {
		dev_err(parent, "unable to create device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		ret = PTR_ERR(clsdev);
		goto err_dev_create;
	}

	ret = mei_dbgfs_register(dev, dev_name(clsdev));
	if (ret) {
		dev_err(clsdev, "cannot register debugfs ret = %d\n", ret);
		goto err_dev_dbgfs;
	}

	return 0;

err_dev_dbgfs:
	device_destroy(mei_class, devno);
err_dev_create:
	cdev_del(&dev->cdev);
err_dev_add:
	mei_minor_free(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_register);

void mei_deregister(struct mei_device *dev)
{
	int devno;

	devno = dev->cdev.dev;
	cdev_del(&dev->cdev);

	mei_dbgfs_deregister(dev);

	device_destroy(mei_class, devno);

	mei_minor_free(dev);
}
EXPORT_SYMBOL_GPL(mei_deregister);

static int __init mei_init(void)
{
	int ret;

	mei_class = class_create(THIS_MODULE, "mei");
	if (IS_ERR(mei_class)) {
		pr_err("couldn't create class\n");
		ret = PTR_ERR(mei_class);
		goto err;
	}

	ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
	if (ret < 0) {
		pr_err("unable to allocate char dev region\n");
		goto err_class;
	}

	ret = mei_cl_bus_init();
	if (ret < 0) {
		pr_err("unable to initialize bus\n");
		goto err_chrdev;
	}

	return 0;

err_chrdev:
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
err_class:
	class_destroy(mei_class);
err:
	return ret;
}
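
/*
 * Note on registration (summary of mei_register()/mei_init() above): the
 * "mei" class, the char dev region and the client bus are set up once at
 * module load, while each probed device gets a minor from mei_idr, a
 * /dev/mei<minor> node created via device_create() and a debugfs entry;
 * mei_deregister() releases these per-device resources again.
 */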

static void __exit mei_exit(void)
{
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
	class_destroy(mei_class);
	mei_cl_bus_exit();
}

module_init(mei_init);
module_exit(mei_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");