/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl *cl;

	int err;

	dev = container_of(inode->i_cdev, struct mei_device, cdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	cl = NULL;

	err = -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		goto err_unlock;
	}

	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto err_unlock;

	/* open_handle_count check is handled in the mei_cl_link */
	err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
	if (err)
		goto err_unlock;

	file->private_data = cl;

	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

err_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
	return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_release(dev, file);
		goto out;
	}
	if (cl->state == MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTING;
		cl_dbg(dev, cl, "disconnecting\n");
		rets = mei_cl_disconnect(cl);
	}
	mei_cl_flush_queues(cl);
	cl_dbg(dev, cl, "removing\n");

	mei_cl_unlink(cl);

	/* free read cb */
	cb = NULL;
	if (cl->read_cb) {
		cb = mei_cl_find_read_cb(cl);
		/* Remove entry from read list */
		if (cb)
			list_del(&cb->list);

		cb = cl->read_cb;
		cl->read_cb = NULL;
	}

	file->private_data = NULL;

	mei_io_cb_free(cb);

	kfree(cl);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int rets;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_read(dev, file, ubuf, length, offset);
		goto out;
	}

	if (cl->read_cb) {
		cb = cl->read_cb;
		/* read what is left */
		if (cb->buf_idx > *offset)
			goto copy_buffer;
		/* offset is beyond buf_idx, we have no more data, return 0 */
		if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
			rets = 0;
			goto free;
		}
		/* Offset needs to be cleaned for contiguous reads */
		if (cb->buf_idx == 0 && *offset > 0)
			*offset = 0;
	} else if (*offset > 0) {
		*offset = 0;
	}

	err = mei_cl_read_start(cl, length);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
	    !waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
				MEI_READ_COMPLETE == cl->reading_state ||
				mei_cl_is_transitioning(cl))) {

			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (mei_cl_is_transitioning(cl)) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "buf.size = %d buf.idx= %ld\n",
		cb->response_buffer.size, cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = mei_cl_find_read_cb(cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->list);
	mei_io_cb_free(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	id = mei_me_cl_by_id(dev, cl->me_client_id);
	if (id < 0) {
		rets = -ENOTTY;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (length > dev->me_clients[id].props.max_msg_length) {
		rets = -EFBIG;
		goto out;
	}

	if (cl->state != MEI_FILE_CONNECTED) {
		dev_err(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id, cl->me_client_id);
		rets = -ENODEV;
		goto out;
	}
	if (cl == &dev->iamthif_cl) {
		write_cb = mei_amthif_find_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
				mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->list);
				mei_io_cb_free(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = mei_cl_find_read_cb(cl);
		if (write_cb) {
			list_del(&write_cb->list);
			mei_io_cb_free(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
		}
	} else if (cl->reading_state == MEI_IDLE)
		*offset = 0;

	write_cb = mei_io_cb_init(cl, file);
	if (!write_cb) {
		dev_err(&dev->pdev->dev, "write cb allocation failed\n");
		rets = -ENOMEM;
		goto out;
	}
	rets = mei_io_cb_alloc_req_buf(write_cb, length);
	if (rets)
		goto out;

	rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
	if (rets) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_write(dev, write_cb);

		if (rets) {
			dev_err(&dev->pdev->dev,
				"amthif write failed with status = %d\n", rets);
			goto out;
		}
		mutex_unlock(&dev->device_lock);
		return length;
	}

	rets = mei_cl_write(cl, write_cb, false);
out:
	mutex_unlock(&dev->device_lock);
	if (rets < 0)
		mei_io_cb_free(write_cb);
	return rets;
}

/**
 * mei_ioctl_connect_client - the connect to fw client IOCTL function
 *
 * @file: private data of the file object
 * @data: IOCTL connect data, input and output parameters
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_ioctl_connect_client(struct file *file,
			struct mei_connect_client_data *data)
{
	struct mei_device *dev;
	struct mei_client *client;
	struct mei_cl *cl;
	int i;
	int rets;

	cl = file->private_data;
	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto end;
	}

	if (cl->state != MEI_FILE_INITIALIZING &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		rets = -EBUSY;
		goto end;
	}

	/* find ME client we're trying to connect to */
	i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
	if (i < 0 || dev->me_clients[i].props.fixed_address) {
		dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n",
			&data->in_client_uuid);
		rets = -ENOTTY;
		goto end;
	}

	cl->me_client_id = dev->me_clients[i].client_id;

	dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
		cl->me_client_id);
	dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
		dev->me_clients[i].props.protocol_version);
	dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
		dev->me_clients[i].props.max_msg_length);

	/* if we're connecting to amthif client then we will use the
	 * existing connection
	 */
	if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
		dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
		if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
			rets = -ENODEV;
			goto end;
		}
		mei_cl_unlink(cl);

		kfree(cl);
		cl = NULL;
		dev->iamthif_open_count++;
		file->private_data = &dev->iamthif_cl;

		client = &data->out_client_properties;
		client->max_msg_length =
			dev->me_clients[i].props.max_msg_length;
		client->protocol_version =
			dev->me_clients[i].props.protocol_version;
		rets = dev->iamthif_cl.status;

		goto end;
	}

	/* prepare the output buffer */
	client = &data->out_client_properties;
	client->max_msg_length = dev->me_clients[i].props.max_msg_length;
	client->protocol_version = dev->me_clients[i].props.protocol_version;
	dev_dbg(&dev->pdev->dev, "Can connect?\n");

	rets = mei_cl_connect(cl, file);

end:
	return rets;
}
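
/*
 * Usage note (not part of the driver): a minimal user-space sketch of the
 * connect flow that mei_ioctl_connect_client() above implements. This is an
 * illustrative example only -- the "/dev/mei0" node name follows the "mei%d"
 * name used by mei_register() below, and the all-zero UUID is a placeholder
 * that must be replaced with the UUID of a real firmware client.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/mei.h>
 *
 *	static int mei_connect_example(void)
 *	{
 *		struct mei_connect_client_data data;
 *		// placeholder UUID; use the target firmware client's UUID
 *		static const uuid_le guid = UUID_LE(0x00000000, 0x0000, 0x0000,
 *			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
 *		int fd;
 *
 *		fd = open("/dev/mei0", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&data, 0, sizeof(data));
 *		data.in_client_uuid = guid;
 *
 *		// on success the same structure carries the client properties back
 *		if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *
 *		printf("max msg length %u, protocol version %u\n",
 *			data.out_client_properties.max_msg_length,
 *			data.out_client_properties.protocol_version);
 *		return fd;
 *	}
 */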

/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (cmd != IOCTL_MEI_CONNECT_CLIENT)
		return -EINVAL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

	connect_data = kzalloc(sizeof(struct mei_connect_client_data),
			       GFP_KERNEL);
	if (!connect_data) {
		rets = -ENOMEM;
		goto out;
	}
	dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
	if (copy_from_user(connect_data, (char __user *)data,
			   sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}

	rets = mei_ioctl_connect_client(file, connect_data);

	/* if all is OK, copy the data back to user space */
	if (rets)
		goto out;

	dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
	if (copy_to_user((char __user *)data, connect_data,
			 sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto out;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif

/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return POLLERR;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		mask = POLLERR;
		goto out;
	}

	mutex_unlock(&dev->device_lock);

	if (cl == &dev->iamthif_cl)
		return mei_amthif_poll(dev, file, wait);

	poll_wait(file, &cl->tx_wait, wait);

	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		mask = POLLERR;
		goto out;
	}

	mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}
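
/*
 * Usage note (not part of the driver): a hedged sketch of a single message
 * exchange on a file descriptor already connected with
 * IOCTL_MEI_CONNECT_CLIENT (see mei_write() and mei_read() above). The
 * request payload here is a placeholder; the real message format is defined
 * by the connected firmware client. read() blocks until a response arrives
 * unless the descriptor was opened with O_NONBLOCK.
 *
 *	#include <unistd.h>
 *
 *	static int mei_transaction_example(int fd)
 *	{
 *		unsigned char req[] = { 0x00 };	// placeholder request
 *		unsigned char rsp[512];		// response buffer
 *		ssize_t n;
 *
 *		// one write() submits one message to the connected client
 *		if (write(fd, req, sizeof(req)) != (ssize_t)sizeof(req))
 *			return -1;
 *
 *		// one read() returns one (possibly partial) response message
 *		n = read(fd, rsp, sizeof(rsp));
 *		if (n < 0)
 *			return -1;
 *
 *		return (int)n;			// response bytes received
 *	}
 */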

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};

static struct class *mei_class;
static dev_t mei_devt;
#define MEI_MAX_DEVS  MINORMASK
static DEFINE_MUTEX(mei_minor_lock);
static DEFINE_IDR(mei_idr);

/**
 * mei_minor_get - obtain next free device minor number
 *
 * @dev: device pointer
 *
 * returns allocated minor, or -ENOSPC if no free minor left
 */
static int mei_minor_get(struct mei_device *dev)
{
	int ret;

	mutex_lock(&mei_minor_lock);
	ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
	if (ret >= 0)
		dev->minor = ret;
	else if (ret == -ENOSPC)
		dev_err(&dev->pdev->dev, "too many mei devices\n");

	mutex_unlock(&mei_minor_lock);
	return ret;
}

/**
 * mei_minor_free - mark device minor number as free
 *
 * @dev: device pointer
 */
static void mei_minor_free(struct mei_device *dev)
{
	mutex_lock(&mei_minor_lock);
	idr_remove(&mei_idr, dev->minor);
	mutex_unlock(&mei_minor_lock);
}

int mei_register(struct mei_device *dev, struct device *parent)
{
	struct device *clsdev; /* class device */
	int ret, devno;

	ret = mei_minor_get(dev);
	if (ret < 0)
		return ret;

	/* Fill in the data structures */
	devno = MKDEV(MAJOR(mei_devt), dev->minor);
	cdev_init(&dev->cdev, &mei_fops);
	dev->cdev.owner = mei_fops.owner;

	/* Add the device */
	ret = cdev_add(&dev->cdev, devno, 1);
	if (ret) {
		dev_err(parent, "unable to add device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		goto err_dev_add;
	}

	clsdev = device_create(mei_class, parent, devno,
			       NULL, "mei%d", dev->minor);

	if (IS_ERR(clsdev)) {
		dev_err(parent, "unable to create device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		ret = PTR_ERR(clsdev);
		goto err_dev_create;
	}

	ret = mei_dbgfs_register(dev, dev_name(clsdev));
	if (ret) {
		dev_err(clsdev, "cannot register debugfs ret = %d\n", ret);
		goto err_dev_dbgfs;
	}

	return 0;

err_dev_dbgfs:
	device_destroy(mei_class, devno);
err_dev_create:
	cdev_del(&dev->cdev);
err_dev_add:
	mei_minor_free(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_register);

void mei_deregister(struct mei_device *dev)
{
	int devno;

	devno = dev->cdev.dev;
	cdev_del(&dev->cdev);

	mei_dbgfs_deregister(dev);

	device_destroy(mei_class, devno);

	mei_minor_free(dev);
}
EXPORT_SYMBOL_GPL(mei_deregister);

static int __init mei_init(void)
{
	int ret;

	mei_class = class_create(THIS_MODULE, "mei");
	if (IS_ERR(mei_class)) {
		pr_err("couldn't create class\n");
		ret = PTR_ERR(mei_class);
		goto err;
	}

	ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
	if (ret < 0) {
		pr_err("unable to allocate char dev region\n");
		goto err_class;
	}

	ret = mei_cl_bus_init();
	if (ret < 0) {
		pr_err("unable to initialize bus\n");
		goto err_chrdev;
	}

	return 0;

err_chrdev:
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
err_class:
	class_destroy(mei_class);
err:
	return ret;
}

static void __exit mei_exit(void)
{
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
	class_destroy(mei_class);
	mei_cl_bus_exit();
}

module_init(mei_init);
module_exit(mei_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");