/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include "mei_dev.h"
#include <linux/mei.h>
#include "interface.h"

/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

static DEFINE_MUTEX(mei_mutex);

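/*
 * Note: mei_mutex is taken only in mei_probe() below; it serializes probing
 * and protects the mei_pdev singleton against a second MEI function being
 * bound.
 */
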
/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client
 *
 * returns cb on success, NULL on error
 */
static struct mei_cl_cb *find_read_list_entry(
		struct mei_device *dev,
		struct mei_cl *cl)
{
	struct mei_cl_cb *pos = NULL;
	struct mei_cl_cb *next = NULL;

	dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
	list_for_each_entry_safe(pos, next, &dev->read_list.list, list)
		if (mei_cl_cmp_id(cl, pos->cl))
			return pos;
	return NULL;
}

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_cl *cl;
	struct mei_device *dev;
	unsigned long cl_id;
	int err;

	err = -ENODEV;
	if (!mei_pdev)
		goto out;

	dev = pci_get_drvdata(mei_pdev);
	if (!dev)
		goto out;

	mutex_lock(&dev->device_lock);
	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto out_unlock;

	err = -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		goto out_unlock;
	}
	err = -EMFILE;
	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		goto out_unlock;
	}

	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (cl_id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "client_id exceeded %d",
			MEI_CLIENTS_MAX);
		goto out_unlock;
	}

	cl->host_client_id = cl_id;

	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

	dev->open_handle_count++;

	list_add_tail(&cl->link, &dev->file_list);

	set_bit(cl->host_client_id, dev->host_clients_map);
	cl->state = MEI_FILE_INITIALIZING;
	cl->sm_state = 0;

	file->private_data = cl;
	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

out_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
out:
	return err;
}

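/*
 * Note that mei_open() only allocates and registers a host client; the client
 * is left in MEI_FILE_INITIALIZING state and is bound to an ME client later,
 * through IOCTL_MEI_CONNECT_CLIENT (handled in mei_ioctl() below).
 */
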
/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_release(dev, file);
		goto out;
	}
	if (cl->state == MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTING;
		dev_dbg(&dev->pdev->dev,
			"disconnecting client host client = %d, ME client = %d\n",
			cl->host_client_id,
			cl->me_client_id);
		rets = mei_disconnect_host_client(dev, cl);
	}
	mei_cl_flush_queues(cl);
	dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
		cl->host_client_id,
		cl->me_client_id);

	if (dev->open_handle_count > 0) {
		clear_bit(cl->host_client_id, dev->host_clients_map);
		dev->open_handle_count--;
	}
	mei_me_cl_unlink(dev, cl);

	/* free read cb */
	cb = NULL;
	if (cl->read_cb) {
		cb = find_read_list_entry(dev, cl);
		/* Remove entry from read list */
		if (cb)
			list_del(&cb->list);

		cb = cl->read_cb;
		cl->read_cb = NULL;
	}

	file->private_data = NULL;

	if (cb) {
		mei_io_cb_free(cb);
		cb = NULL;
	}

	kfree(cl);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}


/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow to read watchdog client */
		i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];
			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_read(dev, file, ubuf, length, offset);
		goto out;
	}

	if (cl->read_cb && cl->read_cb->buf_idx > *offset) {
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->buf_idx > 0 &&
		   cl->read_cb->buf_idx <= *offset) {
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->buf_idx) && *offset > 0) {
		/* Offset needs to be cleaned for contiguous reads */
		*offset = 0;
		rets = 0;
		goto out;
	}

	err = mei_start_read(dev, cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
	    !waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
				(MEI_READ_COMPLETE == cl->reading_state ||
				 MEI_FILE_INITIALIZING == cl->state ||
				 MEI_FILE_DISCONNECTED == cl->state ||
				 MEI_FILE_DISCONNECTING == cl->state))) {
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
		cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->buf_idx - %lu\n", cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = find_read_list_entry(dev, cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->list);
	mei_io_cb_free(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
	cl->read_pending = 0;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}

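/*
 * Read semantics, as implemented above: each read() call returns data from at
 * most one completed message; *offset tracks partial consumption of that
 * message and is zeroed again before the next message cycle starts (either
 * here or from mei_write()), so successive messages each start at offset 0.
 */
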
/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_msg_hdr mei_hdr;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto err;
	}

	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		rets = -ENODEV;
		goto err;
	}
	if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
		rets = -EMSGSIZE;
		goto err;
	}

	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -ENODEV;
		dev_err(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id, cl->me_client_id);
		goto err;
	}
	if (cl == &dev->iamthif_cl) {
		write_cb = mei_amthif_find_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
				mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->list);
				mei_io_cb_free(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = find_read_list_entry(dev, cl);
		if (write_cb) {
			list_del(&write_cb->list);
			mei_io_cb_free(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
			cl->read_pending = 0;
		}
	} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
		*offset = 0;


	write_cb = mei_io_cb_init(cl, file);
	if (!write_cb) {
		dev_err(&dev->pdev->dev, "write cb allocation failed\n");
		rets = -ENOMEM;
		goto err;
	}
	rets = mei_io_cb_alloc_req_buf(write_cb, length);
	if (rets)
		goto err;

	dev_dbg(&dev->pdev->dev, "cb request size = %zd\n", length);

	/* copy_from_user() returns the number of bytes left uncopied;
	 * translate a short copy into -EFAULT rather than returning the
	 * residue as a positive "bytes written" count. */
	if (copy_from_user(write_cb->request_buffer.data, ubuf, length)) {
		rets = -EFAULT;
		goto err;
	}

	cl->sm_state = 0;
	if (length == 4 &&
	    ((memcmp(mei_wd_state_independence_msg[0],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[1],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[2],
		     write_cb->request_buffer.data, 4) == 0)))
		cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_write(dev, write_cb);

		if (rets) {
			dev_err(&dev->pdev->dev,
				"amthi write failed with status = %d\n", rets);
			goto err;
		}
		mutex_unlock(&dev->device_lock);
		return length;
	}

	write_cb->fop_type = MEI_FOP_WRITE;

	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
		cl->host_client_id, cl->me_client_id);
	rets = mei_flow_ctrl_creds(dev, cl);
	if (rets < 0)
		goto err;

	if (rets == 0 || dev->mei_host_buffer_is_empty == false) {
		write_cb->buf_idx = 0;
		mei_hdr.msg_complete = 0;
		cl->writing_state = MEI_WRITING;
		goto out;
	}

	dev->mei_host_buffer_is_empty = false;
	if (length > mei_hbuf_max_data(dev)) {
		mei_hdr.length = mei_hbuf_max_data(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = length;
		mei_hdr.msg_complete = 1;
	}
	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
		*((u32 *) &mei_hdr));
	if (mei_write_message(dev, &mei_hdr,
			      write_cb->request_buffer.data, mei_hdr.length)) {
		rets = -ENODEV;
		goto err;
	}
	cl->writing_state = MEI_WRITING;
	write_cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete) {
		if (mei_flow_ctrl_reduce(dev, cl)) {
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&write_cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&write_cb->list, &dev->write_list.list);
	}

	mutex_unlock(&dev->device_lock);
	return length;

err:
	mutex_unlock(&dev->device_lock);
	mei_io_cb_free(write_cb);
	return rets;
}

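/*
 * Write semantics, as implemented above: the first chunk of the message is
 * pushed to the hardware immediately only when the host buffer is empty and a
 * flow-control credit is available; otherwise the callback is queued on
 * write_list and the transfer is completed later from the driver's interrupt
 * handling. write_cb->buf_idx records how much has already been sent.
 */
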
/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (cmd != IOCTL_MEI_CONNECT_CLIENT)
		return -EINVAL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

	connect_data = kzalloc(sizeof(struct mei_connect_client_data),
			       GFP_KERNEL);
	if (!connect_data) {
		rets = -ENOMEM;
		goto out;
	}
	dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
	if (copy_from_user(connect_data, (char __user *)data,
			   sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}
	rets = mei_ioctl_connect_client(file, connect_data);

	/* if all is OK, copy the data back to user space */
	if (rets)
		goto out;

	dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
	if (copy_to_user((char __user *)data, connect_data,
			 sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto out;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}

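/*
 * Illustrative user-space sequence for this ioctl (not part of the driver),
 * sketched from the uapi definitions in <linux/mei.h>; the client UUID is a
 * placeholder and error handling is omitted:
 *
 *	int fd = open("/dev/mei", O_RDWR);
 *	struct mei_connect_client_data data = { 0 };
 *
 *	data.in_client_uuid = SOME_CLIENT_UUID;
 *	ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data);
 *	-- data.out_client_properties.max_msg_length now bounds write() sizes
 *	write(fd, req, req_len);
 *	read(fd, rsp, sizeof(rsp));
 *	close(fd);
 */
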
/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			     unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif


/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return mask;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;


	if (cl == &dev->iamthif_cl) {
		mask = mei_amthif_poll(dev, file, wait);
		goto out;
	}

	mutex_unlock(&dev->device_lock);
	poll_wait(file, &cl->tx_wait, wait);
	mutex_lock(&dev->device_lock);
	if (MEI_WRITE_COMPLETE == cl->writing_state)
		mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}

/*
 * file operations structure used for the mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};


/*
 * Misc Device Struct
 */
static struct miscdevice mei_misc_device = {
	.name = "mei",
	.fops = &mei_fops,
	.minor = MISC_DYNAMIC_MINOR,
};

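/*
 * The driver exposes a single character device node, /dev/mei, registered
 * with a dynamically assigned misc minor; every open() of that node creates
 * an independent host client (see mei_open() above).
 */
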
/**
 * mei_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @ent: entry into pci_device_table
 *
 * returns true if ME Interface is valid, false otherwise
 */
static bool mei_quirk_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	u32 reg;

	if (ent->device == MEI_DEV_ID_PBG_1) {
		pci_read_config_dword(pdev, 0x48, &reg);
		/* make sure that bit 9 is up and bit 10 is down */
		if ((reg & 0x600) == 0x200) {
			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
			return false;
		}
	}
	return true;
}

/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);

	if (!mei_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	if (mei_pdev) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}
	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	INIT_WORK(&dev->init_work, mei_host_client_init);

	if (mei_hw_init(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_pdev = pdev;
	pci_set_drvdata(pdev, dev);


	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

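/*
 * Interrupt setup note for mei_probe() above (and mei_pci_resume() below):
 * with MSI the vector belongs to this device alone, so no quick (hard-irq)
 * handler is needed and IRQF_ONESHOT keeps the interrupt masked until the
 * threaded handler completes; on a shared legacy line the quick handler runs
 * first to check whether the interrupt was actually raised by this device.
 */
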
/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	if (mei_pdev != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	mei_wd_stop(dev);

	mei_pdev = NULL;

	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->wd_cl);
	}

	/* Unregistering watchdog device */
	mei_watchdog_unregister(dev);

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_me_cl_unlink(dev, &dev->wd_cl);
	mei_me_cl_unlink(dev, &dev->iamthif_cl);

	dev->iamthif_current_cb = NULL;
	dev->me_clients_num = 0;

	mutex_unlock(&dev->device_lock);

	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	misc_deregister(&mei_misc_device);
}

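/*
 * Power management: suspend stops the watchdog client, resets the device into
 * the power-down state and releases the interrupt; resume re-requests the
 * interrupt, resets the device back into the power-up state and restarts the
 * periodic timer work.
 */
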
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	if (!dev)
		return -ENODEV;
	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	/* Stop watchdog if exists */
	err = mei_wd_stop(dev);
	/* Set new mei state */
	if (dev->dev_state == MEI_DEV_ENABLED ||
	    dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
		dev->dev_state = MEI_DEV_POWER_DOWN;
		mei_reset(dev, 0);
	}
	mutex_unlock(&dev->device_lock);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return err;
}

static int mei_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
	dev->dev_state = MEI_DEV_POWER_UP;
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}

static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS	(&mei_pm_ops)
#else
#define MEI_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 *  PCI driver structure
 */
static struct pci_driver mei_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = mei_remove,
	.shutdown = mei_remove,
	.driver.pm = MEI_PM_OPS,
};

module_pci_driver(mei_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");