// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched/signal.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"

static struct class *mei_class;
static dev_t mei_devt;
#define MEI_MAX_DEVS	MINORMASK
static DEFINE_MUTEX(mei_minor_lock);
static DEFINE_IDR(mei_idr);

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl *cl;

	int err;

	dev = container_of(inode->i_cdev, struct mei_device, cdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		err = -ENODEV;
		goto err_unlock;
	}

	cl = mei_cl_alloc_linked(dev);
	if (IS_ERR(cl)) {
		err = PTR_ERR(cl);
		goto err_unlock;
	}

	cl->fp = file;
	file->private_data = cl;

	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

err_unlock:
	mutex_unlock(&dev->device_lock);
	return err;
}

/**
 * mei_cl_vtag_remove_by_fp - remove vtag that corresponds to fp from list
 *
 * @cl: host client
 * @fp: pointer to file structure
 *
 */
static void mei_cl_vtag_remove_by_fp(const struct mei_cl *cl,
				     const struct file *fp)
{
	struct mei_cl_vtag *vtag_l, *next;

	list_for_each_entry_safe(vtag_l, next, &cl->vtag_map, list) {
		if (vtag_l->fp == fp) {
			list_del(&vtag_l->list);
			kfree(vtag_l);
			return;
		}
	}
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	mei_cl_vtag_remove_by_fp(cl, file);

	if (!list_empty(&cl->vtag_map)) {
		cl_dbg(dev, cl, "not the last vtag\n");
		mei_cl_flush_queues(cl, file);
		rets = 0;
		goto out;
	}

	rets = mei_cl_disconnect(cl);
	/*
	 * Check again: This is necessary since disconnect releases the lock
	 * and another client can connect in the meantime.
	 */
	if (!list_empty(&cl->vtag_map)) {
		cl_dbg(dev, cl, "not the last vtag after disconnect\n");
		mei_cl_flush_queues(cl, file);
		goto out;
	}

	mei_cl_flush_queues(cl, NULL);
	cl_dbg(dev, cl, "removing\n");

	mei_cl_unlink(cl);
	kfree(cl);

out:
	file->private_data = NULL;

	mutex_unlock(&dev->device_lock);
	return rets;
}


/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success , <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	struct mei_cl_cb *cb = NULL;
	bool nonblock = !!(file->f_flags & O_NONBLOCK);
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;


	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (ubuf == NULL) {
		rets = -EMSGSIZE;
		goto out;
	}

	cb = mei_cl_read_cb(cl, file);
	if (cb)
		goto copy_buffer;

	if (*offset > 0)
		*offset = 0;

	rets = mei_cl_read_start(cl, length, file);
	if (rets && rets != -EBUSY) {
		cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets);
		goto out;
	}

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}

	mutex_unlock(&dev->device_lock);
	if (wait_event_interruptible(cl->rx_wait,
				     mei_cl_read_cb(cl, file) ||
				     !mei_cl_is_connected(cl))) {
		if (signal_pending(current))
			return -EINTR;
		return -ERESTARTSYS;
	}
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	cb = mei_cl_read_cb(cl, file);
	if (!cb) {
		rets = 0;
		goto out;
	}

copy_buffer:
	/* now copy the data to user space */
	if (cb->status) {
		rets = cb->status;
		cl_dbg(dev, cl, "read operation failed %zd\n", rets);
		goto free;
	}

	cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
	       cb->buf.size, cb->buf_idx, *offset);
	if (*offset >= cb->buf_idx) {
		rets = 0;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
		dev_dbg(dev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	/* not all data was read, keep the cb */
	if (*offset < cb->buf_idx)
		goto out;

free:
	mei_cl_del_rd_completed(cl, cb);
	*offset = 0;

out:
	cl_dbg(dev, cl, "end mei read rets = %zd\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_cl_vtag_by_fp - obtain the vtag by file pointer
 *
 * @cl: host client
 * @fp: pointer to file structure
 *
 * Return: vtag value on success, otherwise 0
 */
static u8 mei_cl_vtag_by_fp(const struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_vtag *cl_vtag;

	if (!fp)
		return 0;

	list_for_each_entry(cl_vtag, &cl->vtag_map, list)
		if (cl_vtag->fp == fp)
			return cl_vtag->vtag;

	return 0;
}

/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success , <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		cl_err(dev, cl, "is not connected");
		rets = -ENODEV;
		goto out;
	}

	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	while (cl->tx_cb_queued >= dev->tx_queue_limit) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}
	cb->vtag = mei_cl_vtag_by_fp(cl, file);

	rets = copy_from_user(cb->buf.data, ubuf, length);
	if (rets) {
		dev_dbg(dev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		mei_io_cb_free(cb);
		goto out;
	}

	rets = mei_cl_write(cl, cb);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_ioctl_connect_client - the connect to fw client IOCTL function
 *
 * @file: private data of the file object
 * @in_client_uuid: requested UUID for connection
 * @client: IOCTL connect data, output parameters
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_ioctl_connect_client(struct file *file,
				    const uuid_le *in_client_uuid,
				    struct mei_client *client)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	struct mei_cl *cl;
	int rets;

	cl = file->private_data;
	dev = cl->dev;

	if (cl->state != MEI_FILE_INITIALIZING &&
	    cl->state != MEI_FILE_DISCONNECTED)
		return -EBUSY;

	/* find ME client we're trying to connect to */
	me_cl = mei_me_cl_by_uuid(dev, in_client_uuid);
	if (!me_cl) {
		dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
			in_client_uuid);
		rets = -ENOTTY;
		goto end;
	}

	if (me_cl->props.fixed_address) {
		bool forbidden = dev->override_fixed_address ?
				 !dev->allow_fixed_address : !dev->hbm_f_fa_supported;
		if (forbidden) {
			dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
				in_client_uuid);
			rets = -ENOTTY;
			goto end;
		}
	}

	dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
		me_cl->client_id);
	dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n",
		me_cl->props.protocol_version);
	dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
		me_cl->props.max_msg_length);

	/* prepare the output buffer */
	client->max_msg_length = me_cl->props.max_msg_length;
	client->protocol_version = me_cl->props.protocol_version;
	dev_dbg(dev->dev, "Can connect?\n");

	rets = mei_cl_connect(cl, me_cl, file);

end:
	mei_me_cl_put(me_cl);
	return rets;
}

/**
 * mei_vt_support_check - check if client supports vtags
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @dev: mei_device
 * @uuid: client UUID
 *
 * Return:
 *	0 - supported
 *	-ENOTTY - no such client
 *	-EOPNOTSUPP - vtags are not supported by client
 */
static int mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	int ret;

	if (!dev->hbm_f_vt_supported)
		return -EOPNOTSUPP;

	me_cl = mei_me_cl_by_uuid(dev, uuid);
	if (!me_cl) {
		dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
			uuid);
		return -ENOTTY;
	}
	ret = me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
	mei_me_cl_put(me_cl);

	return ret;
}

/**
 * mei_ioctl_connect_vtag - connect to fw client with vtag IOCTL function
 *
 * @file: private data of the file object
 * @in_client_uuid: requested UUID for connection
 * @client: IOCTL connect data, output parameters
 * @vtag: vm tag
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_ioctl_connect_vtag(struct file *file,
				  const uuid_le *in_client_uuid,
				  struct mei_client *client,
				  u8 vtag)
{
	struct mei_device *dev;
	struct mei_cl *cl;
	struct mei_cl *pos;
	struct mei_cl_vtag *cl_vtag;

	cl = file->private_data;
	dev = cl->dev;

	dev_dbg(dev->dev, "FW Client %pUl vtag %d\n", in_client_uuid, vtag);

	switch (cl->state) {
	case MEI_FILE_DISCONNECTED:
		if (mei_cl_vtag_by_fp(cl, file) != vtag) {
			dev_err(dev->dev, "reconnect with different vtag\n");
			return -EINVAL;
		}
		break;
	case MEI_FILE_INITIALIZING:
		/* malicious connect from another thread may push vtag */
		if (!IS_ERR(mei_cl_fp_by_vtag(cl, vtag))) {
			dev_err(dev->dev, "vtag already filled\n");
			return -EINVAL;
		}

		list_for_each_entry(pos, &dev->file_list, link) {
			if (pos == cl)
				continue;
			if (!pos->me_cl)
				continue;

			/* only search for same UUID */
			if (uuid_le_cmp(*mei_cl_uuid(pos), *in_client_uuid))
				continue;

			/* if tag already exists try another fp */
			if (!IS_ERR(mei_cl_fp_by_vtag(pos, vtag)))
				continue;

			/* replace cl with acquired one */
			dev_dbg(dev->dev, "replacing with existing cl\n");
			mei_cl_unlink(cl);
			kfree(cl);
			file->private_data = pos;
			cl = pos;
			break;
		}

		cl_vtag = mei_cl_vtag_alloc(file, vtag);
		if (IS_ERR(cl_vtag))
			return -ENOMEM;

		list_add_tail(&cl_vtag->list, &cl->vtag_map);
		break;
	default:
		return -EBUSY;
	}

	while (cl->state != MEI_FILE_INITIALIZING &&
	       cl->state != MEI_FILE_DISCONNECTED &&
	       cl->state != MEI_FILE_CONNECTED) {
		mutex_unlock(&dev->device_lock);
		wait_event_timeout(cl->wait,
				   (cl->state == MEI_FILE_CONNECTED ||
				    cl->state == MEI_FILE_DISCONNECTED ||
				    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
				    cl->state == MEI_FILE_DISCONNECT_REPLY),
				   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
		mutex_lock(&dev->device_lock);
	}

	if (!mei_cl_is_connected(cl))
		return mei_ioctl_connect_client(file, in_client_uuid, client);

	client->max_msg_length = cl->me_cl->props.max_msg_length;
	client->protocol_version = cl->me_cl->props.protocol_version;

	return 0;
}
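
/*
 * Illustrative userspace sketch (not part of the driver): connecting with a
 * vtag through IOCTL_MEI_CONNECT_CLIENT_VTAG from <linux/mei.h>. The helper
 * name and the client UUID are placeholders; the vtag must be non-zero.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/mei.h>
 *
 *	static int mei_connect_vtag(int fd, const uuid_le *uuid, __u8 vtag)
 *	{
 *		struct mei_connect_client_data_vtag conn = {};
 *
 *		conn.connect.in_client_uuid = *uuid;	// placeholder UUID
 *		conn.connect.vtag = vtag;		// must be non-zero
 *		if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT_VTAG, &conn) < 0)
 *			return -1;
 *		// out_client_properties now holds max_msg_length and
 *		// protocol_version of the connected firmware client
 *		return 0;
 *	}
 */
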
/**
 * mei_ioctl_client_notify_request -
 *     propagate event notification request to client
 *
 * @file: pointer to file structure
 * @request: 0 - disable, 1 - enable
 *
 * Return: 0 on success , <0 on error
 */
static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
{
	struct mei_cl *cl = file->private_data;

	if (request != MEI_HBM_NOTIFICATION_START &&
	    request != MEI_HBM_NOTIFICATION_STOP)
		return -EINVAL;

	return mei_cl_notify_request(cl, file, (u8)request);
}

/**
 * mei_ioctl_client_notify_get - wait for notification request
 *
 * @file: pointer to file structure
 * @notify_get: notification state (output): 1 - event received, 0 - no event
 *
 * Return: 0 on success , <0 on error
 */
static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
{
	struct mei_cl *cl = file->private_data;
	bool notify_ev;
	bool block = (file->f_flags & O_NONBLOCK) == 0;
	int rets;

	rets = mei_cl_notify_get(cl, block, &notify_ev);
	if (rets)
		return rets;

	*notify_get = notify_ev ? 1 : 0;
	return 0;
}

/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success , <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data conn;
	struct mei_connect_client_data_vtag conn_vtag;
	const uuid_le *cl_uuid;
	struct mei_client *props;
	u8 vtag;
	u32 notify_get, notify_req;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	switch (cmd) {
	case IOCTL_MEI_CONNECT_CLIENT:
		dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
		if (copy_from_user(&conn, (char __user *)data, sizeof(conn))) {
			dev_dbg(dev->dev, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}
		cl_uuid = &conn.in_client_uuid;
		props = &conn.out_client_properties;
		vtag = 0;

		rets = mei_vt_support_check(dev, cl_uuid);
		if (rets == -ENOTTY)
			goto out;
		if (!rets)
			rets = mei_ioctl_connect_vtag(file, cl_uuid, props,
						      vtag);
		else
			rets = mei_ioctl_connect_client(file, cl_uuid, props);
		if (rets)
			goto out;

		/* if all is ok, copying the data back to user. */
		if (copy_to_user((char __user *)data, &conn, sizeof(conn))) {
			dev_dbg(dev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;
		}

		break;

	case IOCTL_MEI_CONNECT_CLIENT_VTAG:
		dev_dbg(dev->dev, "IOCTL_MEI_CONNECT_CLIENT_VTAG\n");
		if (copy_from_user(&conn_vtag, (char __user *)data,
				   sizeof(conn_vtag))) {
			dev_dbg(dev->dev, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}

		cl_uuid = &conn_vtag.connect.in_client_uuid;
		props = &conn_vtag.out_client_properties;
		vtag = conn_vtag.connect.vtag;

		rets = mei_vt_support_check(dev, cl_uuid);
		if (rets == -EOPNOTSUPP)
			dev_dbg(dev->dev, "FW Client %pUl does not support vtags\n",
				cl_uuid);
		if (rets)
			goto out;

		if (!vtag) {
			dev_dbg(dev->dev, "vtag can't be zero\n");
			rets = -EINVAL;
			goto out;
		}

		rets = mei_ioctl_connect_vtag(file, cl_uuid, props, vtag);
		if (rets)
			goto out;

		/* if all is ok, copying the data back to user. */
		if (copy_to_user((char __user *)data, &conn_vtag,
				 sizeof(conn_vtag))) {
			dev_dbg(dev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;
		}

		break;

	case IOCTL_MEI_NOTIFY_SET:
		dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n");
		if (copy_from_user(&notify_req,
				   (char __user *)data, sizeof(notify_req))) {
			dev_dbg(dev->dev, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}
		rets = mei_ioctl_client_notify_request(file, notify_req);
		break;

	case IOCTL_MEI_NOTIFY_GET:
		dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n");
		rets = mei_ioctl_client_notify_get(file, &notify_get);
		if (rets)
			goto out;

		dev_dbg(dev->dev, "copy connect data to user\n");
		if (copy_to_user((char __user *)data,
				 &notify_get, sizeof(notify_get))) {
			dev_dbg(dev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;

		}
		break;

	default:
		rets = -ENOIOCTLCMD;
	}

out:
	mutex_unlock(&dev->device_lock);
	return rets;
}
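
/*
 * Illustrative userspace sketch (not part of the driver): a minimal
 * connect/write/read round trip over the character device using
 * IOCTL_MEI_CONNECT_CLIENT from <linux/mei.h>. The device node name, the
 * client UUID and the request payload are placeholders; each write() carries
 * one message and must not exceed out_client_properties.max_msg_length.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/mei.h>
 *
 *	static int mei_request(const char *devnode, const uuid_le *uuid,
 *			       const void *req, size_t req_len,
 *			       void *resp, size_t resp_len)
 *	{
 *		struct mei_connect_client_data conn = {};
 *		int fd = open(devnode, O_RDWR);	// e.g. "/dev/mei0"
 *
 *		if (fd < 0)
 *			return -1;
 *		conn.in_client_uuid = *uuid;	// placeholder client UUID
 *		if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &conn) < 0 ||
 *		    write(fd, req, req_len) != (ssize_t)req_len ||
 *		    read(fd, resp, resp_len) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */
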
/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * Return: poll mask
 */
static __poll_t mei_poll(struct file *file, poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	__poll_t mask = 0;
	bool notify_en;

	if (WARN_ON(!cl || !cl->dev))
		return EPOLLERR;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	notify_en = cl->notify_en && (req_events & EPOLLPRI);

	if (dev->dev_state != MEI_DEV_ENABLED ||
	    !mei_cl_is_connected(cl)) {
		mask = EPOLLERR;
		goto out;
	}

	if (notify_en) {
		poll_wait(file, &cl->ev_wait, wait);
		if (cl->notify_ev)
			mask |= EPOLLPRI;
	}

	if (req_events & (EPOLLIN | EPOLLRDNORM)) {
		poll_wait(file, &cl->rx_wait, wait);

		if (mei_cl_read_cb(cl, file))
			mask |= EPOLLIN | EPOLLRDNORM;
		else
			mei_cl_read_start(cl, mei_cl_mtu(cl), file);
	}

	if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
		poll_wait(file, &cl->tx_wait, wait);
		if (cl->tx_cb_queued < dev->tx_queue_limit)
			mask |= EPOLLOUT | EPOLLWRNORM;
	}

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}

/**
 * mei_cl_is_write_queued - check if the client has pending writes.
 *
 * @cl: writing host client
 *
 * Return: true if client is writing, false otherwise.
 */
static bool mei_cl_is_write_queued(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &dev->write_list, list)
		if (cb->cl == cl)
			return true;
	list_for_each_entry(cb, &dev->write_waiting_list, list)
		if (cb->cl == cl)
			return true;
	return false;
}

/**
 * mei_fsync - the fsync handler
 *
 * @fp: pointer to file structure
 * @start: unused
 * @end: unused
 * @datasync: unused
 *
 * Return: 0 on success, -ENODEV if client is not connected
 */
static int mei_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
{
	struct mei_cl *cl = fp->private_data;
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	while (mei_cl_is_write_queued(cl)) {
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				!mei_cl_is_connected(cl));
		mutex_lock(&dev->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}
	rets = 0;
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_fasync - asynchronous io support
 *
 * @fd: file descriptor
 * @file: pointer to file structure
 * @band: band bitmap
 *
 * Return: negative on error,
 *         0 if it did not change anything,
 *         and positive if a process was added or deleted
 */
static int mei_fasync(int fd, struct file *file, int band)
{

	struct mei_cl *cl = file->private_data;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	return fasync_helper(fd, file, band, &cl->ev_async);
}
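
/*
 * Illustrative userspace sketch (not part of the driver): enabling firmware
 * event notifications on a connected client and waiting for one with poll().
 * Uses IOCTL_MEI_NOTIFY_SET/IOCTL_MEI_NOTIFY_GET from <linux/mei.h>; the
 * helper name and the blocking strategy are assumptions for illustration.
 *
 *	#include <poll.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/mei.h>
 *
 *	static int mei_wait_for_notification(int fd)
 *	{
 *		__u32 enable = 1;	// 1 - enable, 0 - disable
 *		__u32 event = 0;
 *		struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *		if (ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable) < 0)
 *			return -1;
 *		if (poll(&pfd, 1, -1) <= 0 || !(pfd.revents & POLLPRI))
 *			return -1;
 *		// reads and clears the pending notification state
 *		if (ioctl(fd, IOCTL_MEI_NOTIFY_GET, &event) < 0)
 *			return -1;
 *		return event ? 0 : -1;
 *	}
 */
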
/**
 * trc_show - mei device trc attribute show method
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t trc_show(struct device *device,
			struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	u32 trc;
	int ret;

	ret = mei_trc_status(dev, &trc);
	if (ret)
		return ret;
	return sprintf(buf, "%08X\n", trc);
}
static DEVICE_ATTR_RO(trc);

/**
 * fw_status_show - mei device fw_status attribute show method
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t fw_status_show(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	struct mei_fw_status fw_status;
	int err, i;
	ssize_t cnt = 0;

	mutex_lock(&dev->device_lock);
	err = mei_fw_status(dev, &fw_status);
	mutex_unlock(&dev->device_lock);
	if (err) {
		dev_err(device, "read fw_status error = %d\n", err);
		return err;
	}

	for (i = 0; i < fw_status.count; i++)
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
				 fw_status.status[i]);
	return cnt;
}
static DEVICE_ATTR_RO(fw_status);

/**
 * hbm_ver_show - display HBM protocol version negotiated with FW
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t hbm_ver_show(struct device *device,
			    struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	struct hbm_version ver;

	mutex_lock(&dev->device_lock);
	ver = dev->version;
	mutex_unlock(&dev->device_lock);

	return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version);
}
static DEVICE_ATTR_RO(hbm_ver);

/**
 * hbm_ver_drv_show - display HBM protocol version advertised by driver
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t hbm_ver_drv_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION);
}
static DEVICE_ATTR_RO(hbm_ver_drv);

static ssize_t tx_queue_limit_show(struct device *device,
				   struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	u8 size = 0;

	mutex_lock(&dev->device_lock);
	size = dev->tx_queue_limit;
	mutex_unlock(&dev->device_lock);

	return snprintf(buf, PAGE_SIZE, "%u\n", size);
}

static ssize_t tx_queue_limit_store(struct device *device,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct mei_device *dev = dev_get_drvdata(device);
	u8 limit;
	unsigned int inp;
	int err;

	err = kstrtouint(buf, 10, &inp);
	if (err)
		return err;
	if (inp > MEI_TX_QUEUE_LIMIT_MAX || inp < MEI_TX_QUEUE_LIMIT_MIN)
		return -EINVAL;
	limit = inp;

	mutex_lock(&dev->device_lock);
	dev->tx_queue_limit = limit;
	mutex_unlock(&dev->device_lock);

	return count;
}
static DEVICE_ATTR_RW(tx_queue_limit);
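
/*
 * The attributes above, together with fw_ver, dev_state and kind defined
 * below, are exposed under /sys/class/mei/mei<N>/. Illustrative userspace
 * sketch (not part of the driver), device node number assumed to be 0:
 *
 *	#include <stdio.h>
 *
 *	static int mei_read_fw_status(char *out, size_t len)
 *	{
 *		FILE *f = fopen("/sys/class/mei/mei0/fw_status", "r");
 *		size_t n;
 *
 *		if (!f)
 *			return -1;
 *		n = fread(out, 1, len - 1, f);
 *		out[n] = '\0';
 *		fclose(f);
 *		return n ? 0 : -1;
 *	}
 */
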
/**
 * fw_ver_show - display ME FW version
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t fw_ver_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	struct mei_fw_version *ver;
	ssize_t cnt = 0;
	int i;

	ver = dev->fw_ver;

	for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++)
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n",
				 ver[i].platform, ver[i].major, ver[i].minor,
				 ver[i].hotfix, ver[i].buildno);
	return cnt;
}
static DEVICE_ATTR_RO(fw_ver);

/**
 * dev_state_show - display device state
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t dev_state_show(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	enum mei_dev_state dev_state;

	mutex_lock(&dev->device_lock);
	dev_state = dev->dev_state;
	mutex_unlock(&dev->device_lock);

	return sprintf(buf, "%s", mei_dev_state_str(dev_state));
}
static DEVICE_ATTR_RO(dev_state);

/**
 * mei_set_devstate - set new device state and notify sysfs file.
 *
 * @dev: mei_device
 * @state: new device state
 */
void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
{
	struct device *clsdev;

	if (dev->dev_state == state)
		return;

	dev->dev_state = state;

	clsdev = class_find_device_by_devt(mei_class, dev->cdev.dev);
	if (clsdev) {
		sysfs_notify(&clsdev->kobj, NULL, "dev_state");
		put_device(clsdev);
	}
}

/**
 * kind_show - display device kind
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t kind_show(struct device *device,
			 struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	ssize_t ret;

	if (dev->kind)
		ret = sprintf(buf, "%s\n", dev->kind);
	else
		ret = sprintf(buf, "%s\n", "mei");

	return ret;
}
static DEVICE_ATTR_RO(kind);

static struct attribute *mei_attrs[] = {
	&dev_attr_fw_status.attr,
	&dev_attr_hbm_ver.attr,
	&dev_attr_hbm_ver_drv.attr,
	&dev_attr_tx_queue_limit.attr,
	&dev_attr_fw_ver.attr,
	&dev_attr_dev_state.attr,
	&dev_attr_trc.attr,
	&dev_attr_kind.attr,
	NULL
};
ATTRIBUTE_GROUPS(mei);

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.fsync = mei_fsync,
	.fasync = mei_fasync,
	.llseek = no_llseek
};

/**
 * mei_minor_get - obtain next free device minor number
 *
 * @dev: device pointer
 *
 * Return: allocated minor, or -ENOSPC if no free minor left
 */
static int mei_minor_get(struct mei_device *dev)
{
	int ret;

	mutex_lock(&mei_minor_lock);
	ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
	if (ret >= 0)
		dev->minor = ret;
	else if (ret == -ENOSPC)
		dev_err(dev->dev, "too many mei devices\n");

	mutex_unlock(&mei_minor_lock);
	return ret;
}

/**
 * mei_minor_free - mark device minor number as free
 *
 * @dev: device pointer
 */
static void mei_minor_free(struct mei_device *dev)
{
	mutex_lock(&mei_minor_lock);
	idr_remove(&mei_idr, dev->minor);
	mutex_unlock(&mei_minor_lock);
}

int mei_register(struct mei_device *dev, struct device *parent)
{
	struct device *clsdev; /* class device */
	int ret, devno;

	ret = mei_minor_get(dev);
	if (ret < 0)
		return ret;

	/* Fill in the data structures */
	devno = MKDEV(MAJOR(mei_devt), dev->minor);
	cdev_init(&dev->cdev, &mei_fops);
	dev->cdev.owner = parent->driver->owner;

	/* Add the device */
	ret = cdev_add(&dev->cdev, devno, 1);
	if (ret) {
		dev_err(parent, "unable to add device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		goto err_dev_add;
	}

	clsdev = device_create_with_groups(mei_class, parent, devno,
					   dev, mei_groups,
					   "mei%d", dev->minor);

	if (IS_ERR(clsdev)) {
		dev_err(parent, "unable to create device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		ret = PTR_ERR(clsdev);
		goto err_dev_create;
	}

	mei_dbgfs_register(dev, dev_name(clsdev));

	return 0;

err_dev_create:
	cdev_del(&dev->cdev);
err_dev_add:
	mei_minor_free(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_register);

void mei_deregister(struct mei_device *dev)
{
	int devno;

	devno = dev->cdev.dev;
	cdev_del(&dev->cdev);

	mei_dbgfs_deregister(dev);

	device_destroy(mei_class, devno);

	mei_minor_free(dev);
}
EXPORT_SYMBOL_GPL(mei_deregister);

static int __init mei_init(void)
{
	int ret;

	mei_class = class_create(THIS_MODULE, "mei");
	if (IS_ERR(mei_class)) {
		pr_err("couldn't create class\n");
		ret = PTR_ERR(mei_class);
		goto err;
	}

	ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
	if (ret < 0) {
		pr_err("unable to allocate char dev region\n");
		goto err_class;
	}

	ret = mei_cl_bus_init();
	if (ret < 0) {
		pr_err("unable to initialize bus\n");
		goto err_chrdev;
	}

	return 0;

err_chrdev:
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
err_class:
	class_destroy(mei_class);
err:
	return ret;
}

static void __exit mei_exit(void)
{
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
	class_destroy(mei_class);
	mei_cl_bus_exit();
}

module_init(mei_init);
module_exit(mei_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");