/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_by_uuid - locate index of me client
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns me client index or -ENOENT if not found
 */
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
{
	int i;

	for (i = 0; i < dev->me_clients_num; ++i)
		if (uuid_le_cmp(*uuid,
				dev->me_clients[i].props.protocol_name) == 0)
			return i;

	return -ENOENT;
}

/**
 * mei_me_cl_by_id - return index to me_clients for client_id
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns index on success, -ENOENT on failure.
 */
int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	int i;

	for (i = 0; i < dev->me_clients_num; i++)
		if (dev->me_clients[i].client_id == client_id)
			return i;

	return -ENOENT;
}

/**
 * mei_cl_cmp_id - tells if the clients are the same
 *
 * @cl1: host client 1
 * @cl2: host client 2
 *
 * returns true - if the clients have the same host and me ids
 *         false - otherwise
 */
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
				 const struct mei_cl *cl2)
{
	return cl1 && cl2 &&
		(cl1->host_client_id == cl2->host_client_id) &&
		(cl1->me_client_id == cl2->me_client_id);
}

/**
 * __mei_io_list_flush - removes cbs belonging to cl.
 *
 * @list: an instance of our list structure
 * @cl: host client, can be NULL for flushing the whole list
 * @free: whether to free the cbs
 */
static void __mei_io_list_flush(struct mei_cl_cb *list,
				struct mei_cl *cl, bool free)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *next;

	/* enable removing everything if no cl is specified */
	list_for_each_entry_safe(cb, next, &list->list, list) {
		if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
			list_del(&cb->list);
			if (free)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_list_flush - removes list entry belonging to cl.
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, false);
}

/**
 * mei_io_list_free - removes cb belonging to cl and free them
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, true);
}
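
/*
 * Usage sketch (illustrative only, not compiled): the two wrappers above
 * differ only in callback ownership.  Flushing unlinks the cbs but leaves
 * them alive for a caller that still holds references; freeing unlinks
 * them and releases their memory:
 *
 *	mei_io_list_flush(&dev->ctrl_rd_list, cl);	unlink only
 *	mei_io_list_free(&dev->write_list, cl);		unlink and kfree
 */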

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	kfree(cb->request_buffer.data);
	kfree(cb->response_buffer.data);
	kfree(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @fp: pointer to file structure
 *
 * returns mei_cl_cb pointer or NULL;
 */
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	mei_io_list_init(cb);

	cb->file_object = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	return cb;
}

/**
 * mei_io_cb_alloc_req_buf - allocate request buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->request_buffer.data)
		return -ENOMEM;
	cb->request_buffer.size = length;
	return 0;
}

/**
 * mei_io_cb_alloc_resp_buf - allocate response buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->response_buffer.data)
		return -ENOMEM;
	cb->response_buffer.size = length;
	return 0;
}

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 *
 * returns 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_list_flush(&cl->dev->read_list, cl);
	mei_io_list_free(&cl->dev->write_list, cl);
	mei_io_list_free(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
	return 0;
}
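
/*
 * Callback lifecycle sketch (illustrative only): a request cb is built
 * from mei_io_cb_init() plus a request buffer, and mei_io_cb_free()
 * releases both buffers and the cb itself on any error path:
 *
 *	cb = mei_io_cb_init(cl, file);
 *	if (!cb)
 *		return -ENOMEM;
 *	if (mei_io_cb_alloc_req_buf(cb, length)) {
 *		mei_io_cb_free(cb);
 *		return -ENOMEM;
 *	}
 */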

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	INIT_LIST_HEAD(&cl->link);
	INIT_LIST_HEAD(&cl->device_link);
	cl->reading_state = MEI_IDLE;
	cl->writing_state = MEI_IDLE;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * returns the allocated client or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_find_read_cb - find this cl's callback in the read list
 *
 * @cl: host client
 *
 * returns cb on success, NULL on error
 */
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &dev->read_list.list, list)
		if (mei_cl_cmp_id(cl, cb->cl))
			return cb;
	return NULL;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
 *
 * returns 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open handle count exceeded
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
	struct mei_device *dev;
	long open_handle_count;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* If Id is not assigned get one*/
	if (id == MEI_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					 MEI_CLIENTS_MAX);

	if (id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove me_cl from the list
 *
 * @cl: host client
 *
 * returns always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* wd and amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_INITIALIZING;

	return 0;
}
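
/*
 * Pairing sketch (illustrative only): an open path allocates a client
 * and links it into the host map; the release path unlinks and frees it:
 *
 *	cl = mei_cl_allocate(dev);
 *	rets = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
 *	...
 *	mei_cl_unlink(cl);
 *	kfree(cl);
 */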

/**
 * mei_host_client_init - initialize the fixed host clients (amthif, wd,
 *	nfc) found in the me clients enumeration; runs from the init work.
 *
 * @work: the init work item embedded in the mei device
 */
void mei_host_client_init(struct work_struct *work)
{
	struct mei_device *dev = container_of(work,
					      struct mei_device, init_work);
	struct mei_client_properties *client_props;
	int i;

	mutex_lock(&dev->device_lock);

	for (i = 0; i < dev->me_clients_num; i++) {
		client_props = &dev->me_clients[i].props;

		if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
			mei_amthif_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
			mei_wd_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid))
			mei_nfc_host_init(dev);
	}

	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	mutex_unlock(&dev->device_lock);

	pm_runtime_mark_last_busy(&dev->pdev->dev);
	dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n");
	pm_runtime_autosuspend(&dev->pdev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * returns true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    dev->pg_event == MEI_PG_EVENT_WAIT) {
		dev_dbg(&dev->pdev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets, err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto free;
	}

	cb->fop_type = MEI_FOP_CLOSE;
	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			cl_err(dev, cl, "failed to disconnect.\n");
			goto free;
		}
		mdelay(10); /* Wait for hardware disconnection ready */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		cl_dbg(dev, cl, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}
	mutex_unlock(&dev->device_lock);

	err = wait_event_timeout(dev->wait_recvd_msg,
			MEI_FILE_DISCONNECTED == cl->state,
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);
	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
	} else {
		rets = -ENODEV;
		if (MEI_FILE_DISCONNECTED != cl->state)
			cl_err(dev, cl, "wrong status client disconnect.\n");

		if (err)
			cl_dbg(dev, cl, "wait failed disconnect err=%d\n", err);

		cl_err(dev, cl, "failed to disconnect from FW client.\n");
	}

	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	mei_io_cb_free(cb);
	return rets;
}
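
/*
 * Queuing sketch (illustrative only): mei_cl_disconnect() above shows the
 * common pattern around mei_hbuf_acquire() - when the host buffer is
 * available the request is sent at once and the cb parks on ctrl_rd_list
 * to await the reply; otherwise the cb is deferred on ctrl_wr_list for
 * the interrupt thread to retry:
 *
 *	if (mei_hbuf_acquire(dev))
 *		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
 *	else
 *		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
 */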

/**
 * mei_cl_is_other_connecting - checks if another
 *	client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * returns true if another client is connecting, false - otherwise.
 */
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl *ocl; /* the other client */

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;

	list_for_each_entry(ocl, &dev->file_list, link) {
		if (ocl->state == MEI_FILE_CONNECTING &&
		    ocl != cl &&
		    cl->me_client_id == ocl->me_client_id)
			return true;
	}

	return false;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_CONNECT;

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		cl->state = MEI_FILE_CONNECTING;
		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_recvd_msg,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		/* something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;

		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
	}

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: private data of the file object
 *
 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
 *	-ENOENT if mei_cl is not present
 *	-EINVAL if single_recv_buf == 0
 */
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	if (!dev->me_clients_num)
		return 0;

	if (cl->mei_flow_ctrl_creds > 0)
		return 1;

	id = mei_me_cl_by_id(dev, cl->me_client_id);
	if (id < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return id;
	}

	me_cl = &dev->me_clients[id];
	if (me_cl->mei_flow_ctrl_creds) {
		if (WARN_ON(me_cl->props.single_recv_buf == 0))
			return -EINVAL;
		return 1;
	}
	return 0;
}
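
/*
 * Credit gating sketch (illustrative only): senders call the check above
 * before each message and consume one credit per completed message:
 *
 *	rets = mei_cl_flow_ctrl_creds(cl);
 *	if (rets < 0)
 *		return rets;		error, e.g. unknown me client
 *	if (rets == 0)
 *		return 0;		no credits, retry later
 *	send one message, then call mei_cl_flow_ctrl_reduce(cl)
 */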

/**
 * mei_cl_flow_ctrl_reduce - reduces flow_control.
 *
 * @cl: private data of the file object
 *
 * returns
 *	0 on success
 *	-ENOENT when me client is not found
 *	-EINVAL when ctrl credits are <= 0
 */
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = mei_me_cl_by_id(dev, cl->me_client_id);
	if (id < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return id;
	}

	me_cl = &dev->me_clients[id];
	if (me_cl->props.single_recv_buf != 0) {
		if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
			return -EINVAL;
		me_cl->mei_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->mei_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->read_cb) {
		cl_dbg(dev, cl, "read is pending.\n");
		return -EBUSY;
	}
	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENOTTY;
	}

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* always allocate at least client max message */
	length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
	rets = mei_io_cb_alloc_resp_buf(cb, length);
	if (rets)
		goto out;

	cb->fop_type = MEI_FOP_READ;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_add_tail(&cb->list, &dev->read_list.list);
	} else {
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	cl->read_cb = cb;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
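
/*
 * Read flow sketch (illustrative only, error checks omitted): the
 * character-device read path arms a read with the function above and
 * then sleeps until the interrupt thread completes cl->read_cb:
 *
 *	rets = mei_cl_read_start(cl, length);
 *	wait_event_interruptible(cl->rx_wait,
 *			MEI_READ_COMPLETE == cl->reading_state);
 *	copy the data out of cl->read_cb->response_buffer
 */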

/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0 on success; otherwise error code.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t len;
	u32 msg_slots;
	int slots;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->request_buffer;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		return rets;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	slots = mei_hbuf_empty_slots(dev);
	len = buf->size - cb->buf_idx;
	msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.internal = cb->internal;

	if (slots >= msg_slots) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (slots == dev->hbuf_depth) {
		msg_slots = slots;
		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
			cb->request_buffer.size, cb->buf_idx);

	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
	if (rets) {
		cl->status = rets;
		list_move_tail(&cb->list, &cmpl_list->list);
		return rets;
	}

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += mei_hdr.length;

	if (mei_hdr.msg_complete) {
		if (mei_cl_flow_ctrl_reduce(cl))
			return -EIO;
		list_move_tail(&cb->list, &dev->write_waiting_list.list);
	}

	return 0;
}
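
/*
 * Fragmentation arithmetic (illustrative, assuming a 4-byte
 * struct mei_msg_hdr and, say, hbuf_depth == 128 slots of u32): the
 * largest fragment body the splitting branch above can emit is
 *
 *	len = 128 * sizeof(u32) - sizeof(struct mei_msg_hdr)
 *	    = 512 - 4 = 508 bytes
 *
 * so a 1000-byte request leaves as a 508-byte fragment with
 * msg_complete == 0, followed by a 492-byte fragment with
 * msg_complete == 1.
 */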

/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: block until the write is completed
 *
 * returns number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->request_buffer;

	cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb->fop_type = MEI_FOP_WRITE;
	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf->size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf->size;
		goto out;
	}

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete) {
		rets = mei_cl_flow_ctrl_reduce(cl);
		if (rets < 0)
			goto err;

		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&cb->list, &dev->write_list.list);
	}

	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE);
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
	}

	rets = buf->size;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	return rets;
}
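
/*
 * Write path sketch (illustrative only, error checks omitted): a typical
 * blocking caller fills the request buffer and lets mei_cl_write() handle
 * credits, fragmentation and completion:
 *
 *	cb = mei_io_cb_init(cl, file);
 *	mei_io_cb_alloc_req_buf(cb, length);
 *	memcpy(cb->request_buffer.data, data, length);
 *	rets = mei_cl_write(cl, cb, true);	bytes sent or <0
 */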

/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	if (cb->fop_type == MEI_FOP_WRITE) {
		mei_io_cb_free(cb);
		cb = NULL;
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait))
			wake_up_interruptible(&cl->tx_wait);

	} else if (cb->fop_type == MEI_FOP_READ &&
			MEI_READING == cl->reading_state) {
		cl->reading_state = MEI_READ_COMPLETE;
		if (waitqueue_active(&cl->rx_wait))
			wake_up_interruptible(&cl->rx_wait);
		else
			mei_cl_bus_rx_event(cl);
	}
}

/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link) {
		cl->state = MEI_FILE_DISCONNECTED;
		cl->mei_flow_ctrl_creds = 0;
		cl->timer_count = 0;
	}
}

/**
 * mei_cl_all_wakeup - wake up all readers and writers so they can be
 *	interrupted
 *
 * @dev: mei device
 */
void mei_cl_all_wakeup(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link) {
		if (waitqueue_active(&cl->rx_wait)) {
			cl_dbg(dev, cl, "Waking up reading client!\n");
			wake_up_interruptible(&cl->rx_wait);
		}
		if (waitqueue_active(&cl->tx_wait)) {
			cl_dbg(dev, cl, "Waking up writing client!\n");
			wake_up_interruptible(&cl->tx_wait);
		}
	}
}

/**
 * mei_cl_all_write_clear - clear all pending writes
 *
 * @dev: mei device
 */
void mei_cl_all_write_clear(struct mei_device *dev)
{
	mei_io_list_free(&dev->write_list, NULL);
	mei_io_list_free(&dev->write_waiting_list, NULL);
}
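
/*
 * Reset sketch (illustrative only): a device reset path would typically
 * combine the three helpers above - force every client into the
 * disconnected state, drop the pending writes and wake any sleepers so
 * they can observe the new state:
 *
 *	mei_cl_all_disconnect(dev);
 *	mei_cl_all_write_clear(dev);
 *	mei_cl_all_wakeup(dev);
 */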