/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_by_uuid - locate index of me client
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns me client index or -ENOENT if not found
 */
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
{
	int i;

	for (i = 0; i < dev->me_clients_num; ++i)
		if (uuid_le_cmp(*uuid,
				dev->me_clients[i].props.protocol_name) == 0)
			return i;

	return -ENOENT;
}


/**
 * mei_me_cl_by_id - return index to me_clients for client_id
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns index on success, -ENOENT on failure.
 */
int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	int i;

	for (i = 0; i < dev->me_clients_num; i++)
		if (dev->me_clients[i].client_id == client_id)
			return i;

	return -ENOENT;
}


/**
 * mei_cl_cmp_id - tells if the clients are the same
 *
 * @cl1: host client 1
 * @cl2: host client 2
 *
 * returns true - if the clients have the same host and me ids
 *	   false - otherwise
 */
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
				 const struct mei_cl *cl2)
{
	return cl1 && cl2 &&
		(cl1->host_client_id == cl2->host_client_id) &&
		(cl1->me_client_id == cl2->me_client_id);
}

/**
 * __mei_io_list_flush - removes cbs belonging to cl.
 *
 * @list: an instance of our list structure
 * @cl: host client, can be NULL for flushing the whole list
 * @free: whether to free the cbs
 */
static void __mei_io_list_flush(struct mei_cl_cb *list,
				struct mei_cl *cl, bool free)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *next;

	/* enable removing everything if no cl is specified */
	list_for_each_entry_safe(cb, next, &list->list, list) {
		if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
			list_del(&cb->list);
			if (free)
				mei_io_cb_free(cb);
		}
	}
}
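
/*
 * Usage sketch (illustrative only, not called from this file this way):
 * passing cl == NULL empties the whole list, e.g. dropping every queued
 * write during shutdown, while passing a client removes only its cbs:
 *
 *	__mei_io_list_flush(&dev->write_list, NULL, true);
 *	__mei_io_list_flush(&dev->ctrl_rd_list, cl, false);
 */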

/**
 * mei_io_list_flush - removes list entries belonging to cl.
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, false);
}


/**
 * mei_io_list_free - removes cbs belonging to cl and frees them
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, true);
}

/**
 * mei_io_cb_free - free mei_cl_cb related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	kfree(cb->request_buffer.data);
	kfree(cb->response_buffer.data);
	kfree(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @fp: pointer to file structure
 *
 * returns mei_cl_cb pointer or NULL on allocation failure
 */
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	mei_io_list_init(cb);

	cb->file_object = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	return cb;
}

/**
 * mei_io_cb_alloc_req_buf - allocate request buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *	-EINVAL if cb is NULL
 *	-ENOMEM if allocation failed
 */
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->request_buffer.data)
		return -ENOMEM;
	cb->request_buffer.size = length;
	return 0;
}

/**
 * mei_io_cb_alloc_resp_buf - allocate response buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *	-EINVAL if cb is NULL
 *	-ENOMEM if allocation failed
 */
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->response_buffer.data)
		return -ENOMEM;
	cb->response_buffer.size = length;
	return 0;
}


/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 *
 * returns 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_list_flush(&cl->dev->read_list, cl);
	mei_io_list_free(&cl->dev->write_list, cl);
	mei_io_list_free(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
	return 0;
}
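
/*
 * Callback lifecycle sketch (illustrative only; file, data and length
 * stand for caller state): a cb is allocated, given a request buffer,
 * filled, queued, and eventually freed by its owner:
 *
 *	cb = mei_io_cb_init(cl, file);
 *	if (!cb)
 *		return -ENOMEM;
 *	if (mei_io_cb_alloc_req_buf(cb, length)) {
 *		mei_io_cb_free(cb);
 *		return -ENOMEM;
 *	}
 *	memcpy(cb->request_buffer.data, data, length);
 *	... queue cb on one of the device lists ...
 */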

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	INIT_LIST_HEAD(&cl->link);
	INIT_LIST_HEAD(&cl->device_link);
	cl->reading_state = MEI_IDLE;
	cl->writing_state = MEI_IDLE;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * returns the allocated host client or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_find_read_cb - find this cl's callback in the read list
 *
 * @cl: host client
 *
 * returns cb on success, NULL if not found
 */
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &dev->read_list.list, list)
		if (mei_cl_cmp_id(cl, cb->cl))
			return cb;
	return NULL;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
 *
 * returns 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
	struct mei_device *dev;
	long open_handle_count;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* If Id is not assigned get one */
	if (id == MEI_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					 MEI_CLIENTS_MAX);

	if (id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * returns always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* wd and amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_INITIALIZING;

	return 0;
}
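
/*
 * Link/unlink pairing sketch (illustrative only): a freshly allocated
 * client is linked into the device before use and unlinked on release:
 *
 *	cl = mei_cl_allocate(dev);
 *	if (!cl)
 *		return -ENOMEM;
 *	if (mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY)) {
 *		kfree(cl);
 *		return -EMFILE;
 *	}
 *	...
 *	mei_cl_unlink(cl);
 *	kfree(cl);
 */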

/**
 * mei_host_client_init - initialize the special host clients
 *	(amthif, wd, nfc) according to the me client properties
 *	reported by the firmware, then mark the device enabled
 *
 * @work: work item embedded in the device structure
 */
void mei_host_client_init(struct work_struct *work)
{
	struct mei_device *dev = container_of(work,
					      struct mei_device, init_work);
	struct mei_client_properties *client_props;
	int i;

	mutex_lock(&dev->device_lock);

	for (i = 0; i < dev->me_clients_num; i++) {
		client_props = &dev->me_clients[i].props;

		if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
			mei_amthif_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
			mei_wd_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid))
			mei_nfc_host_init(dev);

	}

	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	mutex_unlock(&dev->device_lock);

	pm_runtime_mark_last_busy(&dev->pdev->dev);
	dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n");
	pm_runtime_autosuspend(&dev->pdev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * returns true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    dev->pg_event == MEI_PG_EVENT_WAIT) {
		dev_dbg(&dev->pdev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto free;
	}

	cb->fop_type = MEI_FOP_CLOSE;
	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			cl_err(dev, cl, "failed to disconnect.\n");
			goto free;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		mdelay(10); /* Wait for hardware disconnection ready */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		cl_dbg(dev, cl, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}
	mutex_unlock(&dev->device_lock);

	wait_event_timeout(dev->wait_recvd_msg,
			   MEI_FILE_DISCONNECTED == cl->state,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);

	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
	} else {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	mei_io_cb_free(cb);
	return rets;
}
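
/*
 * Disconnect sketch (illustrative only): callers mark the client as
 * disconnecting first, otherwise mei_cl_disconnect() is a no-op:
 *
 *	mutex_lock(&dev->device_lock);
 *	cl->state = MEI_FILE_DISCONNECTING;
 *	rets = mei_cl_disconnect(cl);
 *	mutex_unlock(&dev->device_lock);
 */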

/**
 * mei_cl_is_other_connecting - checks if another
 *	client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * returns true if other client is connecting, false otherwise.
 */
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl *ocl; /* the other client */

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;

	list_for_each_entry(ocl, &dev->file_list, link) {
		if (ocl->state == MEI_FILE_CONNECTING &&
		    ocl != cl &&
		    cl->me_client_id == ocl->me_client_id)
			return true;

	}

	return false;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_CONNECT;

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		cl->state = MEI_FILE_CONNECTING;
		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_recvd_msg,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTED;
		/* something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;

		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
	}

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: private data of the file object
 *
 * returns 1 if mei_flow_ctrl_creds > 0, 0 otherwise.
 *	-ENOENT if mei_cl is not present
 *	-EINVAL if single_recv_buf == 0
 */
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	if (!dev->me_clients_num)
		return 0;

	if (cl->mei_flow_ctrl_creds > 0)
		return 1;

	id = mei_me_cl_by_id(dev, cl->me_client_id);
	if (id < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return id;
	}

	me_cl = &dev->me_clients[id];
	if (me_cl->mei_flow_ctrl_creds) {
		if (WARN_ON(me_cl->props.single_recv_buf == 0))
			return -EINVAL;
		return 1;
	}
	return 0;
}
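
/*
 * Credit bookkeeping sketch (illustrative only): senders gate every
 * message on a credit check and consume one credit per complete
 * message, e.g.:
 *
 *	rets = mei_cl_flow_ctrl_creds(cl);
 *	if (rets <= 0)
 *		return rets;	(no credit yet: queue and retry later)
 *	... write one complete message ...
 *	mei_cl_flow_ctrl_reduce(cl);
 */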

/**
 * mei_cl_flow_ctrl_reduce - reduces flow_control.
 *
 * @cl: private data of the file object
 *
 * returns
 *	0 on success
 *	-ENOENT when me client is not found
 *	-EINVAL when ctrl credits are <= 0
 */
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = mei_me_cl_by_id(dev, cl->me_client_id);
	if (id < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return id;
	}

	me_cl = &dev->me_clients[id];
	if (me_cl->props.single_recv_buf != 0) {
		if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
			return -EINVAL;
		me_cl->mei_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->mei_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_read_start - start reading a message from the me client
 *
 * @cl: host client
 * @length: number of bytes to read
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->read_cb) {
		cl_dbg(dev, cl, "read is pending.\n");
		return -EBUSY;
	}
	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENOTTY;
	}

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* always allocate at least client max message */
	length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
	rets = mei_io_cb_alloc_resp_buf(cb, length);
	if (rets)
		goto out;

	cb->fop_type = MEI_FOP_READ;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_add_tail(&cb->list, &dev->read_list.list);
	} else {
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	cl->read_cb = cb;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
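
/*
 * Read flow sketch (illustrative only): a reader arms the read and then
 * sleeps until the rx path flips reading_state:
 *
 *	rets = mei_cl_read_start(cl, length);
 *	if (rets && rets != -EBUSY)
 *		return rets;
 *	mutex_unlock(&dev->device_lock);
 *	wait_event_interruptible(cl->rx_wait,
 *			cl->reading_state == MEI_READ_COMPLETE ||
 *			!mei_cl_is_connected(cl));
 *	mutex_lock(&dev->device_lock);
 */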

/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t len;
	u32 msg_slots;
	int slots;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->request_buffer;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		return rets;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	slots = mei_hbuf_empty_slots(dev);
	len = buf->size - cb->buf_idx;
	msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.internal = cb->internal;

	if (slots >= msg_slots) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (slots == dev->hbuf_depth) {
		msg_slots = slots;
		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
	       cb->request_buffer.size, cb->buf_idx);

	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
	if (rets) {
		cl->status = rets;
		list_move_tail(&cb->list, &cmpl_list->list);
		return rets;
	}

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += mei_hdr.length;

	if (mei_hdr.msg_complete) {
		if (mei_cl_flow_ctrl_reduce(cl))
			return -EIO;
		list_move_tail(&cb->list, &dev->write_waiting_list.list);
	}

	return 0;
}
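
/*
 * Slot math sketch (illustrative only, assuming 4 byte slots and a
 * 4 byte struct mei_msg_hdr): a 100 byte payload needs
 * mei_data2slots(100) = DIV_ROUND_UP(100 + 4, 4) = 26 slots, so it is
 * sent whole once 26 slots are free, split when the buffer is empty but
 * smaller than that, and deferred otherwise.
 */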

/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: block until the write is completed
 *
 * returns number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->request_buffer;

	cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb->fop_type = MEI_FOP_WRITE;
	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf->size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf->size;
		goto out;
	}

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete) {
		rets = mei_cl_flow_ctrl_reduce(cl);
		if (rets < 0)
			goto err;

		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&cb->list, &dev->write_list.list);
	}

	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE);
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
	}

	rets = buf->size;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	return rets;
}
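
/*
 * Write usage sketch (illustrative only; file, data and length stand
 * for caller state): a caller builds a cb with a filled request buffer
 * and submits it under device_lock, blocking until completion:
 *
 *	cb = mei_io_cb_init(cl, file);
 *	if (!cb || mei_io_cb_alloc_req_buf(cb, length))
 *		goto err;
 *	memcpy(cb->request_buffer.data, data, length);
 *	mutex_lock(&dev->device_lock);
 *	rets = mei_cl_write(cl, cb, true);
 *	mutex_unlock(&dev->device_lock);
 */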

/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	if (cb->fop_type == MEI_FOP_WRITE) {
		mei_io_cb_free(cb);
		cb = NULL;
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait))
			wake_up_interruptible(&cl->tx_wait);

	} else if (cb->fop_type == MEI_FOP_READ &&
			MEI_READING == cl->reading_state) {
		cl->reading_state = MEI_READ_COMPLETE;
		if (waitqueue_active(&cl->rx_wait))
			wake_up_interruptible(&cl->rx_wait);
		else
			mei_cl_bus_rx_event(cl);

	}
}


/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link) {
		cl->state = MEI_FILE_DISCONNECTED;
		cl->mei_flow_ctrl_creds = 0;
		cl->timer_count = 0;
	}
}


/**
 * mei_cl_all_wakeup - wake up all readers and writers so they can be
 *	interrupted
 *
 * @dev: mei device
 */
void mei_cl_all_wakeup(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link) {
		if (waitqueue_active(&cl->rx_wait)) {
			cl_dbg(dev, cl, "Waking up reading client!\n");
			wake_up_interruptible(&cl->rx_wait);
		}
		if (waitqueue_active(&cl->tx_wait)) {
			cl_dbg(dev, cl, "Waking up writing client!\n");
			wake_up_interruptible(&cl->tx_wait);
		}
	}
}

/**
 * mei_cl_all_write_clear - clear all pending writes
 *
 * @dev: mei device
 */
void mei_cl_all_write_clear(struct mei_device *dev)
{
	mei_io_list_free(&dev->write_list, NULL);
	mei_io_list_free(&dev->write_waiting_list, NULL);
}
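
/*
 * Reset-path sketch (illustrative only): on a device reset the driver
 * force-disconnects everybody, drops queued writes, and wakes sleepers
 * so they can observe the disconnect:
 *
 *	mei_cl_all_disconnect(dev);
 *	mei_cl_all_write_clear(dev);
 *	mei_cl_all_wakeup(dev);
 */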