/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}

/**
 * mei_me_cl_release - free me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}

/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}

/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
						 const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

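	/* the caller must hold me_clients_rwsem in read or write mode */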
	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *	increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	struct mei_me_client *__me_cl, *me_cl = NULL;

	down_read(&dev->me_clients_rwsem);
	list_for_each_entry(__me_cl, &dev->me_clients, list) {
		if (__me_cl->client_id == client_id) {
			me_cl = mei_me_cl_get(__me_cl);
			break;
		}
	}
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
						    const uuid_le *uuid,
						    u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

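/*
 * Example (illustrative sketch, not part of the driver): the lookups
 * above return a referenced me client, so a typical caller pairs the
 * lookup with mei_me_cl_put(); "uuid" stands for a caller-provided
 * client uuid.
 *
 *	struct mei_me_client *me_cl;
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	if (!me_cl)
 *		return -ENOTTY;
 *	... use me_cl->props ...
 *	mei_me_cl_put(me_cl);
 */
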
/**
 * mei_me_cl_rm_by_uuid_id - remove me client matching uuid and client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_cl_cmp_id - tells if the clients are the same
 *
 * @cl1: host client 1
 * @cl2: host client 2
 *
 * Return: true - if the clients have the same host and me ids
 *         false - otherwise
 */
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
				 const struct mei_cl *cl2)
{
	return cl1 && cl2 &&
		(cl1->host_client_id == cl2->host_client_id) &&
		(mei_cl_me_id(cl1) == mei_cl_me_id(cl2));
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	list_del(&cb->list);
	kfree(cb->buf.data);
	kfree(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
					enum mei_cb_file_ops type,
					const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	return cb;
}

/**
 * __mei_io_list_flush - removes and frees cbs belonging to cl.
 *
 * @list: an instance of our list structure
 * @cl: host client, can be NULL for flushing the whole list
 * @free: whether to free the cbs
 */
static void __mei_io_list_flush(struct mei_cl_cb *list,
				struct mei_cl *cl, bool free)
{
	struct mei_cl_cb *cb, *next;

	/* enable removing everything if no cl is specified */
	list_for_each_entry_safe(cb, next, &list->list, list) {
		if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
			list_del_init(&cb->list);
			if (free)
				mei_io_cb_free(cb);
		}
	}
}

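/*
 * Note: a cb flushed with free == false is only unlinked from its
 * queue; the owner still has to release it with mei_io_cb_free(),
 * otherwise the buffer leaks.
 */
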
/**
 * mei_io_list_flush - removes cb list entries belonging to cl.
 *
 * @list: an instance of our list structure
 * @cl: host client
 */
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, false);
}

/**
 * mei_io_list_free - removes cbs belonging to cl and frees them
 *
 * @list: an instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, true);
}

/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	if (length == 0)
		return cb;

	cb->buf.data = kmalloc(length, GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	cb->buf.size = length;

	return cb;
}

/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *     and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list.list);
	return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *     for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp)
			return cb;

	return NULL;
}

/**
 * mei_cl_read_cb_flush - free client's read pending and completed cbs
 *     for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 */
void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);

	list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);
}

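/*
 * Example (illustrative sketch, error handling abridged): consuming
 * completed reads for a file under device_lock; a NULL fp matches any
 * file.
 *
 *	struct mei_cl_cb *cb;
 *
 *	mutex_lock(&dev->device_lock);
 *	while ((cb = mei_cl_read_cb(cl, fp)) != NULL) {
 *		... copy cb->buf_idx bytes out of cb->buf.data ...
 *		mei_io_cb_free(cb);
 *	}
 *	mutex_unlock(&dev->device_lock);
 */
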
/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_list_free(&cl->dev->write_list, cl);
	mei_io_list_free(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);

	mei_cl_read_cb_flush(cl, fp);

	return 0;
}

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_UNINITIALIZED;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * Return: the allocated host client or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	long open_handle_count;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}

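/*
 * Note: host clients are created with mei_cl_allocate() and registered
 * with mei_cl_link() under device_lock; mei_cl_alloc_linked() further
 * down wraps both steps. Teardown is mei_cl_unlink() plus kfree().
 */
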
/**
 * mei_host_client_init - initialize client hierarchy
 *
 * @dev: the mei device
 */
void mei_host_client_init(struct mei_device *dev)
{
	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *     they can be interrupted
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}

/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *     associated states and resources
 *
 * @cl: host client
 */
void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_list_free(&dev->write_list, cl);
	mei_io_list_free(&dev->write_waiting_list, cl);
	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}

static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}

/**
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);

	return 0;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
			  struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_cl_send_disconnect(cl, cb);
	if (ret)
		list_move_tail(&cb->list, &cmpl_list->list);

	return ret;
}

/**
 * __mei_cl_disconnect - disconnect host client from the me one;
 *	internal function, runtime pm has to be acquired already
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}

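/*
 * Note: __mei_cl_disconnect() drops and retakes device_lock around its
 * wait and expects the caller to hold a runtime pm reference;
 * mei_cl_disconnect() below acquires that reference first.
 */
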
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}

/**
 * mei_cl_is_other_connecting - checks if another
 *     client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if another client is connecting, false otherwise.
 */
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;

	dev = cl->dev;

	list_for_each_entry(cb, &dev->ctrl_rd_list.list, list) {
		if (cb->fop_type == MEI_FOP_CONNECT &&
		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
			return true;
	}

	return false;
}

/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);
	return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: host client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int rets;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	if (mei_cl_is_other_connecting(cl))
		return 0;

	if (slots < msg_slots)
		return -EMSGSIZE;

	rets = mei_cl_send_connect(cl, cb);
	if (rets)
		list_move_tail(&cb->list, &cmpl_list->list);

	return rets;
}

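/*
 * Example (illustrative sketch, error handling abridged): connecting a
 * freshly allocated host client to a firmware client found by uuid,
 * under device_lock.
 *
 *	struct mei_me_client *me_cl;
 *	struct mei_cl *cl;
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	cl = mei_cl_alloc_linked(dev);
 *	if (me_cl && !IS_ERR(cl) && !mei_cl_connect(cl, me_cl, fp))
 *		... cl is connected and ready for I/O ...
 *	mei_me_cl_put(me_cl);
 */
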
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		return rets;

	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush(&dev->ctrl_rd_list, cl);
			mei_io_list_flush(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}

/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success, ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);
	return ERR_PTR(ret);
}

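/*
 * TX flow control in a nutshell: a regular client may send one message
 * per credit in cl->tx_flow_ctrl_creds; clients marked single_recv_buf
 * share the me client's credit pool, and fixed address clients are not
 * flow controlled at all. The two helpers below implement the check
 * and the consume side of that scheme.
 */
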
/**
 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: host client
 *
 * Return: 1 if tx_flow_ctrl_creds > 0, 0 otherwise.
 */
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (cl->tx_flow_ctrl_creds > 0)
		return 1;

	if (mei_cl_is_fixed_address(cl))
		return 1;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (cl->me_cl->tx_flow_ctrl_creds > 0)
			return 1;
	}
	return 0;
}

/**
 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
 *     for a client
 *
 * @cl: host client
 *
 * Return:
 *	0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (mei_cl_is_fixed_address(cl))
		return 0;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->me_cl->tx_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->tx_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start/stop file operation type
 *
 * Return: MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
	if (fop == MEI_FOP_NOTIFY_START)
		return MEI_HBM_NOTIFICATION_START;
	else
		return MEI_HBM_NOTIFICATION_STOP;
}

/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
 *
 * @req: hbm notification request type
 *
 * Return: MEI_FOP_NOTIFY_START/STOP
 */
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
	if (req == MEI_HBM_NOTIFICATION_START)
		return MEI_FOP_NOTIFY_START;
	else
		return MEI_FOP_NOTIFY_STOP;
}

/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, &cmpl_list->list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	return 0;
}

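/*
 * Example (sketch, error handling omitted): enabling firmware
 * notifications for a connected client with device_lock held; request
 * is 1 to start and 0 to stop (see mei_cl_notify_request() right
 * below).
 *
 *	rets = mei_cl_notify_request(cl, fp, 1);
 */
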
/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request || !mei_cl_is_connected(cl),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_notify - raise notification
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_cl_notify(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	if (!cl->notify_en)
		return;

	cl_dbg(dev, cl, "notify event");
	cl->notify_ev = true;
	if (!mei_cl_bus_notify_event(cl))
		wake_up_interruptible(&cl->ev_wait);

	if (cl->ev_async)
		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
}

/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;

out:
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}

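/*
 * Example (sketch): blocking wait for a notification under device_lock;
 * notify_ev reports whether an event was pending when the call
 * returned.
 *
 *	bool notify_ev;
 *	int ret;
 *
 *	ret = mei_cl_notify_get(cl, true, &notify_ev);
 *	if (!ret && notify_ev)
 *		... handle the event ...
 */
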
/**
 * mei_cl_read_start - start the read of a client message
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	if (mei_cl_is_fixed_address(cl) || cl == &dev->iamthif_cl)
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds)
		return -EBUSY;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}

/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t len;
	u32 msg_slots;
	int slots;
	int rets;
	bool first_chunk;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

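	/* only the first chunk of a message consumes a tx flow control
	 * credit; continuation chunks ride on the credit already taken
	 */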
	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		return rets;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	slots = mei_hbuf_empty_slots(dev);
	len = buf->size - cb->buf_idx;
	msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = mei_cl_host_addr(cl);
	mei_hdr.me_addr = mei_cl_me_id(cl);
	mei_hdr.reserved = 0;
	mei_hdr.internal = cb->internal;

	if (slots >= msg_slots) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (slots == dev->hbuf_depth) {
		msg_slots = slots;
		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
	       cb->buf.size, cb->buf_idx);

	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
	if (rets) {
		cl->status = rets;
		list_move_tail(&cb->list, &cmpl_list->list);
		return rets;
	}

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += mei_hdr.length;
	cb->completed = mei_hdr.msg_complete == 1;

	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl))
			return -EIO;
	}

	if (mei_hdr.msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list.list);

	return 0;
}

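/*
 * Example (illustrative sketch, error handling abridged): submitting a
 * blocking write under device_lock. The cb is consumed by
 * mei_cl_write() in every outcome: queued on success, freed on error.
 *
 *	struct mei_cl_cb *cb;
 *
 *	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	memcpy(cb->buf.data, data, length);
 *	cb->blocking = true;
 *	rets = mei_cl_write(cl, cb);
 */
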
/**
 * mei_cl_write - submit a write cb to mei device;
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int size;
	int rets;
	bool blocking;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;
	size = buf->size;
	blocking = cb->blocking;

	cl_dbg(dev, cl, "size=%d\n", size);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto free;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = mei_cl_host_addr(cl);
	mei_hdr.me_addr = mei_cl_me_id(cl);
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_tx_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = size;
		goto out;
	}

	/* Check for a maximum length */
	if (size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;
	cb->completed = mei_hdr.msg_complete == 1;

out:
	if (mei_hdr.msg_complete)
		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	else
		list_add_tail(&cb->list, &dev->write_list.list);

	cb = NULL;
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
		if (cl->writing_state != MEI_WRITE_COMPLETE) {
			rets = -EFAULT;
			goto err;
		}
	}

	rets = size;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
free:
	mei_io_cb_free(cb);

	return rets;
}

/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev = cl->dev;

	switch (cb->fop_type) {
	case MEI_FOP_WRITE:
		mei_io_cb_free(cb);
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait)) {
			wake_up_interruptible(&cl->tx_wait);
		} else {
			pm_runtime_mark_last_busy(dev->dev);
			pm_request_autosuspend(dev->dev);
		}
		break;

	case MEI_FOP_READ:
		list_add_tail(&cb->list, &cl->rd_completed);
		if (!mei_cl_is_fixed_address(cl) &&
		    !WARN_ON(!cl->rx_flow_ctrl_creds))
			cl->rx_flow_ctrl_creds--;
		if (!mei_cl_bus_rx_event(cl))
			wake_up_interruptible(&cl->rx_wait);
		break;

	case MEI_FOP_CONNECT:
	case MEI_FOP_DISCONNECT:
	case MEI_FOP_NOTIFY_STOP:
	case MEI_FOP_NOTIFY_START:
		if (waitqueue_active(&cl->wait))
			wake_up(&cl->wait);

		break;
	case MEI_FOP_DISCONNECT_RSP:
		mei_io_cb_free(cb);
		mei_cl_set_disconnected(cl);
		break;
	default:
		BUG_ON(0);
	}
}

/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		mei_cl_set_disconnected(cl);
}