/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}

/**
 * mei_me_cl_release - free me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}

/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *	reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *	reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}
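
/*
 * Usage sketch (illustrative only, not part of the driver): code that
 * keeps a struct mei_me_client pointer beyond the lookup that produced
 * it is expected to hold a reference and drop it when done, e.g. a
 * hypothetical caller running under "dev->device_lock":
 *
 *	struct mei_me_client *me_cl;
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);	// takes a reference
 *	if (me_cl) {
 *		// ... use me_cl->props ...
 *		mei_me_cl_put(me_cl);		// may free me_cl
 *	}
 */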

/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
						 const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *	increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	struct mei_me_client *__me_cl, *me_cl = NULL;

	down_read(&dev->me_clients_rwsem);
	list_for_each_entry(__me_cl, &dev->me_clients, list) {
		if (__me_cl->client_id == client_id) {
			me_cl = mei_me_cl_get(__me_cl);
			break;
		}
	}
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
						    const uuid_le *uuid,
						    u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}
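
/*
 * Locking sketch (illustrative only): the double-underscore lookup
 * helpers above deliberately leave taking dev->me_clients_rwsem to the
 * caller, so several list operations can share one critical section:
 *
 *	down_write(&dev->me_clients_rwsem);
 *	me_cl = __mei_me_cl_by_uuid_id(dev, &uuid, id);
 *	__mei_me_cl_del(dev, me_cl);	// tolerates me_cl == NULL
 *	up_write(&dev->me_clients_rwsem);
 *
 * which is exactly the pattern mei_me_cl_rm_by_uuid_id() below follows.
 */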

/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id and uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_io_cb_free - free mei_cl_cb related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	list_del(&cb->list);
	kfree(cb->buf.data);
	kfree(cb);
}

/**
 * mei_tx_cb_enqueue - queue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct
 * @head: an instance of list to queue on
 */
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
				     struct list_head *head)
{
	list_add_tail(&cb->list, head);
	cb->cl->tx_cb_queued++;
}

/**
 * mei_tx_cb_dequeue - dequeue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct to dequeue and free
 */
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
		cb->cl->tx_cb_queued--;

	mei_io_cb_free(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
					enum mei_cb_file_ops type,
					const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	return cb;
}

/**
 * mei_io_list_flush_cl - removes cbs belonging to the cl.
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_list_flush_cl(struct list_head *head,
				 const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl) {
			list_del_init(&cb->list);
			if (cb->fop_type == MEI_FOP_READ)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_tx_list_free_cl - removes cbs belonging to the cl and frees them
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_tx_list_free_cl(struct list_head *head,
				   const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl)
			mei_tx_cb_dequeue(cb);
	}
}

/**
 * mei_io_list_free_fp - free cb from a list that matches file pointer
 *
 * @head: io list
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);
}

/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	if (length == 0)
		return cb;

	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	cb->buf.size = length;

	return cb;
}

/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *	and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *	for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp)
			return cb;

	return NULL;
}
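
/*
 * Usage sketch (illustrative only): a hypothetical write-path caller
 * allocates a cb sized for its payload and hands it to mei_cl_write()
 * further below; cb->buf.data is rounded up to a multiple of
 * MEI_SLOT_SIZE while cb->buf.size keeps the requested length:
 *
 *	struct mei_cl_cb *cb;
 *
 *	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	memcpy(cb->buf.data, payload, length);
 *	ret = mei_cl_write(cl, cb);	// consumes cb on all paths
 */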

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_tx_list_free_cl(&cl->dev->write_list, cl);
	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_free_fp(&cl->rd_pending, fp);
	mei_io_list_free_fp(&cl->rd_completed, fp);

	return 0;
}

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_UNINITIALIZED;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * Return: the allocated host client structure or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}
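
/*
 * Lifecycle sketch (illustrative only): a hypothetical user of the host
 * client API pairs allocation and linking with unlink and free on
 * teardown; mei_cl_alloc_linked() further below wraps the first half of
 * this sequence:
 *
 *	cl = mei_cl_allocate(dev);
 *	if (!cl)
 *		return -ENOMEM;
 *	ret = mei_cl_link(cl);	// reserves a host client id
 *	if (ret) {
 *		kfree(cl);
 *		return ret;
 *	}
 *	// ... connect and do I/O ...
 *	mei_cl_unlink(cl);
 *	kfree(cl);
 */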

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}

/**
 * mei_host_client_init - initialize client hierarchy
 *
 * @dev: the device structure
 */
void mei_host_client_init(struct mei_device *dev)
{
	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *	they can be interrupted
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}

/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *	associated states and resources
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_tx_list_free_cl(&dev->write_list, cl);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}

/**
 * mei_cl_set_connecting - set connecting state and take a reference
 *	on the me client
 *
 * @cl: host client
 * @me_cl: me client
 *
 * Return: 0 on success, -ENOENT if the me client is not active,
 *	-EBUSY if a fixed address client is already connected
 */
static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}

/**
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);

	return 0;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
			  struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_cl_send_disconnect(cl, cb);
	if (ret)
		list_move_tail(&cb->list, cmpl_list);

	return ret;
}
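
/*
 * Slot accounting sketch (illustrative only): the irq-context senders in
 * this file all gate on the same host buffer check before emitting an
 * HBM message; a hypothetical new sender would follow the same pattern:
 *
 *	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
 *	slots = mei_hbuf_empty_slots(dev);
 *	if (slots < 0)
 *		return -EOVERFLOW;	// hardware reported an invalid state
 *	if ((u32)slots < msg_slots)
 *		return -EMSGSIZE;	// not enough room, retry later
 */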

/**
 * __mei_cl_disconnect - disconnect host client from the me one;
 *	internal function, runtime pm has to be acquired already
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	if (dev->dev_state == MEI_DEV_POWER_DOWN) {
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}

/**
 * mei_cl_is_other_connecting - checks if another
 *	client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if other client is connected, false otherwise.
 */
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;

	dev = cl->dev;

	list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
		if (cb->fop_type == MEI_FOP_CONNECT &&
		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
			return true;
	}

	return false;
}

/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);
	return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: host client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int rets;

	if (mei_cl_is_other_connecting(cl))
		return 0;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	rets = mei_cl_send_connect(cl, cb);
	if (rets)
		list_move_tail(&cb->list, cmpl_list);

	return rets;
}
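
/*
 * Connect flow sketch (illustrative only): a hypothetical in-kernel user
 * gluing this file's pieces together looks up the firmware client,
 * allocates a linked host client and connects, under "dev->device_lock":
 *
 *	struct mei_me_client *me_cl;
 *	struct mei_cl *cl;
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	if (!me_cl)
 *		return -ENOTTY;
 *	cl = mei_cl_alloc_linked(dev);
 *	if (IS_ERR(cl)) {
 *		mei_me_cl_put(me_cl);
 *		return PTR_ERR(cl);
 *	}
 *	ret = mei_cl_connect(cl, me_cl, NULL);	// fp may be NULL
 *	mei_me_cl_put(me_cl);	// mei_cl_connect() took its own reference
 */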

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}

/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success, ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);
	return ERR_PTR(ret);
}

/**
 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: host client
 *
 * Return: 1 if tx_flow_ctrl_creds > 0, 0 otherwise, -EINVAL on invalid client
 */
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (cl->tx_flow_ctrl_creds > 0)
		return 1;

	if (mei_cl_is_fixed_address(cl))
		return 1;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (cl->me_cl->tx_flow_ctrl_creds > 0)
			return 1;
	}
	return 0;
}

/**
 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
 *	for a client
 *
 * @cl: host client
 *
 * Return:
 *	0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (mei_cl_is_fixed_address(cl))
		return 0;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->me_cl->tx_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->tx_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start/stop file operation type
 *
 * Return: MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
	if (fop == MEI_FOP_NOTIFY_START)
		return MEI_HBM_NOTIFICATION_START;
	else
		return MEI_HBM_NOTIFICATION_STOP;
}

/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
 *
 * @req: hbm notification request type
 *
 * Return: MEI_FOP_NOTIFY_START/STOP
 */
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
	if (req == MEI_HBM_NOTIFICATION_START)
		return MEI_FOP_NOTIFY_START;
	else
		return MEI_FOP_NOTIFY_STOP;
}
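
/*
 * Credit accounting sketch (illustrative only): a transmit path checks
 * for credits first and consumes one only after the first chunk of a
 * message has actually been handed to the hardware; mei_cl_irq_write()
 * and mei_cl_write() below both follow this pattern:
 *
 *	rets = mei_cl_tx_flow_ctrl_creds(cl);
 *	if (rets < 0)
 *		return rets;	// invalid client
 *	if (rets == 0)
 *		return 0;	// no credits: leave the cb queued for later
 *	// ... write the first chunk ...
 *	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
 */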

/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request ||
			   cl->status ||
			   !mei_cl_is_connected(cl),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_notify - raise notification
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_cl_notify(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	if (!cl->notify_en)
		return;

	cl_dbg(dev, cl, "notify event");
	cl->notify_ev = true;
	if (!mei_cl_bus_notify_event(cl))
		wake_up_interruptible(&cl->ev_wait);

	if (cl->ev_async)
		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
}

/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;

out:
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}

/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds)
		return -EBUSY;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}

/**
 * mei_msg_hdr_init - initialize mei message header
 *
 * @mei_hdr: mei message header
 * @cb: message callback structure
 */
static void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, struct mei_cl_cb *cb)
{
	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
	mei_hdr->length = 0;
	mei_hdr->reserved = 0;
	mei_hdr->msg_complete = 0;
	mei_hdr->dma_ring = 0;
	mei_hdr->internal = cb->internal;
}
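
/*
 * Chunking sketch (illustrative only): mei_cl_irq_write() below decides
 * how to send the remaining len bytes against the freshly initialized
 * header, with hdr_len = sizeof(struct mei_msg_hdr) and hbuf_len bytes
 * of host buffer available (mei_cl_write() applies a similar rule):
 *
 *	if (len + hdr_len <= hbuf_len)
 *		// whole remainder fits: send it, msg_complete = 1
 *	else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len))
 *		// DMA ring available: payload goes to the ring and the
 *		// header carries only the 32-bit dma_len
 *	else if ((u32)hbuf_slots == mei_hbuf_depth(dev))
 *		// buffer completely empty: send what fits, rest later
 *	else
 *		// partially filled buffer: wait until it drains
 */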

/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct list_head *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t hdr_len = sizeof(mei_hdr);
	size_t len;
	size_t hbuf_len, dr_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	int rets;
	bool first_chunk;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	len = buf->size - cb->buf_idx;
	data = buf->data + cb->buf_idx;
	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto err;
	}

	hbuf_len = mei_slots2data(hbuf_slots);
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	mei_msg_hdr_init(&mei_hdr, cb);

	/*
	 * Split the message only if we can write the whole host buffer,
	 * otherwise wait for next time the host buffer is empty.
	 */
	if (len + hdr_len <= hbuf_len) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		mei_hdr.dma_ring = 1;
		if (len > dr_len)
			len = dr_len;
		else
			mei_hdr.msg_complete = 1;

		mei_hdr.length = sizeof(dma_len);
		dma_len = len;
		data = &dma_len;
	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
		len = hbuf_len - hdr_len;
		mei_hdr.length = len;
	} else {
		return 0;
	}

	if (mei_hdr.dma_ring)
		mei_dma_ring_write(dev, buf->data + cb->buf_idx, len);

	rets = mei_write_message(dev, &mei_hdr, hdr_len, data, mei_hdr.length);
	if (rets)
		goto err;

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += len;

	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
			rets = -EIO;
			goto err;
		}
	}

	if (mei_hdr.msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list);

	return 0;

err:
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
	return rets;
}
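
/*
 * Continuation sketch (illustrative only): when mei_cl_irq_write() sends
 * a partial chunk, the cb stays on dev->write_list with cb->buf_idx
 * recording progress, so a later pass resumes where the last one ended:
 *
 *	1st pass: buf_idx = 0, sends n bytes, buf_idx = n
 *	2nd pass: buf_idx = n, sends the rest, msg_complete = 1
 *	finally:  cb moves to dev->write_waiting_list until the hardware
 *	          confirms completion and mei_cl_complete() frees it
 */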

/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t hdr_len = sizeof(mei_hdr);
	size_t len, hbuf_len, dr_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	ssize_t rets;
	bool blocking;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;
	len = buf->size;

	cl_dbg(dev, cl, "len=%zd\n", len);

	blocking = cb->blocking;
	data = buf->data;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
		goto free;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	rets = mei_cl_tx_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	mei_msg_hdr_init(&mei_hdr, cb);

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = len;
		goto out;
	}

	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = len;
		goto out;
	}

	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto out;
	}

	hbuf_len = mei_slots2data(hbuf_slots);
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	if (len + hdr_len <= hbuf_len) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		mei_hdr.dma_ring = 1;
		if (len > dr_len)
			len = dr_len;
		else
			mei_hdr.msg_complete = 1;

		mei_hdr.length = sizeof(dma_len);
		dma_len = len;
		data = &dma_len;
	} else {
		len = hbuf_len - hdr_len;
		mei_hdr.length = len;
	}

	if (mei_hdr.dma_ring)
		mei_dma_ring_write(dev, buf->data, len);

	rets = mei_write_message(dev, &mei_hdr, hdr_len,
				 data, mei_hdr.length);
	if (rets)
		goto err;

	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = len;
	/* restore return value */
	len = buf->size;

out:
	if (mei_hdr.msg_complete)
		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
	else
		mei_tx_cb_enqueue(cb, &dev->write_list);

	cb = NULL;
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
						cl->writing_state == MEI_WRITE_COMPLETE ||
						(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
		if (cl->writing_state != MEI_WRITE_COMPLETE) {
			rets = -EFAULT;
			goto err;
		}
	}

	rets = len;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
free:
	mei_io_cb_free(cb);

	return rets;
}

/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev = cl->dev;

	switch (cb->fop_type) {
	case MEI_FOP_WRITE:
		mei_tx_cb_dequeue(cb);
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait)) {
			wake_up_interruptible(&cl->tx_wait);
		} else {
			pm_runtime_mark_last_busy(dev->dev);
			pm_request_autosuspend(dev->dev);
		}
		break;

	case MEI_FOP_READ:
		list_add_tail(&cb->list, &cl->rd_completed);
		if (!mei_cl_is_fixed_address(cl) &&
		    !WARN_ON(!cl->rx_flow_ctrl_creds))
			cl->rx_flow_ctrl_creds--;
		if (!mei_cl_bus_rx_event(cl))
			wake_up_interruptible(&cl->rx_wait);
		break;

	case MEI_FOP_CONNECT:
	case MEI_FOP_DISCONNECT:
	case MEI_FOP_NOTIFY_STOP:
	case MEI_FOP_NOTIFY_START:
		if (waitqueue_active(&cl->wait))
			wake_up(&cl->wait);

		break;
	case MEI_FOP_DISCONNECT_RSP:
		mei_io_cb_free(cb);
		mei_cl_set_disconnected(cl);
		break;
	default:
		/* should never get here */
		BUG();
	}
}

/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		mei_cl_set_disconnected(cl);
}
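
/*
 * Read flow sketch (illustrative only): tying the pieces together, a
 * hypothetical reader arms the rx flow control with mei_cl_read_start(),
 * sleeps on cl->rx_wait, and picks the completed cb off cl->rd_completed
 * once mei_cl_complete() has moved it there:
 *
 *	ret = mei_cl_read_start(cl, mei_cl_mtu(cl), fp);
 *	if (ret && ret != -EBUSY)
 *		return ret;
 *	// ... wait_event_interruptible(cl->rx_wait, ...) ...
 *	cb = mei_cl_read_cb(cl, fp);
 *	if (cb) {
 *		// ... copy out cb->buf_idx received bytes ...
 *		mei_io_cb_free(cb);
 *	}
 */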