// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}

/**
 * mei_me_cl_release - free me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}

/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *	reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *	reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}

/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
						 const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}
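
/*
 * Usage note (illustrative sketch, not a caller in this file): every
 * successful lookup below returns with the me client reference count
 * elevated, so the result must be balanced with mei_me_cl_put():
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	if (!me_cl)
 *		return -ENOTTY;
 *	... use me_cl->props ...
 *	mei_me_cl_put(me_cl);
 */
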
/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *	increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	struct mei_me_client *__me_cl, *me_cl = NULL;

	down_read(&dev->me_clients_rwsem);
	list_for_each_entry(__me_cl, &dev->me_clients, list) {
		if (__me_cl->client_id == client_id) {
			me_cl = mei_me_cl_get(__me_cl);
			break;
		}
	}
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
						    const uuid_le *uuid,
						    u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching the uuid
 *	and the client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_io_cb_free - free mei_cl_cb related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	list_del(&cb->list);
	kfree(cb->buf.data);
	kfree(cb);
}

/**
 * mei_tx_cb_enqueue - queue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct
 * @head: an instance of list to queue on
 */
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
				     struct list_head *head)
{
	list_add_tail(&cb->list, head);
	cb->cl->tx_cb_queued++;
}

/**
 * mei_tx_cb_dequeue - dequeue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct to dequeue and free
 */
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
		cb->cl->tx_cb_queued--;

	mei_io_cb_free(cb);
}

/**
 * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cl: mei client
 * @fp: pointer to file structure
 */
static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
				  const struct file *fp)
{
	struct mei_cl_vtag *cl_vtag;

	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
		if (cl_vtag->fp == fp) {
			cl_vtag->pending_read = true;
			return;
		}
	}
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
					enum mei_cb_file_ops type,
					const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	cb->vtag = 0;

	return cb;
}
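
/*
 * Callback (cb) life cycle, roughly: a cb is allocated with
 * mei_io_cb_init() or mei_cl_alloc_cb(), linked on one of the device or
 * client lists (ctrl_wr_list, write_list, rd_pending, rd_completed, ...)
 * and finally released with mei_io_cb_free(), which also unlinks it.
 * The helpers below drop and/or free the cbs belonging to a given
 * client or file pointer, e.g. on release of the file or on disconnect.
 */
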
/**
 * mei_io_list_flush_cl - removes cbs belonging to the cl.
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_list_flush_cl(struct list_head *head,
				 const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl) {
			list_del_init(&cb->list);
			if (cb->fop_type == MEI_FOP_READ)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_tx_list_free_cl - removes cbs belonging to the cl and frees them
 *
 * @head: an instance of our list structure
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_tx_list_free_cl(struct list_head *head,
				   const struct mei_cl *cl,
				   const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl && (!fp || fp == cb->fp))
			mei_tx_cb_dequeue(cb);
	}
}

/**
 * mei_io_list_free_fp - free cb from a list that matches file pointer
 *
 * @head: io list
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);
}

/**
 * mei_cl_free_pending - free pending cb
 *
 * @cl: host client
 */
static void mei_cl_free_pending(struct mei_cl *cl)
{
	struct mei_cl_cb *cb;

	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	mei_io_cb_free(cb);
}

/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	if (length == 0)
		return cb;

	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	cb->buf.size = length;

	return cb;
}

/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *	and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}
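
/*
 * Illustrative TX allocation sketch (roughly what the character device
 * write path does; error handling trimmed): a write cb carries its own
 * payload buffer that is filled before the cb is handed over to
 * mei_cl_write():
 *
 *	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	memcpy(cb->buf.data, buf, length);
 *	rets = mei_cl_write(cl, cb);
 */
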
/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *	for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *ret_cb = NULL;

	spin_lock(&cl->rd_completed_lock);
	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp) {
			ret_cb = cb;
			break;
		}
	spin_unlock(&cl->rd_completed_lock);
	return ret_cb;
}

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
	/* free pending and control cb only in final flush */
	if (!fp) {
		mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
		mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
		mei_cl_free_pending(cl);
	}
	spin_lock(&cl->rd_completed_lock);
	mei_io_list_free_fp(&cl->rd_completed, fp);
	spin_unlock(&cl->rd_completed_lock);

	return 0;
}

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(*cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	INIT_LIST_HEAD(&cl->vtag_map);
	spin_lock_init(&cl->rd_completed_lock);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_UNINITIALIZED;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * Return: The allocated host client structure or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}
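
/*
 * Note: callers normally do not use mei_cl_allocate() and mei_cl_link()
 * separately; mei_cl_alloc_linked() further down combines the allocation
 * with the host id assignment and frees the client again on failure.
 */
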
/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}

void mei_host_client_init(struct mei_device *dev)
{
	mei_set_devstate(dev, MEI_DEV_ENABLED);
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *	they can be interrupted
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}
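
/*
 * Disconnect handling: mei_cl_set_disconnected() below only tears down
 * the host side state (queued cbs, flow control credits, the me client
 * reference); the actual handshake with the firmware is driven by
 * __mei_cl_disconnect() further down, which queues a MEI_FOP_DISCONNECT
 * control cb and waits for the reply under a timeout.
 */
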
/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *	associated states and resources
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}

static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}

/*
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);

	return 0;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
			  struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_cl_send_disconnect(cl, cb);
	if (ret)
		list_move_tail(&cb->list, cmpl_list);

	return ret;
}

/**
 * __mei_cl_disconnect - disconnect host client from the me one;
 *	internal function, runtime pm has to be already acquired
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
	    dev->dev_state == MEI_DEV_POWER_DOWN) {
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}

/**
 * mei_cl_is_other_connecting - checks if other
 *	client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if another client with the same me client id is connecting,
 *	false otherwise.
 */
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;

	dev = cl->dev;

	list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
		if (cb->fop_type == MEI_FOP_CONNECT &&
		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
			return true;
	}

	return false;
}
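
/*
 * Connect requests to the same me client are serialized: while a
 * MEI_FOP_CONNECT cb for that me client sits on ctrl_rd_list (i.e. a
 * connect is already in flight), further connect attempts are held back
 * both in mei_cl_irq_connect() and in mei_cl_connect() below.
 */
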
/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);
	return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: host client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int rets;

	if (mei_cl_is_other_connecting(cl))
		return 0;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	rets = mei_cl_send_connect(cl, cb);
	if (rets)
		list_move_tail(&cb->list, cmpl_list);

	return rets;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}
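
/*
 * Illustrative connect sequence (a sketch of what a caller such as the
 * connect ioctl path does; locking and error handling trimmed):
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	cl = mei_cl_alloc_linked(dev);
 *	rets = mei_cl_connect(cl, me_cl, fp);
 *	mei_me_cl_put(me_cl);
 *
 * mei_cl_connect() takes its own reference on the me client, so the
 * lookup reference is dropped right after the call.
 */
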
/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success, ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);
	return ERR_PTR(ret);
}

/**
 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: host client
 *
 * Return: 1 if tx_flow_ctrl_creds > 0, 0 otherwise.
 */
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (cl->tx_flow_ctrl_creds > 0)
		return 1;

	if (mei_cl_is_fixed_address(cl))
		return 1;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (cl->me_cl->tx_flow_ctrl_creds > 0)
			return 1;
	}
	return 0;
}

/**
 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
 *	for a client
 *
 * @cl: host client
 *
 * Return:
 *	0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (mei_cl_is_fixed_address(cl))
		return 0;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->me_cl->tx_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->tx_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_vtag_alloc - allocate and fill the vtag structure
 *
 * @fp: pointer to file structure
 * @vtag: vm tag
 *
 * Return:
 * * Pointer to allocated struct - on success
 * * ERR_PTR(-ENOMEM) on memory allocation failure
 */
struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
{
	struct mei_cl_vtag *cl_vtag;

	cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
	if (!cl_vtag)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cl_vtag->list);
	cl_vtag->vtag = vtag;
	cl_vtag->fp = fp;

	return cl_vtag;
}

/**
 * mei_cl_fp_by_vtag - obtain the file pointer by vtag
 *
 * @cl: host client
 * @vtag: virtual tag
 *
 * Return:
 * * A file pointer - on success
 * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
 */
const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
{
	struct mei_cl_vtag *vtag_l;

	list_for_each_entry(vtag_l, &cl->vtag_map, list)
		/* The client on bus has one fixed fp */
		if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
		    vtag_l->vtag == vtag)
			return vtag_l->fp;

	return ERR_PTR(-ENOENT);
}
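
/*
 * Vtag bookkeeping, roughly: each open file (fp) that reads through a
 * vtag-aware connection is recorded in cl->vtag_map together with its
 * vtag and a pending_read flag; the helpers below clear that flag once
 * data for the vtag arrives and re-arm RX flow control for the next
 * pending reader.
 */
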
/**
 * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
 *
 * @cl: host client
 * @vtag: vm tag
 */
static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
{
	struct mei_cl_vtag *vtag_l;

	list_for_each_entry(vtag_l, &cl->vtag_map, list) {
		if (vtag_l->vtag == vtag) {
			vtag_l->pending_read = false;
			break;
		}
	}
}

/**
 * mei_cl_read_vtag_add_fc - add flow control for next pending reader
 *	in the vtag list
 *
 * @cl: host client
 */
static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
{
	struct mei_cl_vtag *cl_vtag;

	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
		if (cl_vtag->pending_read) {
			if (mei_cl_enqueue_ctrl_wr_cb(cl,
						      mei_cl_mtu(cl),
						      MEI_FOP_READ,
						      cl_vtag->fp))
				cl->rx_flow_ctrl_creds++;
			break;
		}
	}
}

/**
 * mei_cl_vt_support_check - check if client supports vtags
 *
 * @cl: host client
 *
 * Return:
 * * 0 - supported, or not connected at all
 * * -EOPNOTSUPP - vtags are not supported by client
 */
int mei_cl_vt_support_check(const struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (!dev->hbm_f_vt_supported)
		return -EOPNOTSUPP;

	if (!cl->me_cl)
		return 0;

	return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
}

/**
 * mei_cl_add_rd_completed - add read completed callback to list with lock
 *	and vtag check
 *
 * @cl: host client
 * @cb: callback block
 */
void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	const struct file *fp;

	if (!mei_cl_vt_support_check(cl)) {
		fp = mei_cl_fp_by_vtag(cl, cb->vtag);
		if (IS_ERR(fp)) {
			/* client already disconnected, discarding */
			mei_io_cb_free(cb);
			return;
		}
		cb->fp = fp;
		mei_cl_reset_read_by_vtag(cl, cb->vtag);
		mei_cl_read_vtag_add_fc(cl);
	}

	spin_lock(&cl->rd_completed_lock);
	list_add_tail(&cb->list, &cl->rd_completed);
	spin_unlock(&cl->rd_completed_lock);
}

/**
 * mei_cl_del_rd_completed - free read completed callback with lock
 *
 * @cl: host client
 * @cb: callback block
 */
void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	spin_lock(&cl->rd_completed_lock);
	mei_io_cb_free(cb);
	spin_unlock(&cl->rd_completed_lock);
}

/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start response command
 *
 * Return: MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
	if (fop == MEI_FOP_NOTIFY_START)
		return MEI_HBM_NOTIFICATION_START;
	else
		return MEI_HBM_NOTIFICATION_STOP;
}

/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
 *
 * @req: hbm notification request type
 *
 * Return: MEI_FOP_NOTIFY_START/STOP
 */
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
	if (req == MEI_HBM_NOTIFICATION_START)
		return MEI_FOP_NOTIFY_START;
	else
		return MEI_FOP_NOTIFY_STOP;
}

/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request ||
			   cl->status ||
			   !mei_cl_is_connected(cl),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_notify - raise notification
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_cl_notify(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	if (!cl->notify_en)
		return;

	cl_dbg(dev, cl, "notify event");
	cl->notify_ev = true;
	if (!mei_cl_bus_notify_event(cl))
		wake_up_interruptible(&cl->ev_wait);

	if (cl->ev_async)
		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
}
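
/*
 * Illustrative notification flow (sketch; error handling trimmed): a
 * caller first arms notifications and then either blocks in
 * mei_cl_notify_get() or polls for the event:
 *
 *	rets = mei_cl_notify_request(cl, fp, 1);
 *	...
 *	rets = mei_cl_notify_get(cl, true, &notify_ev);
 *
 * where 1 requests start and 0 requests stop, per the kernel-doc of
 * mei_cl_notify_request() above.
 */
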
/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;

out:
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}

/**
 * mei_cl_read_start - start reading a client message
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds) {
		mei_cl_set_read_by_fp(cl, fp);
		return -EBUSY;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	mei_cl_set_read_by_fp(cl, fp);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}

static inline u8 mei_ext_hdr_set_vtag(struct mei_ext_hdr *ext, u8 vtag)
{
	ext->type = MEI_EXT_HDR_VTAG;
	ext->ext_payload[0] = vtag;
	ext->length = mei_data2slots(sizeof(*ext));
	return ext->length;
}
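
/*
 * Header layout note: mei_msg_hdr_init() below builds the base
 * mei_msg_hdr and, only for the first fragment of a tagged message,
 * appends a mei_ext_meta_hdr plus a vtag extension in hdr->extension.
 * At this point hdr->length covers the extension bytes only; the
 * payload length is added later by the write paths.
 */
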
/**
 * mei_msg_hdr_init - allocate and initialize mei message header
 *
 * @cb: message callback structure
 *
 * Return: a pointer to initialized header or ERR_PTR on failure
 */
static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
{
	size_t hdr_len;
	struct mei_ext_meta_hdr *meta;
	struct mei_ext_hdr *ext;
	struct mei_msg_hdr *mei_hdr;
	bool is_ext, is_vtag;

	if (!cb)
		return ERR_PTR(-EINVAL);

	/* Extended header for vtag is attached only on the first fragment */
	is_vtag = (cb->vtag && cb->buf_idx == 0);
	is_ext = is_vtag;

	/* Compute extended header size */
	hdr_len = sizeof(*mei_hdr);

	if (!is_ext)
		goto setup_hdr;

	hdr_len += sizeof(*meta);
	if (is_vtag)
		hdr_len += sizeof(*ext);

setup_hdr:
	mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
	if (!mei_hdr)
		return ERR_PTR(-ENOMEM);

	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
	mei_hdr->internal = cb->internal;
	mei_hdr->extended = is_ext;

	if (!is_ext)
		goto out;

	meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
	if (is_vtag) {
		meta->count++;
		meta->size += mei_ext_hdr_set_vtag(meta->hdrs, cb->vtag);
	}
out:
	mei_hdr->length = hdr_len - sizeof(*mei_hdr);
	return mei_hdr;
}

/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct list_head *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	int rets;
	bool first_chunk;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	buf_len = buf->size - cb->buf_idx;
	data = buf->data + cb->buf_idx;
	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto err;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
	       mei_hdr->extended, cb->vtag);

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	/*
	 * Split the message only if we can write the whole host buffer
	 * otherwise wait for next time the host buffer is empty.
	 */
	if (hdr_len + buf_len <= hbuf_len) {
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	} else {
		kfree(mei_hdr);
		return 0;
	}
	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring)
		mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);

	if (rets)
		goto err;

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += buf_len;

	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
			rets = -EIO;
			goto err;
		}
	}

	if (mei_hdr->msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list);

	kfree(mei_hdr);
	return 0;

err:
	kfree(mei_hdr);
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
	return rets;
}
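
/*
 * TX sizing in the write paths falls into three cases: the whole
 * remaining payload fits into the host buffer (msg_complete is set),
 * the DMA ring is usable so only a u32 length word goes through the
 * host buffer, or only a partial chunk is written now and the rest is
 * sent from the interrupt thread once the host buffer drains again.
 */
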
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	ssize_t rets;
	bool blocking;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;
	buf_len = buf->size;

	cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);

	blocking = cb->blocking;
	data = buf->data;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
		goto free;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	rets = mei_cl_tx_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
	       mei_hdr->extended, cb->vtag);

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf_len;
		goto out;
	}

	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf_len;
		goto out;
	}

	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto out;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	if (hdr_len + buf_len <= hbuf_len) {
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else {
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	}

	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring)
		mei_dma_ring_write(dev, buf->data, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);

	if (rets)
		goto err;

	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = buf_len;
	/* restore return value */
	buf_len = buf->size;

out:
	if (mei_hdr->msg_complete)
		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
	else
		mei_tx_cb_enqueue(cb, &dev->write_list);

	cb = NULL;
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
						cl->writing_state == MEI_WRITE_COMPLETE ||
						(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
		if (cl->writing_state != MEI_WRITE_COMPLETE) {
			rets = -EFAULT;
			goto err;
		}
	}

	rets = buf_len;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
free:
	mei_io_cb_free(cb);

	kfree(mei_hdr);

	return rets;
}
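
/*
 * Note: for a blocking cb (cb->blocking set) mei_cl_write() above waits
 * on cl->tx_wait until mei_cl_complete() below reports
 * MEI_WRITE_COMPLETE; non-blocking writes are, roughly, completed
 * asynchronously from the interrupt thread once the last fragment has
 * been written.
 */
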
/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev = cl->dev;

	switch (cb->fop_type) {
	case MEI_FOP_WRITE:
		mei_tx_cb_dequeue(cb);
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait)) {
			wake_up_interruptible(&cl->tx_wait);
		} else {
			pm_runtime_mark_last_busy(dev->dev);
			pm_request_autosuspend(dev->dev);
		}
		break;

	case MEI_FOP_READ:
		mei_cl_add_rd_completed(cl, cb);
		if (!mei_cl_is_fixed_address(cl) &&
		    !WARN_ON(!cl->rx_flow_ctrl_creds))
			cl->rx_flow_ctrl_creds--;
		if (!mei_cl_bus_rx_event(cl))
			wake_up_interruptible(&cl->rx_wait);
		break;

	case MEI_FOP_CONNECT:
	case MEI_FOP_DISCONNECT:
	case MEI_FOP_NOTIFY_STOP:
	case MEI_FOP_NOTIFY_START:
	case MEI_FOP_DMA_MAP:
	case MEI_FOP_DMA_UNMAP:
		if (waitqueue_active(&cl->wait))
			wake_up(&cl->wait);

		break;
	case MEI_FOP_DISCONNECT_RSP:
		mei_io_cb_free(cb);
		mei_cl_set_disconnected(cl);
		break;
	default:
		BUG_ON(0);
	}
}

/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		mei_cl_set_disconnected(cl);
}

static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		if (cl->dma.buffer_id == buffer_id)
			return cl;
	return NULL;
}

/**
 * mei_cl_irq_dma_map - send client dma map request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_dma_map_req(dev, cl);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
			 struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_dma_unmap_req(dev, cl);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
{
	cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
					    &cl->dma.daddr, GFP_KERNEL);
	if (!cl->dma.vaddr)
		return -ENOMEM;

	cl->dma.buffer_id = buf_id;
	cl->dma.size = size;

	return 0;
}

static void mei_cl_dma_free(struct mei_cl *cl)
{
	cl->dma.buffer_id = 0;
	dmam_free_coherent(cl->dev->dev,
			   cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
	cl->dma.size = 0;
	cl->dma.vaddr = NULL;
	cl->dma.daddr = 0;
}

/**
 * mei_cl_dma_alloc_and_map - send client dma map request
 *
 * @cl: host client
 * @fp: pointer to file structure
 * @buffer_id: id of the mapped buffer
 * @size: size of the buffer
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return:
 * * 0 on success
 * * -ENODEV
 * * -EINVAL
 * * -EOPNOTSUPP
 * * -EPROTO
 * * -ENOMEM
 */
int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
			     u8 buffer_id, size_t size)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_cd_supported) {
		cl_dbg(dev, cl, "client dma is not supported\n");
		return -EOPNOTSUPP;
	}

	if (buffer_id == 0)
		return -EINVAL;

	if (mei_cl_is_connected(cl))
		return -EPROTO;

	if (cl->dma_mapped)
		return -EPROTO;

	if (mei_cl_dma_map_find(dev, buffer_id)) {
		cl_dbg(dev, cl, "client dma with id %d is already allocated\n",
		       cl->dma.buffer_id);
		return -EPROTO;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = mei_cl_dma_alloc(cl, buffer_id, size);
	if (rets) {
		pm_runtime_put_noidle(dev->dev);
		return rets;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_dma_map_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->dma_mapped || cl->status,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!cl->dma_mapped && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	if (rets)
		mei_cl_dma_free(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}
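
/*
 * Illustrative client DMA usage (sketch; error handling trimmed): the
 * buffer is set up while the client is still disconnected and torn down
 * with the matching unmap call, again while disconnected:
 *
 *	rets = mei_cl_dma_alloc_and_map(cl, fp, buffer_id, size);
 *	...connect, transfer data via cl->dma.vaddr, disconnect...
 *	rets = mei_cl_dma_unmap(cl, fp);
 */
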
/**
 * mei_cl_dma_unmap - send client dma unmap request
 *
 * @cl: host client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_cd_supported) {
		cl_dbg(dev, cl, "client dma is not supported\n");
		return -EOPNOTSUPP;
	}

	/* do not allow unmap for connected client */
	if (mei_cl_is_connected(cl))
		return -EPROTO;

	if (!cl->dma_mapped)
		return -EPROTO;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_dma_unmap_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   !cl->dma_mapped || cl->status,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->dma_mapped && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

	if (!rets)
		mei_cl_dma_free(cl);
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}