// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/scatterlist.h>
#include <linux/mei_cl_bus.h>

#include "mei_dev.h"
#include "client.h"

#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)

/**
 * __mei_cl_send - internal client send (write)
 *
 * @cl: host client
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 * @mode: sending mode
 *
 * Return: written size in bytes or < 0 on error
 */
ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
		      unsigned int mode)
{
	return __mei_cl_send_timeout(cl, buf, length, vtag, mode, MAX_SCHEDULE_TIMEOUT);
}

/**
 * __mei_cl_send_timeout - internal client send (write)
 *
 * @cl: host client
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 * @mode: sending mode
 * @timeout: send timeout in milliseconds.
 *           effective only for blocking writes: the MEI_CL_IO_TX_BLOCKING mode bit is set.
 *           set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
 *
 * Return: written size in bytes or < 0 on error
 */
ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
			      unsigned int mode, unsigned long timeout)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	/* Check if we have an ME client device */
	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	if (vtag) {
		/* Check if vtag is supported by client */
		rets = mei_cl_vt_support_check(cl);
		if (rets)
			goto out;
	}

	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	while (cl->tx_cb_queued >= bus->tx_queue_limit) {
		mutex_unlock(&bus->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
						cl->writing_state == MEI_WRITE_COMPLETE ||
						(!mei_cl_is_connected(cl)));
		mutex_lock(&bus->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}
	cb->vtag = vtag;

	cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
	cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
	memcpy(cb->buf.data, buf, length);
	/* hack we point data to header */
	if (mode & MEI_CL_IO_SGL) {
		cb->ext_hdr = (struct mei_ext_hdr *)cb->buf.data;
		cb->buf.data = NULL;
		cb->buf.size = 0;
	}

	rets = mei_cl_write(cl, cb, timeout);

	if (mode & MEI_CL_IO_SGL && rets == 0)
		rets = length;

out:
	mutex_unlock(&bus->device_lock);

	return rets;
}
/**
 * __mei_cl_recv - internal client receive (read)
 *
 * @cl: host client
 * @buf: buffer to receive
 * @length: buffer length
 * @mode: io mode
 * @vtag: virtual tag
 * @timeout: recv timeout, 0 for infinite timeout
 *
 * Return: read size in bytes or < 0 on error
 */
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
		      unsigned int mode, unsigned long timeout)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	size_t r_length;
	ssize_t rets;
	bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	cb = mei_cl_read_cb(cl, NULL);
	if (cb)
		goto copy;

	rets = mei_cl_read_start(cl, length, NULL);
	if (rets && rets != -EBUSY)
		goto out;

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}

	/* wait on event only if there is no other waiter */
	/* synchronized under device mutex */
	if (!waitqueue_active(&cl->rx_wait)) {

		mutex_unlock(&bus->device_lock);

		if (timeout) {
			rets = wait_event_interruptible_timeout
					(cl->rx_wait,
					 mei_cl_read_cb(cl, NULL) ||
					 (!mei_cl_is_connected(cl)),
					 msecs_to_jiffies(timeout));
			if (rets == 0)
				return -ETIME;
			if (rets < 0) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		} else {
			if (wait_event_interruptible
					(cl->rx_wait,
					 mei_cl_read_cb(cl, NULL) ||
					 (!mei_cl_is_connected(cl)))) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		}

		mutex_lock(&bus->device_lock);

		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_read_cb(cl, NULL);
	if (!cb) {
		rets = 0;
		goto out;
	}

copy:
	if (cb->status) {
		rets = cb->status;
		goto free;
	}

	/* for the GSC type - copy the extended header to the buffer */
	if (cb->ext_hdr && cb->ext_hdr->type == MEI_EXT_HDR_GSC) {
		r_length = min_t(size_t, length, cb->ext_hdr->length * sizeof(u32));
		memcpy(buf, cb->ext_hdr, r_length);
	} else {
		r_length = min_t(size_t, length, cb->buf_idx);
		memcpy(buf, cb->buf.data, r_length);
	}
	rets = r_length;

	if (vtag)
		*vtag = cb->vtag;

free:
	mei_cl_del_rd_completed(cl, cb);
out:
	mutex_unlock(&bus->device_lock);

	return rets;
}

/**
 * mei_cldev_send_vtag - me device send with vtag (write)
 *
 * @cldev: me client device
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 * * written size in bytes
 * * < 0 on error
 */
ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
			    size_t length, u8 vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING);
}
EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);

/**
 * mei_cldev_recv_vtag - client receive with vtag (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 * * read size in bytes
 * * < 0 on error
 */
ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
			    u8 *vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_recv(cl, buf, length, vtag, 0, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);
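/*
 * Usage sketch (illustrative, not part of the driver): a hypothetical bus
 * client issuing a request/response transaction over a virtual tag. The
 * foo_* names and the message layout are assumptions; only the
 * mei_cldev_send_vtag()/mei_cldev_recv_vtag() calls above are real API.
 *
 *	static ssize_t foo_xfer_vtag(struct mei_cl_device *cldev, u8 vtag,
 *				     const u8 *req, size_t req_len,
 *				     u8 *resp, size_t resp_len)
 *	{
 *		ssize_t ret;
 *		u8 recv_vtag;
 *
 *		ret = mei_cldev_send_vtag(cldev, req, req_len, vtag);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = mei_cldev_recv_vtag(cldev, resp, resp_len, &recv_vtag);
 *		if (ret < 0)
 *			return ret;
 *
 *		return recv_vtag == vtag ? ret : -EPROTO;
 *	}
 */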
/**
 * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 * @vtag: virtual tag
 *
 * Return:
 * * read size in bytes
 * * -EAGAIN if function will block.
 * * < 0 on other error
 */
ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
				     size_t length, u8 *vtag)
{
	struct mei_cl *cl = cldev->cl;

	return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);

/**
 * mei_cldev_send - me device send (write)
 *
 * @cldev: me client device
 * @buf: buffer to send
 * @length: buffer length
 *
 * Return:
 * * written size in bytes
 * * < 0 on error
 */
ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length)
{
	return mei_cldev_send_vtag(cldev, buf, length, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_send);

/**
 * mei_cldev_recv - client receive (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 *
 * Return: read size in bytes or < 0 on error
 */
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
	return mei_cldev_recv_vtag(cldev, buf, length, NULL);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv);

/**
 * mei_cldev_recv_nonblock - non block client receive (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 *
 * Return: read size in bytes or < 0 on error
 *         -EAGAIN if function will block.
 */
ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
				size_t length)
{
	return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
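/*
 * Usage sketch (illustrative, not part of the driver): polling with the
 * non-blocking receive variant. -EAGAIN means no completed read buffer is
 * available yet; a real client would usually prefer the Rx callback
 * mechanism below over polling. The foo_* name is an assumption.
 *
 *	static ssize_t foo_try_recv(struct mei_cl_device *cldev,
 *				    u8 *buf, size_t len)
 *	{
 *		ssize_t ret;
 *
 *		ret = mei_cldev_recv_nonblock(cldev, buf, len);
 *		if (ret == -EAGAIN)
 *			return 0;
 *		return ret;
 *	}
 */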
/**
 * mei_cl_bus_rx_work - dispatch rx event for a bus device
 *
 * @work: work
 */
static void mei_cl_bus_rx_work(struct work_struct *work)
{
	struct mei_cl_device *cldev;
	struct mei_device *bus;

	cldev = container_of(work, struct mei_cl_device, rx_work);

	bus = cldev->bus;

	if (cldev->rx_cb)
		cldev->rx_cb(cldev);

	mutex_lock(&bus->device_lock);
	if (mei_cl_is_connected(cldev->cl))
		mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
	mutex_unlock(&bus->device_lock);
}

/**
 * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
 *
 * @work: work
 */
static void mei_cl_bus_notif_work(struct work_struct *work)
{
	struct mei_cl_device *cldev;

	cldev = container_of(work, struct mei_cl_device, notif_work);

	if (cldev->notif_cb)
		cldev->notif_cb(cldev);
}

/**
 * mei_cl_bus_notify_event - schedule notify cb on bus client
 *
 * @cl: host client
 *
 * Return: true if event was scheduled
 *         false if the client is not waiting for event
 */
bool mei_cl_bus_notify_event(struct mei_cl *cl)
{
	struct mei_cl_device *cldev = cl->cldev;

	if (!cldev || !cldev->notif_cb)
		return false;

	if (!cl->notify_ev)
		return false;

	schedule_work(&cldev->notif_work);

	cl->notify_ev = false;

	return true;
}

/**
 * mei_cl_bus_rx_event - schedule rx event
 *
 * @cl: host client
 *
 * Return: true if event was scheduled
 *         false if the client is not waiting for event
 */
bool mei_cl_bus_rx_event(struct mei_cl *cl)
{
	struct mei_cl_device *cldev = cl->cldev;

	if (!cldev || !cldev->rx_cb)
		return false;

	schedule_work(&cldev->rx_work);

	return true;
}

/**
 * mei_cldev_register_rx_cb - register Rx event callback
 *
 * @cldev: me client device
 * @rx_cb: callback function
 *
 * Return: 0 on success
 *         -EALREADY if a callback is already registered
 *         <0 on other errors
 */
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
{
	struct mei_device *bus = cldev->bus;
	int ret;

	if (!rx_cb)
		return -EINVAL;
	if (cldev->rx_cb)
		return -EALREADY;

	cldev->rx_cb = rx_cb;
	INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);

	mutex_lock(&bus->device_lock);
	if (mei_cl_is_connected(cldev->cl))
		ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
	else
		ret = -ENODEV;
	mutex_unlock(&bus->device_lock);
	if (ret && ret != -EBUSY) {
		cancel_work_sync(&cldev->rx_work);
		cldev->rx_cb = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
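/*
 * Usage sketch (illustrative, not part of the driver): event driven
 * receive. The callback runs from the bus workqueue after a read has
 * completed, and the bus re-arms the next read once the callback returns.
 * The foo_* names and the 128-byte buffer are assumptions.
 *
 *	static void foo_rx_cb(struct mei_cl_device *cldev)
 *	{
 *		u8 buf[128];
 *		ssize_t ret;
 *
 *		ret = mei_cldev_recv(cldev, buf, sizeof(buf));
 *		if (ret < 0)
 *			return;
 *		(process ret bytes here)
 *	}
 *
 *	A client driver would register it from its probe(), typically after
 *	mei_cldev_enable():
 *
 *		ret = mei_cldev_register_rx_cb(cldev, foo_rx_cb);
 */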
/**
 * mei_cldev_register_notif_cb - register FW notification event callback
 *
 * @cldev: me client device
 * @notif_cb: callback function
 *
 * Return: 0 on success
 *         -EALREADY if a callback is already registered
 *         <0 on other errors
 */
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
				mei_cldev_cb_t notif_cb)
{
	struct mei_device *bus = cldev->bus;
	int ret;

	if (!notif_cb)
		return -EINVAL;

	if (cldev->notif_cb)
		return -EALREADY;

	cldev->notif_cb = notif_cb;
	INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);

	mutex_lock(&bus->device_lock);
	ret = mei_cl_notify_request(cldev->cl, NULL, 1);
	mutex_unlock(&bus->device_lock);
	if (ret) {
		cancel_work_sync(&cldev->notif_work);
		cldev->notif_cb = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);

/**
 * mei_cldev_get_drvdata - driver data getter
 *
 * @cldev: mei client device
 *
 * Return: driver private data
 */
void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
{
	return dev_get_drvdata(&cldev->dev);
}
EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);

/**
 * mei_cldev_set_drvdata - driver data setter
 *
 * @cldev: mei client device
 * @data: data to store
 */
void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
{
	dev_set_drvdata(&cldev->dev, data);
}
EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);

/**
 * mei_cldev_uuid - return uuid of the underlying me client
 *
 * @cldev: mei client device
 *
 * Return: me client uuid
 */
const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
{
	return mei_me_cl_uuid(cldev->me_cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_uuid);

/**
 * mei_cldev_ver - return protocol version of the underlying me client
 *
 * @cldev: mei client device
 *
 * Return: me client protocol version
 */
u8 mei_cldev_ver(const struct mei_cl_device *cldev)
{
	return mei_me_cl_ver(cldev->me_cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_ver);

/**
 * mei_cldev_enabled - check whether the device is enabled
 *
 * @cldev: mei client device
 *
 * Return: true if me client is initialized and connected
 */
bool mei_cldev_enabled(const struct mei_cl_device *cldev)
{
	return mei_cl_is_connected(cldev->cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_enabled);

/**
 * mei_cl_bus_module_get - acquire module of the underlying
 *	hw driver.
 *
 * @cldev: mei client device
 *
 * Return: true on success; false if the module was removed.
 */
static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
{
	return try_module_get(cldev->bus->dev->driver->owner);
}

/**
 * mei_cl_bus_module_put - release the underlying hw module.
 *
 * @cldev: mei client device
 */
static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
{
	module_put(cldev->bus->dev->driver->owner);
}
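/*
 * Usage sketch (illustrative, not part of the driver): keeping a per-device
 * context with the drvdata helpers. struct foo_ctx and the foo_* functions
 * are assumptions.
 *
 *	struct foo_ctx {
 *		struct mei_cl_device *cldev;
 *		u32 seq;
 *	};
 *
 *	static int foo_probe(struct mei_cl_device *cldev,
 *			     const struct mei_cl_device_id *id)
 *	{
 *		struct foo_ctx *ctx;
 *
 *		ctx = devm_kzalloc(&cldev->dev, sizeof(*ctx), GFP_KERNEL);
 *		if (!ctx)
 *			return -ENOMEM;
 *		ctx->cldev = cldev;
 *		mei_cldev_set_drvdata(cldev, ctx);
 *		return 0;
 *	}
 *
 *	The context is then retrieved wherever only the device is at hand,
 *	for example in an Rx callback:
 *
 *		struct foo_ctx *ctx = mei_cldev_get_drvdata(cldev);
 */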
/**
 * mei_cl_bus_vtag - get bus vtag entry wrapper
 *	The tag for bus client is always first.
 *
 * @cl: host client
 *
 * Return: bus vtag or NULL
 */
static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
{
	return list_first_entry_or_null(&cl->vtag_map,
					struct mei_cl_vtag, list);
}

/**
 * mei_cl_bus_vtag_alloc - add bus client entry to vtag map
 *
 * @cldev: me client device
 *
 * Return:
 * * 0 on success
 * * -ENOMEM if memory allocation failed
 */
static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
{
	struct mei_cl *cl = cldev->cl;
	struct mei_cl_vtag *cl_vtag;

	/*
	 * Bail out if the client does not support vtags
	 * or has already allocated one
	 */
	if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
		return 0;

	cl_vtag = mei_cl_vtag_alloc(NULL, 0);
	if (IS_ERR(cl_vtag))
		return -ENOMEM;

	list_add_tail(&cl_vtag->list, &cl->vtag_map);

	return 0;
}

/**
 * mei_cl_bus_vtag_free - remove the bus entry from vtag map
 *
 * @cldev: me client device
 */
static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
{
	struct mei_cl *cl = cldev->cl;
	struct mei_cl_vtag *cl_vtag;

	cl_vtag = mei_cl_bus_vtag(cl);
	if (!cl_vtag)
		return;

	list_del(&cl_vtag->list);
	kfree(cl_vtag);
}

void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev || !buffer_id || !size)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
		dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
			MEI_FW_PAGE_SIZE);
		return ERR_PTR(-EINVAL);
	}

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto notlinked;
		/* update pointers */
		cl->cldev = cldev;
	}

	ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
	if (ret)
		mei_cl_unlink(cl);
notlinked:
	mutex_unlock(&bus->device_lock);
	if (ret)
		return ERR_PTR(ret);
	return cl->dma.vaddr;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_map);

int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev)
		return -EINVAL;

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	ret = mei_cl_dma_unmap(cl, NULL);

	mei_cl_flush_queues(cl, NULL);
	mei_cl_unlink(cl);
	mutex_unlock(&bus->device_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
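/*
 * Usage sketch (illustrative, not part of the driver): setting up a client
 * DMA buffer. The buffer id of 1 and the size are assumptions; the size
 * must be a multiple of MEI_FW_PAGE_SIZE and the buffer id must be
 * non-zero, as checked above. The mapping stays in place until
 * mei_cldev_dma_unmap() is called.
 *
 *	void *vaddr;
 *
 *	vaddr = mei_cldev_dma_map(cldev, 1, 8 * MEI_FW_PAGE_SIZE);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	(use the buffer, then on teardown)
 *
 *	mei_cldev_dma_unmap(cldev);
 */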
/**
 * mei_cldev_enable - enable me client device
 *	create connection with me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_enable(struct mei_cl_device *cldev)
{
	struct mei_device *bus = cldev->bus;
	struct mei_cl *cl;
	int ret;

	cl = cldev->cl;

	mutex_lock(&bus->device_lock);
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto notlinked;
		/* update pointers */
		cl->cldev = cldev;
	}

	if (mei_cl_is_connected(cl)) {
		ret = 0;
		goto out;
	}

	if (!mei_me_cl_is_active(cldev->me_cl)) {
		dev_err(&cldev->dev, "me client is not active\n");
		ret = -ENOTTY;
		goto out;
	}

	ret = mei_cl_bus_vtag_alloc(cldev);
	if (ret)
		goto out;

	ret = mei_cl_connect(cl, cldev->me_cl, NULL);
	if (ret < 0) {
		dev_err(&cldev->dev, "cannot connect\n");
		mei_cl_bus_vtag_free(cldev);
	}

out:
	if (ret)
		mei_cl_unlink(cl);
notlinked:
	mutex_unlock(&bus->device_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_enable);

/**
 * mei_cldev_unregister_callbacks - internal wrapper for unregistering
 *	callbacks.
 *
 * @cldev: client device
 */
static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
{
	if (cldev->rx_cb) {
		cancel_work_sync(&cldev->rx_work);
		cldev->rx_cb = NULL;
	}

	if (cldev->notif_cb) {
		cancel_work_sync(&cldev->notif_work);
		cldev->notif_cb = NULL;
	}
}

/**
 * mei_cldev_disable - disable me client device
 *	disconnect from the me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_disable(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int err;

	if (!cldev)
		return -ENODEV;

	cl = cldev->cl;

	bus = cldev->bus;

	mei_cldev_unregister_callbacks(cldev);

	mutex_lock(&bus->device_lock);

	mei_cl_bus_vtag_free(cldev);

	if (!mei_cl_is_connected(cl)) {
		dev_dbg(bus->dev, "Already disconnected\n");
		err = 0;
		goto out;
	}

	err = mei_cl_disconnect(cl);
	if (err < 0)
		dev_err(bus->dev, "Could not disconnect from the ME client\n");

out:
	/* Flush queues and remove any pending read unless we have mapped DMA */
	if (!cl->dma_mapped) {
		mei_cl_flush_queues(cl, NULL);
		mei_cl_unlink(cl);
	}

	mutex_unlock(&bus->device_lock);
	return err;
}
EXPORT_SYMBOL_GPL(mei_cldev_disable);
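/*
 * Usage sketch (illustrative, not part of the driver): the typical
 * connection lifecycle from a bus client driver's probe()/remove().
 * Note that mei_cldev_enable() is a no-op if already connected, and
 * mei_cldev_disable() returns 0 if already disconnected. The foo_*
 * names are assumptions.
 *
 *	static int foo_probe(struct mei_cl_device *cldev,
 *			     const struct mei_cl_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = mei_cldev_enable(cldev);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = mei_cldev_register_rx_cb(cldev, foo_rx_cb);
 *		if (ret) {
 *			mei_cldev_disable(cldev);
 *			return ret;
 *		}
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct mei_cl_device *cldev)
 *	{
 *		mei_cldev_disable(cldev);
 *	}
 */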
/**
 * mei_cldev_send_gsc_command - sends a gsc command, by sending
 * a gsl mei message to gsc and receiving reply from gsc
 *
 * @cldev: me client device
 * @client_id: client id to send the command to
 * @fence_id: fence id to send the command to
 * @sg_in: scatter gather list containing addresses for rx message buffer
 * @total_in_len: total length of data in 'in' sg, can be less than the sum of buffers sizes
 * @sg_out: scatter gather list containing addresses for tx message buffer
 *
 * Return:
 * * written size in bytes
 * * < 0 on error
 */
ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev,
				   u8 client_id, u32 fence_id,
				   struct scatterlist *sg_in,
				   size_t total_in_len,
				   struct scatterlist *sg_out)
{
	struct mei_cl *cl;
	struct mei_device *bus;
	ssize_t ret = 0;

	struct mei_ext_hdr_gsc_h2f *ext_hdr;
	size_t buf_sz = sizeof(struct mei_ext_hdr_gsc_h2f);
	int sg_out_nents, sg_in_nents;
	int i;
	struct scatterlist *sg;
	struct mei_ext_hdr_gsc_f2h rx_msg;
	unsigned int sg_len;

	if (!cldev || !sg_in || !sg_out)
		return -EINVAL;

	cl = cldev->cl;
	bus = cldev->bus;

	dev_dbg(bus->dev, "client_id %u, fence_id %u\n", client_id, fence_id);

	if (!bus->hbm_f_gsc_supported)
		return -EOPNOTSUPP;

	sg_out_nents = sg_nents(sg_out);
	sg_in_nents = sg_nents(sg_in);
	/* at least one entry in tx and rx sgls must be present */
	if (sg_out_nents <= 0 || sg_in_nents <= 0)
		return -EINVAL;

	buf_sz += (sg_out_nents + sg_in_nents) * sizeof(struct mei_gsc_sgl);
	ext_hdr = kzalloc(buf_sz, GFP_KERNEL);
	if (!ext_hdr)
		return -ENOMEM;

	/* construct the GSC message */
	ext_hdr->hdr.type = MEI_EXT_HDR_GSC;
	ext_hdr->hdr.length = buf_sz / sizeof(u32); /* length is in dw */

	ext_hdr->client_id = client_id;
	ext_hdr->addr_type = GSC_ADDRESS_TYPE_PHYSICAL_SGL;
	ext_hdr->fence_id = fence_id;
	ext_hdr->input_address_count = sg_in_nents;
	ext_hdr->output_address_count = sg_out_nents;
	ext_hdr->reserved[0] = 0;
	ext_hdr->reserved[1] = 0;

	/* copy in-sgl to the message */
	for (i = 0, sg = sg_in; i < sg_in_nents; i++, sg++) {
		ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
		ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
		sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
		ext_hdr->sgl[i].length = (sg_len <= total_in_len) ? sg_len : total_in_len;
		total_in_len -= ext_hdr->sgl[i].length;
	}

	/* copy out-sgl to the message */
	for (i = sg_in_nents, sg = sg_out; i < sg_in_nents + sg_out_nents; i++, sg++) {
		ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
		ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
		sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
		ext_hdr->sgl[i].length = sg_len;
	}

	/* send the message to GSC */
	ret = __mei_cl_send(cl, (u8 *)ext_hdr, buf_sz, 0, MEI_CL_IO_SGL);
	if (ret < 0) {
		dev_err(bus->dev, "__mei_cl_send failed, returned %zd\n", ret);
		goto end;
	}
	if (ret != buf_sz) {
		dev_err(bus->dev, "__mei_cl_send returned %zd instead of expected %zd\n",
			ret, buf_sz);
		ret = -EIO;
		goto end;
	}

	/* receive the reply from GSC, note that at this point sg_in should contain the reply */
	ret = __mei_cl_recv(cl, (u8 *)&rx_msg, sizeof(rx_msg), NULL, MEI_CL_IO_SGL, 0);

	if (ret != sizeof(rx_msg)) {
		dev_err(bus->dev, "__mei_cl_recv returned %zd instead of expected %zd\n",
			ret, sizeof(rx_msg));
		if (ret >= 0)
			ret = -EIO;
		goto end;
	}

	/* check rx_msg.client_id and rx_msg.fence_id match the ones we send */
	if (rx_msg.client_id != client_id || rx_msg.fence_id != fence_id) {
		dev_err(bus->dev, "received client_id/fence_id %u/%u instead of %u/%u sent\n",
			rx_msg.client_id, rx_msg.fence_id, client_id, fence_id);
		ret = -EFAULT;
		goto end;
	}

	dev_dbg(bus->dev, "gsc command: successfully written %u bytes\n", rx_msg.written);
	ret = rx_msg.written;

end:
	kfree(ext_hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_send_gsc_command);

/**
 * mei_cl_device_find - find matching entry in the driver id table
 *
 * @cldev: me client device
 * @cldrv: me client driver
 *
 * Return: id on success; NULL if no id is matching
 */
static const
struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
					    const struct mei_cl_driver *cldrv)
{
	const struct mei_cl_device_id *id;
	const uuid_le *uuid;
	u8 version;
	bool match;

	uuid = mei_me_cl_uuid(cldev->me_cl);
	version = mei_me_cl_ver(cldev->me_cl);

	id = cldrv->id_table;
	while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
		if (!uuid_le_cmp(*uuid, id->uuid)) {
			match = true;

			if (cldev->name[0])
				if (strncmp(cldev->name, id->name,
					    sizeof(id->name)))
					match = false;

			if (id->version != MEI_CL_VERSION_ANY)
				if (id->version != version)
					match = false;
			if (match)
				return id;
		}

		id++;
	}

	return NULL;
}
/**
 * mei_cl_device_match - device match function
 *
 * @dev: device
 * @drv: driver
 *
 * Return: 1 if matching device was found 0 otherwise
 */
static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
{
	const struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
	const struct mei_cl_device_id *found_id;

	if (!cldev)
		return 0;

	if (!cldev->do_match)
		return 0;

	if (!cldrv || !cldrv->id_table)
		return 0;

	found_id = mei_cl_device_find(cldev, cldrv);
	if (found_id)
		return 1;

	return 0;
}

/**
 * mei_cl_device_probe - bus probe function
 *
 * @dev: device
 *
 * Return: 0 on success; < 0 otherwise
 */
static int mei_cl_device_probe(struct device *dev)
{
	struct mei_cl_device *cldev;
	struct mei_cl_driver *cldrv;
	const struct mei_cl_device_id *id;
	int ret;

	cldev = to_mei_cl_device(dev);
	cldrv = to_mei_cl_driver(dev->driver);

	if (!cldev)
		return 0;

	if (!cldrv || !cldrv->probe)
		return -ENODEV;

	id = mei_cl_device_find(cldev, cldrv);
	if (!id)
		return -ENODEV;

	if (!mei_cl_bus_module_get(cldev)) {
		dev_err(&cldev->dev, "get hw module failed");
		return -ENODEV;
	}

	ret = cldrv->probe(cldev, id);
	if (ret) {
		mei_cl_bus_module_put(cldev);
		return ret;
	}

	__module_get(THIS_MODULE);
	return 0;
}

/**
 * mei_cl_device_remove - remove device from the bus
 *
 * @dev: device
 */
static void mei_cl_device_remove(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);

	if (cldrv->remove)
		cldrv->remove(cldev);

	mei_cldev_unregister_callbacks(cldev);

	mei_cl_bus_module_put(cldev);
	module_put(THIS_MODULE);
}

static ssize_t name_show(struct device *dev, struct device_attribute *a,
			 char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s", cldev->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
			 char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);

	return sprintf(buf, "%pUl", uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t version_show(struct device *dev, struct device_attribute *a,
			    char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	return sprintf(buf, "%02X", version);
}
static DEVICE_ATTR_RO(version);

static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
			 cldev->name, uuid, version);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);

	return sprintf(buf, "%d", maxconn);
}
static DEVICE_ATTR_RO(max_conn);

static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
			  char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 fixed = mei_me_cl_fixed(cldev->me_cl);

	return sprintf(buf, "%d", fixed);
}
static DEVICE_ATTR_RO(fixed);

static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
			 char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	bool vt = mei_me_cl_vt(cldev->me_cl);

	return sprintf(buf, "%d", vt);
}
static DEVICE_ATTR_RO(vtag);

static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
			    char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u32 maxlen = mei_me_cl_max_len(cldev->me_cl);

	return sprintf(buf, "%u", maxlen);
}
static DEVICE_ATTR_RO(max_len);

static struct attribute *mei_cldev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_uuid.attr,
	&dev_attr_version.attr,
	&dev_attr_modalias.attr,
	&dev_attr_max_conn.attr,
	&dev_attr_fixed.attr,
	&dev_attr_vtag.attr,
	&dev_attr_max_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mei_cldev);

/**
 * mei_cl_device_uevent - me client bus uevent handler
 *
 * @dev: device
 * @env: uevent kobject
 *
 * Return: 0 on success, -ENOMEM when add_uevent_var fails
 */
static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
		return -ENOMEM;

	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
		return -ENOMEM;

	if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
			   cldev->name, uuid, version))
		return -ENOMEM;

	return 0;
}

static struct bus_type mei_cl_bus_type = {
	.name = "mei",
	.dev_groups = mei_cldev_groups,
	.match = mei_cl_device_match,
	.probe = mei_cl_device_probe,
	.remove = mei_cl_device_remove,
	.uevent = mei_cl_device_uevent,
};

static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
{
	if (bus)
		get_device(bus->dev);

	return bus;
}

static void mei_dev_bus_put(struct mei_device *bus)
{
	if (bus)
		put_device(bus->dev);
}

static void mei_cl_bus_dev_release(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);

	if (!cldev)
		return;

	mei_cl_flush_queues(cldev->cl, NULL);
	mei_me_cl_put(cldev->me_cl);
	mei_dev_bus_put(cldev->bus);
	kfree(cldev->cl);
	kfree(cldev);
}

static const struct device_type mei_cl_device_type = {
	.release = mei_cl_bus_dev_release,
};

/**
 * mei_cl_bus_set_name - set device name for me client device
 *	<controller>-<client device>
 *	Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
 *
 * @cldev: me client device
 */
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
{
	dev_set_name(&cldev->dev, "%s-%pUl",
		     dev_name(cldev->bus->dev),
		     mei_me_cl_uuid(cldev->me_cl));
}
/**
 * mei_cl_bus_dev_alloc - initialize and allocate mei client device
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Return: allocated device structure or NULL on allocation failure
 */
static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
						  struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;
	struct mei_cl *cl;

	cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
	if (!cldev)
		return NULL;

	cl = mei_cl_allocate(bus);
	if (!cl) {
		kfree(cldev);
		return NULL;
	}

	device_initialize(&cldev->dev);
	cldev->dev.parent = bus->dev;
	cldev->dev.bus = &mei_cl_bus_type;
	cldev->dev.type = &mei_cl_device_type;
	cldev->bus = mei_dev_bus_get(bus);
	cldev->me_cl = mei_me_cl_get(me_cl);
	cldev->cl = cl;
	mei_cl_bus_set_name(cldev);
	cldev->is_added = 0;
	INIT_LIST_HEAD(&cldev->bus_list);

	return cldev;
}

/**
 * mei_cl_bus_dev_setup - setup me client device
 *	run fix up routines and set the device name
 *
 * @bus: mei device
 * @cldev: me client device
 *
 * Return: true if the device is eligible for enumeration
 */
static bool mei_cl_bus_dev_setup(struct mei_device *bus,
				 struct mei_cl_device *cldev)
{
	cldev->do_match = 1;
	mei_cl_bus_dev_fixup(cldev);

	/* the device name can change during fix up */
	if (cldev->do_match)
		mei_cl_bus_set_name(cldev);

	return cldev->do_match == 1;
}

/**
 * mei_cl_bus_dev_add - add me client devices
 *
 * @cldev: me client device
 *
 * Return: 0 on success; < 0 on failure
 */
static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
{
	int ret;

	dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n",
		mei_me_cl_uuid(cldev->me_cl),
		mei_me_cl_ver(cldev->me_cl));
	ret = device_add(&cldev->dev);
	if (!ret)
		cldev->is_added = 1;

	return ret;
}

/**
 * mei_cl_bus_dev_stop - stop the driver
 *
 * @cldev: me client device
 */
static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
{
	if (cldev->is_added)
		device_release_driver(&cldev->dev);
}

/**
 * mei_cl_bus_dev_destroy - destroy me client devices object
 *
 * @cldev: me client device
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
{
	WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));

	if (!cldev->is_added)
		return;

	device_del(&cldev->dev);

	list_del_init(&cldev->bus_list);

	cldev->is_added = 0;
	put_device(&cldev->dev);
}

/**
 * mei_cl_bus_remove_device - remove a device from the bus
 *
 * @cldev: me client device
 */
static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
{
	mei_cl_bus_dev_stop(cldev);
	mei_cl_bus_dev_destroy(cldev);
}

/**
 * mei_cl_bus_remove_devices - remove all devices from the bus
 *
 * @bus: mei device
 */
void mei_cl_bus_remove_devices(struct mei_device *bus)
{
	struct mei_cl_device *cldev, *next;

	mutex_lock(&bus->cl_bus_lock);
	list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
		mei_cl_bus_remove_device(cldev);
	mutex_unlock(&bus->cl_bus_lock);
}

/**
 * mei_cl_bus_dev_init - allocate and initialize a mei client device
 *	based on the me client
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_init(struct mei_device *bus,
				struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;

	WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));

	dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));

	if (me_cl->bus_added)
		return;

	cldev = mei_cl_bus_dev_alloc(bus, me_cl);
	if (!cldev)
		return;

	me_cl->bus_added = true;
	list_add_tail(&cldev->bus_list, &bus->device_list);
}

/**
 * mei_cl_bus_rescan - scan me clients list and create
 *	devices for eligible clients
 *
 * @bus: mei device
 */
static void mei_cl_bus_rescan(struct mei_device *bus)
{
	struct mei_cl_device *cldev, *n;
	struct mei_me_client *me_cl;

	mutex_lock(&bus->cl_bus_lock);

	down_read(&bus->me_clients_rwsem);
	list_for_each_entry(me_cl, &bus->me_clients, list)
		mei_cl_bus_dev_init(bus, me_cl);
	up_read(&bus->me_clients_rwsem);

	list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {

		if (!mei_me_cl_is_active(cldev->me_cl)) {
			mei_cl_bus_remove_device(cldev);
			continue;
		}

		if (cldev->is_added)
			continue;

		if (mei_cl_bus_dev_setup(bus, cldev))
			mei_cl_bus_dev_add(cldev);
		else {
			list_del_init(&cldev->bus_list);
			put_device(&cldev->dev);
		}
	}
	mutex_unlock(&bus->cl_bus_lock);

	dev_dbg(bus->dev, "rescan end");
}

void mei_cl_bus_rescan_work(struct work_struct *work)
{
	struct mei_device *bus =
		container_of(work, struct mei_device, bus_rescan_work);

	mei_cl_bus_rescan(bus);
}

int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
				struct module *owner)
{
	int err;

	cldrv->driver.name = cldrv->name;
	cldrv->driver.owner = owner;
	cldrv->driver.bus = &mei_cl_bus_type;

	err = driver_register(&cldrv->driver);
	if (err)
		return err;

	pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);

	return 0;
}
EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);

void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
{
	driver_unregister(&cldrv->driver);

	pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
}
EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);

int __init mei_cl_bus_init(void)
{
	return bus_register(&mei_cl_bus_type);
}

void __exit mei_cl_bus_exit(void)
{
	bus_unregister(&mei_cl_bus_type);
}
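/*
 * Usage sketch (illustrative, not part of the driver): the skeleton of a
 * mei client driver sitting on this bus. The uuid digits, the "foo" name
 * and the foo_* callbacks are placeholders; module_mei_cl_driver() from
 * <linux/mei_cl_bus.h> expands to module init/exit that call
 * mei_cldev_driver_register()/mei_cldev_driver_unregister().
 *
 *	#define FOO_UUID UUID_LE(0x12345678, 0x1234, 0x1234, \
 *				 0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34)
 *
 *	static const struct mei_cl_device_id foo_id_table[] = {
 *		{ .uuid = FOO_UUID, .version = MEI_CL_VERSION_ANY },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(mei, foo_id_table);
 *
 *	static struct mei_cl_driver foo_driver = {
 *		.id_table = foo_id_table,
 *		.name = "foo",
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *	module_mei_cl_driver(foo_driver);
 */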