1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * ISHTP client logic 4 * 5 * Copyright (c) 2003-2016, Intel Corporation. 6 */ 7 8 #include <linux/slab.h> 9 #include <linux/sched.h> 10 #include <linux/wait.h> 11 #include <linux/delay.h> 12 #include <linux/dma-mapping.h> 13 #include <asm/cacheflush.h> 14 #include "hbm.h" 15 #include "client.h" 16 17 int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl) 18 { 19 unsigned long tx_free_flags; 20 int size; 21 22 spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); 23 size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length; 24 spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags); 25 26 return size; 27 } 28 EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size); 29 30 int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl) 31 { 32 return cl->tx_ring_free_size; 33 } 34 EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings); 35 36 /** 37 * ishtp_read_list_flush() - Flush read queue 38 * @cl: ishtp client instance 39 * 40 * Used to remove all entries from read queue for a client 41 */ 42 static void ishtp_read_list_flush(struct ishtp_cl *cl) 43 { 44 struct ishtp_cl_rb *rb; 45 struct ishtp_cl_rb *next; 46 unsigned long flags; 47 48 spin_lock_irqsave(&cl->dev->read_list_spinlock, flags); 49 list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list) 50 if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) { 51 list_del(&rb->list); 52 ishtp_io_rb_free(rb); 53 } 54 spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags); 55 } 56 57 /** 58 * ishtp_cl_flush_queues() - Flush all queues for a client 59 * @cl: ishtp client instance 60 * 61 * Used to remove all queues for a client. This is called when a client device 62 * needs reset due to error, S3 resume or during module removal 63 * 64 * Return: 0 on success else -EINVAL if device is NULL 65 */ 66 int ishtp_cl_flush_queues(struct ishtp_cl *cl) 67 { 68 if (WARN_ON(!cl || !cl->dev)) 69 return -EINVAL; 70 71 ishtp_read_list_flush(cl); 72 73 return 0; 74 } 75 EXPORT_SYMBOL(ishtp_cl_flush_queues); 76 77 /** 78 * ishtp_cl_init() - Initialize all fields of a client device 79 * @cl: ishtp client instance 80 * @dev: ishtp device 81 * 82 * Initializes a client device fields: Init spinlocks, init queues etc. 83 * This function is called during new client creation 84 */ 85 static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev) 86 { 87 memset(cl, 0, sizeof(struct ishtp_cl)); 88 init_waitqueue_head(&cl->wait_ctrl_res); 89 spin_lock_init(&cl->free_list_spinlock); 90 spin_lock_init(&cl->in_process_spinlock); 91 spin_lock_init(&cl->tx_list_spinlock); 92 spin_lock_init(&cl->tx_free_list_spinlock); 93 spin_lock_init(&cl->fc_spinlock); 94 INIT_LIST_HEAD(&cl->link); 95 cl->dev = dev; 96 97 INIT_LIST_HEAD(&cl->free_rb_list.list); 98 INIT_LIST_HEAD(&cl->tx_list.list); 99 INIT_LIST_HEAD(&cl->tx_free_list.list); 100 INIT_LIST_HEAD(&cl->in_process_list.list); 101 102 cl->rx_ring_size = CL_DEF_RX_RING_SIZE; 103 cl->tx_ring_size = CL_DEF_TX_RING_SIZE; 104 cl->tx_ring_free_size = cl->tx_ring_size; 105 106 /* dma */ 107 cl->last_tx_path = CL_TX_PATH_IPC; 108 cl->last_dma_acked = 1; 109 cl->last_dma_addr = NULL; 110 cl->last_ipc_acked = 1; 111 } 112 113 /** 114 * ishtp_cl_allocate() - allocates client structure and sets it up. 115 * @cl_device: ishtp client device 116 * 117 * Allocate memory for new client device and call to initialize each field. 118 * 119 * Return: The allocated client instance or NULL on failure 120 */ 121 struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device) 122 { 123 struct ishtp_cl *cl; 124 125 cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL); 126 if (!cl) 127 return NULL; 128 129 ishtp_cl_init(cl, cl_device->ishtp_dev); 130 return cl; 131 } 132 EXPORT_SYMBOL(ishtp_cl_allocate); 133 134 /** 135 * ishtp_cl_free() - Frees a client device 136 * @cl: client device instance 137 * 138 * Frees a client device 139 */ 140 void ishtp_cl_free(struct ishtp_cl *cl) 141 { 142 struct ishtp_device *dev; 143 unsigned long flags; 144 145 if (!cl) 146 return; 147 148 dev = cl->dev; 149 if (!dev) 150 return; 151 152 spin_lock_irqsave(&dev->cl_list_lock, flags); 153 ishtp_cl_free_rx_ring(cl); 154 ishtp_cl_free_tx_ring(cl); 155 kfree(cl); 156 spin_unlock_irqrestore(&dev->cl_list_lock, flags); 157 } 158 EXPORT_SYMBOL(ishtp_cl_free); 159 160 /** 161 * ishtp_cl_link() - Reserve a host id and link the client instance 162 * @cl: client device instance 163 * 164 * This allocates a single bit in the hostmap. This function will make sure 165 * that not many client sessions are opened at the same time. Once allocated 166 * the client device instance is added to the ishtp device in the current 167 * client list 168 * 169 * Return: 0 or error code on failure 170 */ 171 int ishtp_cl_link(struct ishtp_cl *cl) 172 { 173 struct ishtp_device *dev; 174 unsigned long flags, flags_cl; 175 int id, ret = 0; 176 177 if (WARN_ON(!cl || !cl->dev)) 178 return -EINVAL; 179 180 dev = cl->dev; 181 182 spin_lock_irqsave(&dev->device_lock, flags); 183 184 if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) { 185 ret = -EMFILE; 186 goto unlock_dev; 187 } 188 189 id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX); 190 191 if (id >= ISHTP_CLIENTS_MAX) { 192 spin_unlock_irqrestore(&dev->device_lock, flags); 193 dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX); 194 return -ENOENT; 195 } 196 197 dev->open_handle_count++; 198 cl->host_client_id = id; 199 spin_lock_irqsave(&dev->cl_list_lock, flags_cl); 200 if (dev->dev_state != ISHTP_DEV_ENABLED) { 201 ret = -ENODEV; 202 goto unlock_cl; 203 } 204 list_add_tail(&cl->link, &dev->cl_list); 205 set_bit(id, dev->host_clients_map); 206 cl->state = ISHTP_CL_INITIALIZING; 207 208 unlock_cl: 209 spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl); 210 unlock_dev: 211 spin_unlock_irqrestore(&dev->device_lock, flags); 212 return ret; 213 } 214 EXPORT_SYMBOL(ishtp_cl_link); 215 216 /** 217 * ishtp_cl_unlink() - remove fw_cl from the client device list 218 * @cl: client device instance 219 * 220 * Remove a previously linked device to a ishtp device 221 */ 222 void ishtp_cl_unlink(struct ishtp_cl *cl) 223 { 224 struct ishtp_device *dev; 225 struct ishtp_cl *pos; 226 unsigned long flags; 227 228 /* don't shout on error exit path */ 229 if (!cl || !cl->dev) 230 return; 231 232 dev = cl->dev; 233 234 spin_lock_irqsave(&dev->device_lock, flags); 235 if (dev->open_handle_count > 0) { 236 clear_bit(cl->host_client_id, dev->host_clients_map); 237 dev->open_handle_count--; 238 } 239 spin_unlock_irqrestore(&dev->device_lock, flags); 240 241 /* 242 * This checks that 'cl' is actually linked into device's structure, 243 * before attempting 'list_del' 244 */ 245 spin_lock_irqsave(&dev->cl_list_lock, flags); 246 list_for_each_entry(pos, &dev->cl_list, link) 247 if (cl->host_client_id == pos->host_client_id) { 248 list_del_init(&pos->link); 249 break; 250 } 251 spin_unlock_irqrestore(&dev->cl_list_lock, flags); 252 } 253 EXPORT_SYMBOL(ishtp_cl_unlink); 254 255 /** 256 * ishtp_cl_disconnect() - Send disconnect request to firmware 257 * @cl: client device instance 258 * 259 * Send a disconnect request for a client to firmware. 260 * 261 * Return: 0 if successful disconnect response from the firmware or error 262 * code on failure 263 */ 264 int ishtp_cl_disconnect(struct ishtp_cl *cl) 265 { 266 struct ishtp_device *dev; 267 268 if (WARN_ON(!cl || !cl->dev)) 269 return -ENODEV; 270 271 dev = cl->dev; 272 273 dev->print_log(dev, "%s() state %d\n", __func__, cl->state); 274 275 if (cl->state != ISHTP_CL_DISCONNECTING) { 276 dev->print_log(dev, "%s() Disconnect in progress\n", __func__); 277 return 0; 278 } 279 280 if (ishtp_hbm_cl_disconnect_req(dev, cl)) { 281 dev->print_log(dev, "%s() Failed to disconnect\n", __func__); 282 dev_err(&cl->device->dev, "failed to disconnect.\n"); 283 return -ENODEV; 284 } 285 286 wait_event_interruptible_timeout(cl->wait_ctrl_res, 287 (dev->dev_state != ISHTP_DEV_ENABLED || 288 cl->state == ISHTP_CL_DISCONNECTED), 289 ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT)); 290 291 /* 292 * If FW reset arrived, this will happen. Don't check cl->, 293 * as 'cl' may be freed already 294 */ 295 if (dev->dev_state != ISHTP_DEV_ENABLED) { 296 dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n", 297 __func__); 298 return -ENODEV; 299 } 300 301 if (cl->state == ISHTP_CL_DISCONNECTED) { 302 dev->print_log(dev, "%s() successful\n", __func__); 303 return 0; 304 } 305 306 return -ENODEV; 307 } 308 EXPORT_SYMBOL(ishtp_cl_disconnect); 309 310 /** 311 * ishtp_cl_is_other_connecting() - Check other client is connecting 312 * @cl: client device instance 313 * 314 * Checks if other client with the same fw client id is connecting 315 * 316 * Return: true if other client is connected else false 317 */ 318 static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl) 319 { 320 struct ishtp_device *dev; 321 struct ishtp_cl *pos; 322 unsigned long flags; 323 324 if (WARN_ON(!cl || !cl->dev)) 325 return false; 326 327 dev = cl->dev; 328 spin_lock_irqsave(&dev->cl_list_lock, flags); 329 list_for_each_entry(pos, &dev->cl_list, link) { 330 if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) && 331 cl->fw_client_id == pos->fw_client_id) { 332 spin_unlock_irqrestore(&dev->cl_list_lock, flags); 333 return true; 334 } 335 } 336 spin_unlock_irqrestore(&dev->cl_list_lock, flags); 337 338 return false; 339 } 340 341 /** 342 * ishtp_cl_connect() - Send connect request to firmware 343 * @cl: client device instance 344 * 345 * Send a connect request for a client to firmware. If successful it will 346 * RX and TX ring buffers 347 * 348 * Return: 0 if successful connect response from the firmware and able 349 * to bind and allocate ring buffers or error code on failure 350 */ 351 int ishtp_cl_connect(struct ishtp_cl *cl) 352 { 353 struct ishtp_device *dev; 354 int rets; 355 356 if (WARN_ON(!cl || !cl->dev)) 357 return -ENODEV; 358 359 dev = cl->dev; 360 361 dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state); 362 363 if (ishtp_cl_is_other_connecting(cl)) { 364 dev->print_log(dev, "%s() Busy\n", __func__); 365 return -EBUSY; 366 } 367 368 if (ishtp_hbm_cl_connect_req(dev, cl)) { 369 dev->print_log(dev, "%s() HBM connect req fail\n", __func__); 370 return -ENODEV; 371 } 372 373 rets = wait_event_interruptible_timeout(cl->wait_ctrl_res, 374 (dev->dev_state == ISHTP_DEV_ENABLED && 375 (cl->state == ISHTP_CL_CONNECTED || 376 cl->state == ISHTP_CL_DISCONNECTED)), 377 ishtp_secs_to_jiffies( 378 ISHTP_CL_CONNECT_TIMEOUT)); 379 /* 380 * If FW reset arrived, this will happen. Don't check cl->, 381 * as 'cl' may be freed already 382 */ 383 if (dev->dev_state != ISHTP_DEV_ENABLED) { 384 dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n", 385 __func__); 386 return -EFAULT; 387 } 388 389 if (cl->state != ISHTP_CL_CONNECTED) { 390 dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n", 391 __func__); 392 return -EFAULT; 393 } 394 395 rets = cl->status; 396 if (rets) { 397 dev->print_log(dev, "%s() Invalid status\n", __func__); 398 return rets; 399 } 400 401 rets = ishtp_cl_device_bind(cl); 402 if (rets) { 403 dev->print_log(dev, "%s() Bind error\n", __func__); 404 ishtp_cl_disconnect(cl); 405 return rets; 406 } 407 408 rets = ishtp_cl_alloc_rx_ring(cl); 409 if (rets) { 410 dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__); 411 /* if failed allocation, disconnect */ 412 ishtp_cl_disconnect(cl); 413 return rets; 414 } 415 416 rets = ishtp_cl_alloc_tx_ring(cl); 417 if (rets) { 418 dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__); 419 /* if failed allocation, disconnect */ 420 ishtp_cl_free_rx_ring(cl); 421 ishtp_cl_disconnect(cl); 422 return rets; 423 } 424 425 /* Upon successful connection and allocation, emit flow-control */ 426 rets = ishtp_cl_read_start(cl); 427 428 dev->print_log(dev, "%s() successful\n", __func__); 429 430 return rets; 431 } 432 EXPORT_SYMBOL(ishtp_cl_connect); 433 434 /** 435 * ishtp_cl_read_start() - Prepare to read client message 436 * @cl: client device instance 437 * 438 * Get a free buffer from pool of free read buffers and add to read buffer 439 * pool to add contents. Send a flow control request to firmware to be able 440 * send next message. 441 * 442 * Return: 0 if successful or error code on failure 443 */ 444 int ishtp_cl_read_start(struct ishtp_cl *cl) 445 { 446 struct ishtp_device *dev; 447 struct ishtp_cl_rb *rb; 448 int rets; 449 int i; 450 unsigned long flags; 451 unsigned long dev_flags; 452 453 if (WARN_ON(!cl || !cl->dev)) 454 return -ENODEV; 455 456 dev = cl->dev; 457 458 if (cl->state != ISHTP_CL_CONNECTED) 459 return -ENODEV; 460 461 if (dev->dev_state != ISHTP_DEV_ENABLED) 462 return -ENODEV; 463 464 i = ishtp_fw_cl_by_id(dev, cl->fw_client_id); 465 if (i < 0) { 466 dev_err(&cl->device->dev, "no such fw client %d\n", 467 cl->fw_client_id); 468 return -ENODEV; 469 } 470 471 /* The current rb is the head of the free rb list */ 472 spin_lock_irqsave(&cl->free_list_spinlock, flags); 473 if (list_empty(&cl->free_rb_list.list)) { 474 dev_warn(&cl->device->dev, 475 "[ishtp-ish] Rx buffers pool is empty\n"); 476 rets = -ENOMEM; 477 rb = NULL; 478 spin_unlock_irqrestore(&cl->free_list_spinlock, flags); 479 goto out; 480 } 481 rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list); 482 list_del_init(&rb->list); 483 spin_unlock_irqrestore(&cl->free_list_spinlock, flags); 484 485 rb->cl = cl; 486 rb->buf_idx = 0; 487 488 INIT_LIST_HEAD(&rb->list); 489 rets = 0; 490 491 /* 492 * This must be BEFORE sending flow control - 493 * response in ISR may come too fast... 494 */ 495 spin_lock_irqsave(&dev->read_list_spinlock, dev_flags); 496 list_add_tail(&rb->list, &dev->read_list.list); 497 spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags); 498 if (ishtp_hbm_cl_flow_control_req(dev, cl)) { 499 rets = -ENODEV; 500 goto out; 501 } 502 out: 503 /* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */ 504 if (rets && rb) { 505 spin_lock_irqsave(&dev->read_list_spinlock, dev_flags); 506 list_del(&rb->list); 507 spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags); 508 509 spin_lock_irqsave(&cl->free_list_spinlock, flags); 510 list_add_tail(&rb->list, &cl->free_rb_list.list); 511 spin_unlock_irqrestore(&cl->free_list_spinlock, flags); 512 } 513 return rets; 514 } 515 516 /** 517 * ishtp_cl_send() - Send a message to firmware 518 * @cl: client device instance 519 * @buf: message buffer 520 * @length: length of message 521 * 522 * If the client is correct state to send message, this function gets a buffer 523 * from tx ring buffers, copy the message data and call to send the message 524 * using ishtp_cl_send_msg() 525 * 526 * Return: 0 if successful or error code on failure 527 */ 528 int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length) 529 { 530 struct ishtp_device *dev; 531 int id; 532 struct ishtp_cl_tx_ring *cl_msg; 533 int have_msg_to_send = 0; 534 unsigned long tx_flags, tx_free_flags; 535 536 if (WARN_ON(!cl || !cl->dev)) 537 return -ENODEV; 538 539 dev = cl->dev; 540 541 if (cl->state != ISHTP_CL_CONNECTED) { 542 ++cl->err_send_msg; 543 return -EPIPE; 544 } 545 546 if (dev->dev_state != ISHTP_DEV_ENABLED) { 547 ++cl->err_send_msg; 548 return -ENODEV; 549 } 550 551 /* Check if we have fw client device */ 552 id = ishtp_fw_cl_by_id(dev, cl->fw_client_id); 553 if (id < 0) { 554 ++cl->err_send_msg; 555 return -ENOENT; 556 } 557 558 if (length > dev->fw_clients[id].props.max_msg_length) { 559 ++cl->err_send_msg; 560 return -EMSGSIZE; 561 } 562 563 /* No free bufs */ 564 spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); 565 if (list_empty(&cl->tx_free_list.list)) { 566 spin_unlock_irqrestore(&cl->tx_free_list_spinlock, 567 tx_free_flags); 568 ++cl->err_send_msg; 569 return -ENOMEM; 570 } 571 572 cl_msg = list_first_entry(&cl->tx_free_list.list, 573 struct ishtp_cl_tx_ring, list); 574 if (!cl_msg->send_buf.data) { 575 spin_unlock_irqrestore(&cl->tx_free_list_spinlock, 576 tx_free_flags); 577 return -EIO; 578 /* Should not happen, as free list is pre-allocated */ 579 } 580 /* 581 * This is safe, as 'length' is already checked for not exceeding 582 * max ISHTP message size per client 583 */ 584 list_del_init(&cl_msg->list); 585 --cl->tx_ring_free_size; 586 587 spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags); 588 memcpy(cl_msg->send_buf.data, buf, length); 589 cl_msg->send_buf.size = length; 590 spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags); 591 have_msg_to_send = !list_empty(&cl->tx_list.list); 592 list_add_tail(&cl_msg->list, &cl->tx_list.list); 593 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); 594 595 if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0) 596 ishtp_cl_send_msg(dev, cl); 597 598 return 0; 599 } 600 EXPORT_SYMBOL(ishtp_cl_send); 601 602 /** 603 * ishtp_cl_read_complete() - read complete 604 * @rb: Pointer to client request block 605 * 606 * If the message is completely received call ishtp_cl_bus_rx_event() 607 * to process message 608 */ 609 static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb) 610 { 611 unsigned long flags; 612 int schedule_work_flag = 0; 613 struct ishtp_cl *cl = rb->cl; 614 615 spin_lock_irqsave(&cl->in_process_spinlock, flags); 616 /* 617 * if in-process list is empty, then need to schedule 618 * the processing thread 619 */ 620 schedule_work_flag = list_empty(&cl->in_process_list.list); 621 list_add_tail(&rb->list, &cl->in_process_list.list); 622 spin_unlock_irqrestore(&cl->in_process_spinlock, flags); 623 624 if (schedule_work_flag) 625 ishtp_cl_bus_rx_event(cl->device); 626 } 627 628 /** 629 * ipc_tx_send() - IPC tx send function 630 * @prm: Pointer to client device instance 631 * 632 * Send message over IPC. Message will be split into fragments 633 * if message size is bigger than IPC FIFO size, and all 634 * fragments will be sent one by one. 635 */ 636 static void ipc_tx_send(void *prm) 637 { 638 struct ishtp_cl *cl = prm; 639 struct ishtp_cl_tx_ring *cl_msg; 640 size_t rem; 641 struct ishtp_device *dev = (cl ? cl->dev : NULL); 642 struct ishtp_msg_hdr ishtp_hdr; 643 unsigned long tx_flags, tx_free_flags; 644 unsigned char *pmsg; 645 646 if (!dev) 647 return; 648 649 /* 650 * Other conditions if some critical error has 651 * occurred before this callback is called 652 */ 653 if (dev->dev_state != ISHTP_DEV_ENABLED) 654 return; 655 656 if (cl->state != ISHTP_CL_CONNECTED) 657 return; 658 659 spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags); 660 if (list_empty(&cl->tx_list.list)) { 661 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); 662 return; 663 } 664 665 if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) { 666 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); 667 return; 668 } 669 670 if (!cl->sending) { 671 --cl->ishtp_flow_ctrl_creds; 672 cl->last_ipc_acked = 0; 673 cl->last_tx_path = CL_TX_PATH_IPC; 674 cl->sending = 1; 675 } 676 677 cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring, 678 list); 679 rem = cl_msg->send_buf.size - cl->tx_offs; 680 681 while (rem > 0) { 682 ishtp_hdr.host_addr = cl->host_client_id; 683 ishtp_hdr.fw_addr = cl->fw_client_id; 684 ishtp_hdr.reserved = 0; 685 pmsg = cl_msg->send_buf.data + cl->tx_offs; 686 687 if (rem <= dev->mtu) { 688 /* Last fragment or only one packet */ 689 ishtp_hdr.length = rem; 690 ishtp_hdr.msg_complete = 1; 691 /* Submit to IPC queue with no callback */ 692 ishtp_write_message(dev, &ishtp_hdr, pmsg); 693 cl->tx_offs = 0; 694 cl->sending = 0; 695 696 break; 697 } else { 698 /* Send ipc fragment */ 699 ishtp_hdr.length = dev->mtu; 700 ishtp_hdr.msg_complete = 0; 701 /* All fregments submitted to IPC queue with no callback */ 702 ishtp_write_message(dev, &ishtp_hdr, pmsg); 703 cl->tx_offs += dev->mtu; 704 rem = cl_msg->send_buf.size - cl->tx_offs; 705 } 706 } 707 708 list_del_init(&cl_msg->list); 709 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); 710 711 spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); 712 list_add_tail(&cl_msg->list, &cl->tx_free_list.list); 713 ++cl->tx_ring_free_size; 714 spin_unlock_irqrestore(&cl->tx_free_list_spinlock, 715 tx_free_flags); 716 } 717 718 /** 719 * ishtp_cl_send_msg_ipc() -Send message using IPC 720 * @dev: ISHTP device instance 721 * @cl: Pointer to client device instance 722 * 723 * Send message over IPC not using DMA 724 */ 725 static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev, 726 struct ishtp_cl *cl) 727 { 728 /* If last DMA message wasn't acked yet, leave this one in Tx queue */ 729 if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0) 730 return; 731 732 cl->tx_offs = 0; 733 ipc_tx_send(cl); 734 ++cl->send_msg_cnt_ipc; 735 } 736 737 /** 738 * ishtp_cl_send_msg_dma() -Send message using DMA 739 * @dev: ISHTP device instance 740 * @cl: Pointer to client device instance 741 * 742 * Send message using DMA 743 */ 744 static void ishtp_cl_send_msg_dma(struct ishtp_device *dev, 745 struct ishtp_cl *cl) 746 { 747 struct ishtp_msg_hdr hdr; 748 struct dma_xfer_hbm dma_xfer; 749 unsigned char *msg_addr; 750 int off; 751 struct ishtp_cl_tx_ring *cl_msg; 752 unsigned long tx_flags, tx_free_flags; 753 754 /* If last IPC message wasn't acked yet, leave this one in Tx queue */ 755 if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0) 756 return; 757 758 spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags); 759 if (list_empty(&cl->tx_list.list)) { 760 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); 761 return; 762 } 763 764 cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring, 765 list); 766 767 msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size); 768 if (!msg_addr) { 769 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); 770 if (dev->transfer_path == CL_TX_PATH_DEFAULT) 771 ishtp_cl_send_msg_ipc(dev, cl); 772 return; 773 } 774 775 list_del_init(&cl_msg->list); /* Must be before write */ 776 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); 777 778 --cl->ishtp_flow_ctrl_creds; 779 cl->last_dma_acked = 0; 780 cl->last_dma_addr = msg_addr; 781 cl->last_tx_path = CL_TX_PATH_DMA; 782 783 /* write msg to dma buf */ 784 memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size); 785 786 /* 787 * if current fw don't support cache snooping, driver have to 788 * flush the cache manually. 789 */ 790 if (dev->ops->dma_no_cache_snooping && 791 dev->ops->dma_no_cache_snooping(dev)) 792 clflush_cache_range(msg_addr, cl_msg->send_buf.size); 793 794 /* send dma_xfer hbm msg */ 795 off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf; 796 ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm)); 797 dma_xfer.hbm = DMA_XFER; 798 dma_xfer.fw_client_id = cl->fw_client_id; 799 dma_xfer.host_client_id = cl->host_client_id; 800 dma_xfer.reserved = 0; 801 dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off; 802 dma_xfer.msg_length = cl_msg->send_buf.size; 803 dma_xfer.reserved2 = 0; 804 ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer); 805 spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); 806 list_add_tail(&cl_msg->list, &cl->tx_free_list.list); 807 ++cl->tx_ring_free_size; 808 spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags); 809 ++cl->send_msg_cnt_dma; 810 } 811 812 /** 813 * ishtp_cl_send_msg() -Send message using DMA or IPC 814 * @dev: ISHTP device instance 815 * @cl: Pointer to client device instance 816 * 817 * Send message using DMA or IPC based on transfer_path 818 */ 819 void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl) 820 { 821 if (dev->transfer_path == CL_TX_PATH_DMA) 822 ishtp_cl_send_msg_dma(dev, cl); 823 else 824 ishtp_cl_send_msg_ipc(dev, cl); 825 } 826 827 /** 828 * recv_ishtp_cl_msg() -Receive client message 829 * @dev: ISHTP device instance 830 * @ishtp_hdr: Pointer to message header 831 * 832 * Receive and dispatch ISHTP client messages. This function executes in ISR 833 * or work queue context 834 */ 835 void recv_ishtp_cl_msg(struct ishtp_device *dev, 836 struct ishtp_msg_hdr *ishtp_hdr) 837 { 838 struct ishtp_cl *cl; 839 struct ishtp_cl_rb *rb; 840 struct ishtp_cl_rb *new_rb; 841 unsigned char *buffer = NULL; 842 struct ishtp_cl_rb *complete_rb = NULL; 843 unsigned long flags; 844 int rb_count; 845 846 if (ishtp_hdr->reserved) { 847 dev_err(dev->devc, "corrupted message header.\n"); 848 goto eoi; 849 } 850 851 if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) { 852 dev_err(dev->devc, 853 "ISHTP message length in hdr exceeds IPC MTU\n"); 854 goto eoi; 855 } 856 857 spin_lock_irqsave(&dev->read_list_spinlock, flags); 858 rb_count = -1; 859 list_for_each_entry(rb, &dev->read_list.list, list) { 860 ++rb_count; 861 cl = rb->cl; 862 if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr && 863 cl->fw_client_id == ishtp_hdr->fw_addr) || 864 !(cl->state == ISHTP_CL_CONNECTED)) 865 continue; 866 867 /* If no Rx buffer is allocated, disband the rb */ 868 if (rb->buffer.size == 0 || rb->buffer.data == NULL) { 869 spin_unlock_irqrestore(&dev->read_list_spinlock, flags); 870 dev_err(&cl->device->dev, 871 "Rx buffer is not allocated.\n"); 872 list_del(&rb->list); 873 ishtp_io_rb_free(rb); 874 cl->status = -ENOMEM; 875 goto eoi; 876 } 877 878 /* 879 * If message buffer overflown (exceeds max. client msg 880 * size, drop message and return to free buffer. 881 * Do we need to disconnect such a client? (We don't send 882 * back FC, so communication will be stuck anyway) 883 */ 884 if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) { 885 spin_unlock_irqrestore(&dev->read_list_spinlock, flags); 886 dev_err(&cl->device->dev, 887 "message overflow. size %d len %d idx %ld\n", 888 rb->buffer.size, ishtp_hdr->length, 889 rb->buf_idx); 890 list_del(&rb->list); 891 ishtp_cl_io_rb_recycle(rb); 892 cl->status = -EIO; 893 goto eoi; 894 } 895 896 buffer = rb->buffer.data + rb->buf_idx; 897 dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length); 898 899 rb->buf_idx += ishtp_hdr->length; 900 if (ishtp_hdr->msg_complete) { 901 /* Last fragment in message - it's complete */ 902 cl->status = 0; 903 list_del(&rb->list); 904 complete_rb = rb; 905 906 --cl->out_flow_ctrl_creds; 907 /* 908 * the whole msg arrived, send a new FC, and add a new 909 * rb buffer for the next coming msg 910 */ 911 spin_lock(&cl->free_list_spinlock); 912 913 if (!list_empty(&cl->free_rb_list.list)) { 914 new_rb = list_entry(cl->free_rb_list.list.next, 915 struct ishtp_cl_rb, list); 916 list_del_init(&new_rb->list); 917 spin_unlock(&cl->free_list_spinlock); 918 new_rb->cl = cl; 919 new_rb->buf_idx = 0; 920 INIT_LIST_HEAD(&new_rb->list); 921 list_add_tail(&new_rb->list, 922 &dev->read_list.list); 923 924 ishtp_hbm_cl_flow_control_req(dev, cl); 925 } else { 926 spin_unlock(&cl->free_list_spinlock); 927 } 928 } 929 /* One more fragment in message (even if this was last) */ 930 ++cl->recv_msg_num_frags; 931 932 /* 933 * We can safely break here (and in BH too), 934 * a single input message can go only to a single request! 935 */ 936 break; 937 } 938 939 spin_unlock_irqrestore(&dev->read_list_spinlock, flags); 940 /* If it's nobody's message, just read and discard it */ 941 if (!buffer) { 942 uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE]; 943 944 dev_err(dev->devc, "Dropped Rx msg - no request\n"); 945 dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length); 946 goto eoi; 947 } 948 949 if (complete_rb) { 950 cl = complete_rb->cl; 951 cl->ts_rx = ktime_get(); 952 ++cl->recv_msg_cnt_ipc; 953 ishtp_cl_read_complete(complete_rb); 954 } 955 eoi: 956 return; 957 } 958 959 /** 960 * recv_ishtp_cl_msg_dma() -Receive client message 961 * @dev: ISHTP device instance 962 * @msg: message pointer 963 * @hbm: hbm buffer 964 * 965 * Receive and dispatch ISHTP client messages using DMA. This function executes 966 * in ISR or work queue context 967 */ 968 void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg, 969 struct dma_xfer_hbm *hbm) 970 { 971 struct ishtp_cl *cl; 972 struct ishtp_cl_rb *rb; 973 struct ishtp_cl_rb *new_rb; 974 unsigned char *buffer = NULL; 975 struct ishtp_cl_rb *complete_rb = NULL; 976 unsigned long flags; 977 978 spin_lock_irqsave(&dev->read_list_spinlock, flags); 979 980 list_for_each_entry(rb, &dev->read_list.list, list) { 981 cl = rb->cl; 982 if (!cl || !(cl->host_client_id == hbm->host_client_id && 983 cl->fw_client_id == hbm->fw_client_id) || 984 !(cl->state == ISHTP_CL_CONNECTED)) 985 continue; 986 987 /* 988 * If no Rx buffer is allocated, disband the rb 989 */ 990 if (rb->buffer.size == 0 || rb->buffer.data == NULL) { 991 spin_unlock_irqrestore(&dev->read_list_spinlock, flags); 992 dev_err(&cl->device->dev, 993 "response buffer is not allocated.\n"); 994 list_del(&rb->list); 995 ishtp_io_rb_free(rb); 996 cl->status = -ENOMEM; 997 goto eoi; 998 } 999 1000 /* 1001 * If message buffer overflown (exceeds max. client msg 1002 * size, drop message and return to free buffer. 1003 * Do we need to disconnect such a client? (We don't send 1004 * back FC, so communication will be stuck anyway) 1005 */ 1006 if (rb->buffer.size < hbm->msg_length) { 1007 spin_unlock_irqrestore(&dev->read_list_spinlock, flags); 1008 dev_err(&cl->device->dev, 1009 "message overflow. size %d len %d idx %ld\n", 1010 rb->buffer.size, hbm->msg_length, rb->buf_idx); 1011 list_del(&rb->list); 1012 ishtp_cl_io_rb_recycle(rb); 1013 cl->status = -EIO; 1014 goto eoi; 1015 } 1016 1017 buffer = rb->buffer.data; 1018 1019 /* 1020 * if current fw don't support cache snooping, driver have to 1021 * flush the cache manually. 1022 */ 1023 if (dev->ops->dma_no_cache_snooping && 1024 dev->ops->dma_no_cache_snooping(dev)) 1025 clflush_cache_range(msg, hbm->msg_length); 1026 1027 memcpy(buffer, msg, hbm->msg_length); 1028 rb->buf_idx = hbm->msg_length; 1029 1030 /* Last fragment in message - it's complete */ 1031 cl->status = 0; 1032 list_del(&rb->list); 1033 complete_rb = rb; 1034 1035 --cl->out_flow_ctrl_creds; 1036 /* 1037 * the whole msg arrived, send a new FC, and add a new 1038 * rb buffer for the next coming msg 1039 */ 1040 spin_lock(&cl->free_list_spinlock); 1041 1042 if (!list_empty(&cl->free_rb_list.list)) { 1043 new_rb = list_entry(cl->free_rb_list.list.next, 1044 struct ishtp_cl_rb, list); 1045 list_del_init(&new_rb->list); 1046 spin_unlock(&cl->free_list_spinlock); 1047 new_rb->cl = cl; 1048 new_rb->buf_idx = 0; 1049 INIT_LIST_HEAD(&new_rb->list); 1050 list_add_tail(&new_rb->list, 1051 &dev->read_list.list); 1052 1053 ishtp_hbm_cl_flow_control_req(dev, cl); 1054 } else { 1055 spin_unlock(&cl->free_list_spinlock); 1056 } 1057 1058 /* One more fragment in message (this is always last) */ 1059 ++cl->recv_msg_num_frags; 1060 1061 /* 1062 * We can safely break here (and in BH too), 1063 * a single input message can go only to a single request! 1064 */ 1065 break; 1066 } 1067 1068 spin_unlock_irqrestore(&dev->read_list_spinlock, flags); 1069 /* If it's nobody's message, just read and discard it */ 1070 if (!buffer) { 1071 dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n"); 1072 goto eoi; 1073 } 1074 1075 if (complete_rb) { 1076 cl = complete_rb->cl; 1077 cl->ts_rx = ktime_get(); 1078 ++cl->recv_msg_cnt_dma; 1079 ishtp_cl_read_complete(complete_rb); 1080 } 1081 eoi: 1082 return; 1083 } 1084 1085 void *ishtp_get_client_data(struct ishtp_cl *cl) 1086 { 1087 return cl->client_data; 1088 } 1089 EXPORT_SYMBOL(ishtp_get_client_data); 1090 1091 void ishtp_set_client_data(struct ishtp_cl *cl, void *data) 1092 { 1093 cl->client_data = data; 1094 } 1095 EXPORT_SYMBOL(ishtp_set_client_data); 1096 1097 struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl) 1098 { 1099 return cl->dev; 1100 } 1101 EXPORT_SYMBOL(ishtp_get_ishtp_device); 1102 1103 void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size) 1104 { 1105 cl->tx_ring_size = size; 1106 } 1107 EXPORT_SYMBOL(ishtp_set_tx_ring_size); 1108 1109 void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size) 1110 { 1111 cl->rx_ring_size = size; 1112 } 1113 EXPORT_SYMBOL(ishtp_set_rx_ring_size); 1114 1115 void ishtp_set_connection_state(struct ishtp_cl *cl, int state) 1116 { 1117 cl->state = state; 1118 } 1119 EXPORT_SYMBOL(ishtp_set_connection_state); 1120 1121 void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id) 1122 { 1123 cl->fw_client_id = fw_client_id; 1124 } 1125 EXPORT_SYMBOL(ishtp_cl_set_fw_client_id); 1126