// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_task_queue.h"

/* Open a packet data online channel between the network layer and CP. */
int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
{
	dev_dbg(ipc_imem->dev, "%s if id: %d",
		ipc_imem_phase_get_string(ipc_imem->phase), if_id);

	/* The network interface is only supported in the runtime phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
		dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
			ipc_imem_phase_get_string(ipc_imem->phase));
		return -EIO;
	}

	return ipc_mux_open_session(ipc_imem->mux, if_id);
}

/* Release a net link to CP. */
void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
			     int channel_id)
{
	if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
	    if_id <= IP_MUX_SESSION_END)
		ipc_mux_close_session(ipc_imem->mux, if_id);
}

/* Tasklet call to do uplink transfer. */
static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
				  void *msg, size_t size)
{
	ipc_imem_ul_send(ipc_imem);

	return 0;
}

/* Through tasklet to do sio write. */
static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
{
	return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
					NULL, 0, false);
}

/* Function to transfer UL data. */
int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
			       int if_id, int channel_id, struct sk_buff *skb)
{
	int ret = -EINVAL;

	if (!ipc_imem || channel_id < 0)
		goto out;

	/* Is CP running? */
	if (ipc_imem->phase != IPC_P_RUN) {
		dev_dbg(ipc_imem->dev, "phase %s transmit",
			ipc_imem_phase_get_string(ipc_imem->phase));
		ret = -EIO;
		goto out;
	}

	/* Route the UL packet through the IP MUX layer. */
	ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
out:
	return ret;
}

/* Initialize the wwan channel. */
void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
				enum ipc_mux_protocol mux_type)
{
	struct ipc_chnl_cfg chnl_cfg = { 0 };

	ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);

	/* If modem version is invalid (0xffffffff), do not initialize WWAN. */
	if (ipc_imem->cp_version == -1) {
		dev_err(ipc_imem->dev, "invalid CP version");
		return;
	}

	ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);

	if (ipc_imem->mmio->mux_protocol == MUX_AGGREGATION &&
	    ipc_imem->nr_of_channels == IPC_MEM_IP_CHL_ID_0) {
		chnl_cfg.ul_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
		chnl_cfg.dl_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_DL;
		chnl_cfg.dl_buf_size = IPC_MEM_MAX_ADB_BUF_SIZE;
	}

	ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
			      IRQ_MOD_OFF);

	/* WWAN registration. */
	ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
	if (!ipc_imem->wwan)
		dev_err(ipc_imem->dev,
			"failed to register the ipc_wwan interfaces");
}
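
/* Illustrative sketch (not part of the driver): how the three WWAN
 * helpers above are typically paired by a caller such as the wwan
 * net-device glue. The function name and the simplistic error handling
 * here are assumptions for illustration only.
 */
static int __maybe_unused
ipc_imem_example_wwan_xmit(struct iosm_imem *ipc_imem, int if_id,
			   struct sk_buff *skb)
{
	int ch_id, ret;

	/* Open the MUX session for this interface id (runtime phase only). */
	ch_id = ipc_imem_sys_wwan_open(ipc_imem, if_id);
	if (ch_id < 0)
		return ch_id;

	/* Hand one UL skb to the IP MUX layer for encoding. */
	ret = ipc_imem_sys_wwan_transmit(ipc_imem, if_id, ch_id, skb);

	/* Release the session again once the link is torn down. */
	ipc_imem_sys_wwan_close(ipc_imem, if_id, ch_id);

	return ret;
}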

/* Map an skb to DMA for transfer. */
static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
				   struct sk_buff *skb)
{
	struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
	char *buf = skb->data;
	int len = skb->len;
	dma_addr_t mapping;
	int ret;

	ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);
	if (ret)
		goto err;

	BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));

	IPC_CB(skb)->mapping = mapping;
	IPC_CB(skb)->direction = DMA_TO_DEVICE;
	IPC_CB(skb)->len = len;
	IPC_CB(skb)->op_type = (u8)UL_DEFAULT;

err:
	return ret;
}

/* Return true if the channel is ready for use. */
static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
				       struct ipc_mem_channel *channel)
{
	enum ipc_phase phase;

	/* Update the current operation phase. */
	phase = ipc_imem->phase;

	/* Select the operation depending on the execution stage. */
	switch (phase) {
	case IPC_P_RUN:
	case IPC_P_PSI:
	case IPC_P_EBL:
		break;

	case IPC_P_ROM:
		/* Prepare the PSI image for the CP ROM driver and
		 * suspend the flash app.
		 */
		if (channel->state != IMEM_CHANNEL_RESERVED) {
			dev_err(ipc_imem->dev,
				"ch[%d]: invalid channel state %d, expected %d",
				channel->channel_id, channel->state,
				IMEM_CHANNEL_RESERVED);
			goto channel_unavailable;
		}
		goto channel_available;

	default:
		/* Ignore uplink actions in all other phases. */
		dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
			channel->channel_id, phase);
		goto channel_unavailable;
	}
	/* Check the full availability of the channel. */
	if (channel->state != IMEM_CHANNEL_ACTIVE) {
		dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
			channel->channel_id, channel->state);
		goto channel_unavailable;
	}

channel_available:
	return true;

channel_unavailable:
	return false;
}
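
/* Minimal sketch (an assumed helper, not an existing driver function):
 * the fields stashed in the skb control block by
 * ipc_imem_map_skb_to_dma() above are exactly what a completion path
 * needs to undo the mapping again.
 */
static void __maybe_unused
ipc_imem_example_unmap_skb(struct iosm_imem *ipc_imem, struct sk_buff *skb)
{
	/* Reverse of ipc_imem_map_skb_to_dma() using the saved state. */
	ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
			    IPC_CB(skb)->mapping, IPC_CB(skb)->direction);
}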

/**
 * ipc_imem_sys_port_close - Release a sio link to CP.
 * @ipc_imem: Imem instance.
 * @channel: Channel instance.
 */
void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
			     struct ipc_mem_channel *channel)
{
	enum ipc_phase curr_phase;
	int status = 0;
	u32 tail = 0;

	curr_phase = ipc_imem->phase;

	/* If the current phase is IPC_P_OFF or the SIO ID is negative then
	 * the channel is already freed. Nothing to do.
	 */
	if (curr_phase == IPC_P_OFF) {
		dev_err(ipc_imem->dev,
			"nothing to do. Current Phase: %s",
			ipc_imem_phase_get_string(curr_phase));
		return;
	}

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel->channel_id, channel->state);
		return;
	}

	/* If there are any pending UL TDs then wait for Timeout/Completion
	 * before closing the pipe.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		ipc_imem->app_notify_ul_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * UL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->ul_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}

		ipc_imem->app_notify_ul_pend = 0;
	}

	/* Likewise, wait for Timeout/Completion on any pending DL TDs before
	 * closing the pipe.
	 */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		ipc_imem->app_notify_dl_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * DL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->dl_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}

		ipc_imem->app_notify_dl_pend = 0;
	}

	/* Due to the wait for completion in messages, there is a small window
	 * between closing the pipe and marking the channel as closed. In this
	 * small window there could be an HP update from the Host Driver. Hence
	 * update the channel state as CLOSING to avoid an unnecessary
	 * interrupt towards CP.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;

	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);

	ipc_imem_channel_free(channel);
}

/* Open a PORT link to CP and return the channel. */
struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
					       int chl_id, int hp_id)
{
	struct ipc_mem_channel *channel;
	int ch_id;

	/* The PORT interface is only supported in the runtime phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
		dev_err(ipc_imem->dev, "PORT open refused, phase %s",
			ipc_imem_phase_get_string(ipc_imem->phase));
		return NULL;
	}

	ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);
	if (ch_id < 0) {
		dev_err(ipc_imem->dev,
			"reservation of a PORT channel id failed");
		return NULL;
	}

	channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);
	if (!channel) {
		dev_err(ipc_imem->dev, "PORT channel id open failed");
		return NULL;
	}

	return channel;
}
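
/* Illustrative sketch (not part of the driver): a hypothetical control
 * port user pairing the open/close helpers above. The channel id and
 * head-pointer id are assumed to come from the port configuration
 * tables in iosm_ipc_chnl_cfg.h.
 */
static int __maybe_unused
ipc_imem_example_port_session(struct iosm_imem *ipc_imem, int chl_id,
			      int hp_id)
{
	struct ipc_mem_channel *channel;

	channel = ipc_imem_sys_port_open(ipc_imem, chl_id, hp_id);
	if (!channel)
		return -EIO;

	/* ... exchange control messages over the channel ... */

	ipc_imem_sys_port_close(ipc_imem, channel);

	return 0;
}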

/* Transfer an skb to the modem. */
int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
{
	struct ipc_mem_channel *channel = ipc_cdev->channel;
	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
	int ret = -EIO;

	if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
	    ipc_imem->phase == IPC_P_OFF_REQ)
		goto out;

	ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);
	if (ret)
		goto out;

	/* Add skb to the uplink skbuf accumulator. */
	skb_queue_tail(&channel->ul_list, skb);

	ret = ipc_imem_call_cdev_write(ipc_imem);
	if (ret) {
		skb_dequeue_tail(&channel->ul_list);
		dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
			ipc_cdev->channel->channel_id);
	}
out:
	return ret;
}

/* Open a SIO link to CP and return the channel instance. */
struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	enum ipc_phase phase;
	int channel_id;

	phase = ipc_imem_phase_update(ipc_imem);
	switch (phase) {
	case IPC_P_OFF:
	case IPC_P_ROM:
		/* Get a channel id as flash id and reserve it. */
		channel_id = ipc_imem_channel_alloc(ipc_imem,
						    IPC_MEM_CTRL_CHL_ID_7,
						    IPC_CTYPE_CTRL);
		if (channel_id < 0) {
			dev_err(ipc_imem->dev,
				"reservation of a flash channel id failed");
			goto error;
		}

		ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id;
		channel = &ipc_imem->channels[channel_id];

		/* Enqueue chip info data to be read. */
		if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) {
			dev_err(ipc_imem->dev, "Enqueue of chip info failed");
			channel->state = IMEM_CHANNEL_FREE;
			goto error;
		}

		return channel;

	case IPC_P_PSI:
	case IPC_P_EBL:
		ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
		if (ipc_imem->cp_version == -1) {
			dev_err(ipc_imem->dev, "invalid CP version");
			goto error;
		}

		channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id;
		return ipc_imem_channel_open(ipc_imem, channel_id,
					     IPC_HP_CDEV_OPEN);

	default:
		/* CP is in the wrong state (e.g. CRASH or CD_READY). */
		dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase);
	}
error:
	return NULL;
}
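
/* Note: ipc_imem_sys_devlink_open() is phase dependent. In IPC_P_OFF/
 * IPC_P_ROM it reserves the flash channel id and pre-queues the
 * chip-info read; in IPC_P_PSI/IPC_P_EBL the channel id has already
 * been reserved, so only the pipes are (re)opened via
 * ipc_imem_channel_open().
 */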

/* Release a SIO channel link to CP. */
void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
{
	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
	int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
	enum ipc_mem_exec_stage exec_stage;
	struct ipc_mem_channel *channel;
	int status = 0;
	u32 tail = 0;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;

	/* Poll for up to boot_check_timeout until CP reaches the RUN or PSI
	 * execution stage.
	 */
	do {
		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		if (exec_stage == IPC_MEM_EXEC_STAGE_RUN ||
		    exec_stage == IPC_MEM_EXEC_STAGE_PSI)
			break;
		msleep(20);
		boot_check_timeout -= 20;
	} while (boot_check_timeout > 0);

	/* If there are any pending UL TDs then wait for Timeout/Completion
	 * before closing the pipe.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->ul_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Data Timeout on UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}
	}

	/* Likewise for any pending DL TDs. */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->dl_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Data Timeout on DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}
	}

	/* Due to the wait for completion in messages, there is a small window
	 * between closing the pipe and marking the channel as closed. In this
	 * small window there could be an HP update from the Host Driver. Hence
	 * update the channel state as CLOSING to avoid an unnecessary
	 * interrupt towards CP.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;
	/* Release the pipe resources. */
	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
	ipc_imem->nr_of_channels--;
}

void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
				    struct sk_buff *skb)
{
	skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb);
	complete(&ipc_devlink->devlink_sio.read_sem);
}
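
/* Note: ipc_imem_sys_devlink_notify_rx() is the producer half of a
 * simple producer/consumer pair. Each received skb is queued on
 * devlink_sio.rx_list and read_sem is completed once per skb, which
 * wakes the consumer in ipc_imem_sys_devlink_read() below.
 */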

/* PSI transfer */
static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem,
				     struct ipc_mem_channel *channel,
				     unsigned char *buf, int count)
{
	int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT;
	enum ipc_mem_exec_stage exec_stage;
	dma_addr_t mapping = 0;
	int ret;

	ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
				DMA_TO_DEVICE);
	if (ret)
		goto pcie_addr_map_fail;

	/* Save the PSI information for the CP ROM driver on the doorbell
	 * scratchpad.
	 */
	ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
	ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);

	ret = wait_for_completion_interruptible_timeout
	      (&channel->ul_sem,
	       msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
	if (ret <= 0) {
		dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d",
			ret);
		goto psi_transfer_fail;
	}

	/* If the PSI download fails, return the CP boot ROM exit code. */
	if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT &&
	    ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) {
		ret = (-1) * ((int)ipc_imem->rom_exit_code);
		goto psi_transfer_fail;
	}

	dev_dbg(ipc_imem->dev, "PSI image successfully downloaded");

	/* Wait psi_start_timeout milliseconds until the CP PSI image is
	 * running and has updated the execution_stage field with
	 * IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage.
	 */
	do {
		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);

		if (exec_stage == IPC_MEM_EXEC_STAGE_PSI)
			break;

		msleep(20);
		psi_start_timeout -= 20;
	} while (psi_start_timeout > 0);

	if (exec_stage != IPC_MEM_EXEC_STAGE_PSI)
		goto psi_transfer_fail; /* Unknown status of CP PSI process. */

	ipc_imem->phase = IPC_P_PSI;

	/* Enter the PSI phase. */
	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage);

	/* Request the RUNNING state from CP and wait until it is reached
	 * or the timeout expires.
	 */
	ipc_imem_ipc_init_check(ipc_imem);

	ret = wait_for_completion_interruptible_timeout
	      (&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
	if (ret <= 0) {
		dev_err(ipc_imem->dev,
			"Failed PSI RUNNING state on CP, Error-%d", ret);
		goto psi_transfer_fail;
	}

	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) !=
	    IPC_MEM_DEVICE_IPC_RUNNING) {
		dev_err(ipc_imem->dev,
			"ch[%d] %s: unexpected CP IPC state %d, not RUNNING",
			channel->channel_id,
			ipc_imem_phase_get_string(ipc_imem->phase),
			ipc_mmio_get_ipc_state(ipc_imem->mmio));

		goto psi_transfer_fail;
	}

	/* Create the flash channel for the transfer of the images. */
	if (!ipc_imem_sys_devlink_open(ipc_imem)) {
		dev_err(ipc_imem->dev, "can't open flash_channel");
		goto psi_transfer_fail;
	}

	ret = 0;
psi_transfer_fail:
	ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
pcie_addr_map_fail:
	return ret;
}
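
/* Note: ipc_imem_sys_psi_transfer() is only reached from
 * ipc_imem_sys_devlink_write() below while CP is still in the ROM
 * phase; subsequent images go through the regular skb uplink path
 * instead.
 */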

int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
			       unsigned char *buf, int count)
{
	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
	struct ipc_mem_channel *channel;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;

	/* In the ROM phase the PSI image is passed to CP via a specific
	 * shared memory area and the doorbell scratchpad directly.
	 */
	if (ipc_imem->phase == IPC_P_ROM) {
		ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count);
		/* If the PSI transfer fails then send a crash
		 * signature.
		 */
		if (ret > 0)
			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      false);
		goto out;
	}

	/* Allocate skb memory for the uplink buffer. */
	skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL,
				 &mapping, DMA_TO_DEVICE, 0);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	skb_put_data(skb, buf, count);

	IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;

	/* Add skb to the uplink skbuf accumulator. */
	skb_queue_tail(&channel->ul_list, skb);

	/* Inform the IPC tasklet to pass uplink IP packets to CP. */
	if (!ipc_imem_call_cdev_write(ipc_imem)) {
		ret = wait_for_completion_interruptible(&channel->ul_sem);
		if (ret < 0) {
			dev_err(ipc_imem->dev,
				"ch[%d] no CP confirmation, status = %d",
				channel->channel_id, ret);
			ipc_pcie_kfree_skb(ipc_devlink->pcie, skb);
			goto out;
		}
	}
	ret = 0;
out:
	return ret;
}

int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
			      u32 bytes_to_read, u32 *bytes_read)
{
	struct sk_buff *skb = NULL;
	int rc = 0;

	/* Check whether an skb is available on rx_list, or wait for one. */
	devlink->devlink_sio.devlink_read_pend = 1;
	while (!skb && !(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) {
		if (!wait_for_completion_interruptible_timeout
		    (&devlink->devlink_sio.read_sem,
		     msecs_to_jiffies(IPC_READ_TIMEOUT))) {
			dev_err(devlink->dev, "Read timed out");
			rc = -ETIMEDOUT;
			goto devlink_read_fail;
		}
	}
	devlink->devlink_sio.devlink_read_pend = 0;

	if (bytes_to_read < skb->len) {
		dev_err(devlink->dev, "Invalid size, expected len %d",
			skb->len);
		rc = -EINVAL;
		goto devlink_read_fail;
	}

	*bytes_read = skb->len;
	memcpy(data, skb->data, skb->len);

devlink_read_fail:
	dev_kfree_skb(skb);
	return rc;
}
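
/* Illustrative sketch (not part of the driver): a hypothetical flash
 * command/response exchange built on the devlink read/write helpers
 * above. The function name, buffer sizes, and the assumption that each
 * write is answered by one downlink message are for illustration only.
 */
static int __maybe_unused
ipc_imem_example_devlink_xfer(struct iosm_devlink *ipc_devlink,
			      unsigned char *cmd, int cmd_len,
			      u8 *resp, u32 resp_size)
{
	u32 bytes_read = 0;
	int ret;

	/* Queue the command for uplink and wait for CP confirmation. */
	ret = ipc_imem_sys_devlink_write(ipc_devlink, cmd, cmd_len);
	if (ret)
		return ret;

	/* Block until CP answers on the downlink path or a timeout hits. */
	return ipc_imem_sys_devlink_read(ipc_devlink, resp, resp_size,
					 &bytes_read);
}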