1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020-21 Intel Corporation.
4  */
5 
6 #include <linux/delay.h>
7 
8 #include "iosm_ipc_chnl_cfg.h"
9 #include "iosm_ipc_devlink.h"
10 #include "iosm_ipc_imem.h"
11 #include "iosm_ipc_imem_ops.h"
12 #include "iosm_ipc_port.h"
13 #include "iosm_ipc_task_queue.h"
14 
15 /* Open a packet data online channel between the network layer and CP. */
16 int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
17 {
18 	dev_dbg(ipc_imem->dev, "%s if id: %d",
19 		ipc_imem_phase_get_string(ipc_imem->phase), if_id);
20 
21 	/* The network interface is only supported in the runtime phase. */
22 	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
23 		dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
24 			ipc_imem_phase_get_string(ipc_imem->phase));
25 		return -EIO;
26 	}
27 
28 	return ipc_mux_open_session(ipc_imem->mux, if_id);
29 }
30 
31 /* Release a net link to CP. */
32 void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
33 			     int channel_id)
34 {
35 	if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
36 	    if_id <= IP_MUX_SESSION_END)
37 		ipc_mux_close_session(ipc_imem->mux, if_id);
38 }
39 
40 /* Tasklet call to do uplink transfer. */
/* Tasklet call to do uplink transfer.
 *
 * Task-queue callback scheduled by ipc_imem_call_cdev_write(). Clears the
 * "write event pending" flag first so a new trigger can be queued while
 * this one is being serviced, then calls ipc_imem_ul_send() to handle the
 * uplink data. The arg/msg/size parameters of the callback signature are
 * unused here. Always returns 0.
 */
static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
				  void *msg, size_t size)
{
	ipc_imem->ev_cdev_write_pending = false;
	ipc_imem_ul_send(ipc_imem);

	return 0;
}
49 
50 /* Through tasklet to do sio write. */
51 static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
52 {
53 	if (ipc_imem->ev_cdev_write_pending)
54 		return -1;
55 
56 	ipc_imem->ev_cdev_write_pending = true;
57 
58 	return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
59 					NULL, 0, false);
60 }
61 
62 /* Function for transfer UL data */
63 int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
64 			       int if_id, int channel_id, struct sk_buff *skb)
65 {
66 	int ret = -EINVAL;
67 
68 	if (!ipc_imem || channel_id < 0)
69 		goto out;
70 
71 	/* Is CP Running? */
72 	if (ipc_imem->phase != IPC_P_RUN) {
73 		dev_dbg(ipc_imem->dev, "phase %s transmit",
74 			ipc_imem_phase_get_string(ipc_imem->phase));
75 		ret = -EIO;
76 		goto out;
77 	}
78 
79 	/* Route the UL packet through IP MUX Layer */
80 	ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
81 out:
82 	return ret;
83 }
84 
85 /* Initialize wwan channel */
/* Initialize wwan channel
 *
 * Reads the CP version, sets up the WWAN data channel configuration and
 * registers the wwan net devices. Errors are logged but not returned.
 *
 * NOTE(review): mux_type is currently unused in this function — confirm
 * whether the channel configuration should depend on the mux protocol.
 */
void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
				enum ipc_mux_protocol mux_type)
{
	struct ipc_chnl_cfg chnl_cfg = { 0 };

	ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);

	/* If modem version is invalid (0xffffffff), do not initialize WWAN. */
	if (ipc_imem->cp_version == -1) {
		dev_err(ipc_imem->dev, "invalid CP version");
		return;
	}

	/* Fetch the channel configuration for the next free channel index
	 * and create the WWAN channel with interrupt moderation disabled.
	 */
	ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
	ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
			      IRQ_MOD_OFF);

	/* WWAN registration. */
	ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
	if (!ipc_imem->wwan)
		dev_err(ipc_imem->dev,
			"failed to register the ipc_wwan interfaces");
}
109 
110 /* Map SKB to DMA for transfer */
111 static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
112 				   struct sk_buff *skb)
113 {
114 	struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
115 	char *buf = skb->data;
116 	int len = skb->len;
117 	dma_addr_t mapping;
118 	int ret;
119 
120 	ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);
121 
122 	if (ret)
123 		goto err;
124 
125 	BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
126 
127 	IPC_CB(skb)->mapping = mapping;
128 	IPC_CB(skb)->direction = DMA_TO_DEVICE;
129 	IPC_CB(skb)->len = len;
130 	IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
131 
132 err:
133 	return ret;
134 }
135 
136 /* return true if channel is ready for use */
137 static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
138 				       struct ipc_mem_channel *channel)
139 {
140 	enum ipc_phase phase;
141 
142 	/* Update the current operation phase. */
143 	phase = ipc_imem->phase;
144 
145 	/* Select the operation depending on the execution stage. */
146 	switch (phase) {
147 	case IPC_P_RUN:
148 	case IPC_P_PSI:
149 	case IPC_P_EBL:
150 		break;
151 
152 	case IPC_P_ROM:
153 		/* Prepare the PSI image for the CP ROM driver and
154 		 * suspend the flash app.
155 		 */
156 		if (channel->state != IMEM_CHANNEL_RESERVED) {
157 			dev_err(ipc_imem->dev,
158 				"ch[%d]:invalid channel state %d,expected %d",
159 				channel->channel_id, channel->state,
160 				IMEM_CHANNEL_RESERVED);
161 			goto channel_unavailable;
162 		}
163 		goto channel_available;
164 
165 	default:
166 		/* Ignore uplink actions in all other phases. */
167 		dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
168 			channel->channel_id, phase);
169 		goto channel_unavailable;
170 	}
171 	/* Check the full availability of the channel. */
172 	if (channel->state != IMEM_CHANNEL_ACTIVE) {
173 		dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
174 			channel->channel_id, channel->state);
175 		goto channel_unavailable;
176 	}
177 
178 channel_available:
179 	return true;
180 
181 channel_unavailable:
182 	return false;
183 }
184 
185 /* Release a sio link to CP. */
/* Release a sio link to CP.
 *
 * Drains pending uplink and downlink data (bounded waits of
 * IPC_PEND_DATA_TIMEOUT ms each), then closes both pipes and frees the
 * channel. Returns silently when the device is already off or the
 * channel was already freed.
 */
void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev)
{
	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
	struct ipc_mem_channel *channel = ipc_cdev->channel;
	enum ipc_phase curr_phase;
	int status = 0;
	u32 tail = 0;

	curr_phase = ipc_imem->phase;

	/* If current phase is IPC_P_OFF or SIO ID is -ve then
	 * channel is already freed. Nothing to do.
	 */
	if (curr_phase == IPC_P_OFF) {
		dev_err(ipc_imem->dev,
			"nothing to do. Current Phase: %s",
			ipc_imem_phase_get_string(curr_phase));
		return;
	}

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel->channel_id, channel->state);
		return;
	}

	/* If there are any pending UL TDs (tail lagging behind head) then
	 * wait for Timeout/Completion before closing pipe.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		ipc_imem->app_notify_ul_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * UL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->ul_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}

		ipc_imem->app_notify_ul_pend = 0;
	}

	/* Likewise for the DL direction: fetch the current tail index from
	 * the protocol layer and wait for Timeout/Completion if DL data is
	 * still pending before closing the pipe.
	 */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		ipc_imem->app_notify_dl_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * DL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->dl_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}

		ipc_imem->app_notify_dl_pend = 0;
	}

	/* Due to wait for completion in messages, there is a small window
	 * between closing the pipe and updating the channel is closed. In this
	 * small window there could be HP update from Host Driver. Hence update
	 * the channel state as CLOSING to avoid unnecessary interrupt
	 * towards CP.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;

	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);

	ipc_imem_channel_free(channel);
}
274 
275 /* Open a PORT link to CP and return the channel */
276 struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
277 					       int chl_id, int hp_id)
278 {
279 	struct ipc_mem_channel *channel;
280 	int ch_id;
281 
282 	/* The PORT interface is only supported in the runtime phase. */
283 	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
284 		dev_err(ipc_imem->dev, "PORT open refused, phase %s",
285 			ipc_imem_phase_get_string(ipc_imem->phase));
286 		return NULL;
287 	}
288 
289 	ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);
290 
291 	if (ch_id < 0) {
292 		dev_err(ipc_imem->dev, "reservation of an PORT chnl id failed");
293 		return NULL;
294 	}
295 
296 	channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);
297 
298 	if (!channel) {
299 		dev_err(ipc_imem->dev, "PORT channel id open failed");
300 		return NULL;
301 	}
302 
303 	return channel;
304 }
305 
306 /* transfer skb to modem */
307 int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
308 {
309 	struct ipc_mem_channel *channel = ipc_cdev->channel;
310 	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
311 	int ret = -EIO;
312 
313 	if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
314 	    ipc_imem->phase == IPC_P_OFF_REQ)
315 		goto out;
316 
317 	ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);
318 
319 	if (ret)
320 		goto out;
321 
322 	/* Add skb to the uplink skbuf accumulator. */
323 	skb_queue_tail(&channel->ul_list, skb);
324 
325 	ret = ipc_imem_call_cdev_write(ipc_imem);
326 
327 	if (ret) {
328 		skb_dequeue_tail(&channel->ul_list);
329 		dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
330 			ipc_cdev->channel->channel_id);
331 	}
332 out:
333 	return ret;
334 }
335 
336 /* Open a SIO link to CP and return the channel instance */
/* Open a SIO link to CP and return the channel instance
 *
 * Behavior depends on the execution phase: in OFF/ROM a flash channel id
 * is reserved and chip info readout is triggered; in PSI/EBL the already
 * reserved channel is opened after validating the CP version. Returns
 * NULL on failure or in any other phase.
 */
struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	enum ipc_phase phase;
	int channel_id;

	phase = ipc_imem_phase_update(ipc_imem);
	switch (phase) {
	case IPC_P_OFF:
	case IPC_P_ROM:
		/* Get a channel id as flash id and reserve it. */
		channel_id = ipc_imem_channel_alloc(ipc_imem,
						    IPC_MEM_CTRL_CHL_ID_7,
						    IPC_CTYPE_CTRL);

		if (channel_id < 0) {
			dev_err(ipc_imem->dev,
				"reservation of a flash channel id failed");
			goto error;
		}

		/* Remember the reserved id so the PSI/EBL path below can
		 * open the same channel later.
		 */
		ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id;
		channel = &ipc_imem->channels[channel_id];

		/* Enqueue chip info data to be read */
		if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) {
			dev_err(ipc_imem->dev, "Enqueue of chip info failed");
			/* Release the reservation made above. */
			channel->state = IMEM_CHANNEL_FREE;
			goto error;
		}

		return channel;

	case IPC_P_PSI:
	case IPC_P_EBL:
		/* A CP version of -1 (0xffffffff) indicates an unreadable
		 * or invalid modem version.
		 */
		ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
		if (ipc_imem->cp_version == -1) {
			dev_err(ipc_imem->dev, "invalid CP version");
			goto error;
		}

		channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id;
		return ipc_imem_channel_open(ipc_imem, channel_id,
					     IPC_HP_CDEV_OPEN);

	default:
		/* CP is in the wrong state (e.g. CRASH or CD_READY) */
		dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase);
	}
error:
	return NULL;
}
389 
390 /* Release a SIO channel link to CP. */
/* Release a SIO channel link to CP.
 *
 * Polls (up to BOOT_CHECK_DEFAULT_TIMEOUT ms, in 20 ms steps) for CP to
 * reach the RUN or PSI execution stage, drains pending UL/DL data with
 * bounded waits, then marks the channel CLOSING and releases both pipes.
 */
void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
{
	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
	int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
	enum ipc_mem_exec_stage exec_stage;
	struct ipc_mem_channel *channel;
	int status = 0;
	u32 tail = 0;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
	/* Increase the total wait time to boot_check_timeout */
	do {
		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		if (exec_stage == IPC_MEM_EXEC_STAGE_RUN ||
		    exec_stage == IPC_MEM_EXEC_STAGE_PSI)
			break;
		msleep(20);
		boot_check_timeout -= 20;
	} while (boot_check_timeout > 0);

	/* If there are any pending TDs then wait for Timeout/Completion before
	 * closing pipe.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		status = wait_for_completion_interruptible_timeout
			(&ipc_imem->ul_pend_sem,
			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		/* status == 0 means the wait timed out; log and proceed. */
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Data Timeout on UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}
	}

	/* Same for the DL direction: compare the protocol-layer tail index
	 * against the cached one to detect pending DL data.
	 */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		status = wait_for_completion_interruptible_timeout
			(&ipc_imem->dl_pend_sem,
			 msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Data Timeout on DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}
	}

	/* Due to wait for completion in messages, there is a small window
	 * between closing the pipe and updating the channel is closed. In this
	 * small window there could be HP update from Host Driver. Hence update
	 * the channel state as CLOSING to avoid unnecessary interrupt
	 * towards CP.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;
	/* Release the pipe resources */
	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
}
454 
/* Hand a received devlink skb to the reader side.
 *
 * Queues the skb on the devlink RX list and completes read_sem, waking a
 * reader blocked in ipc_imem_sys_devlink_read().
 */
void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
				    struct sk_buff *skb)
{
	skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb);
	complete(&ipc_devlink->devlink_sio.read_sem);
}
461 
462 /* PSI transfer */
/* PSI transfer
 *
 * Transfers the PSI image to the CP ROM driver: maps the buffer for DMA,
 * announces it via the doorbell scratchpad, fires the doorbell and then
 * walks CP through PSI start, IPC RUNNING state and flash channel open.
 *
 * Returns 0 on success, a negative errno/ROM exit code on early failure,
 * or the (positive) leftover wait status when a later bring-up step fails
 * after the image itself was accepted — callers use ret > 0 to decide
 * whether to send a crash signature.
 */
static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem,
				     struct ipc_mem_channel *channel,
				     unsigned char *buf, int count)
{
	int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT;
	enum ipc_mem_exec_stage exec_stage;

	dma_addr_t mapping = 0;
	int ret;

	ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
				DMA_TO_DEVICE);
	if (ret)
		goto pcie_addr_map_fail;

	/* Save the PSI information for the CP ROM driver on the doorbell
	 * scratchpad.
	 */
	ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
	ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);

	/* Wait for CP to acknowledge the PSI transfer. */
	ret = wait_for_completion_interruptible_timeout
		(&channel->ul_sem,
		 msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));

	if (ret <= 0) {
		dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d",
			ret);
		goto psi_transfer_fail;
	}
	/* If the PSI download fails, return the CP boot ROM exit code */
	if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT &&
	    ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) {
		/* Negate the exit code so callers get a negative error. */
		ret = (-1) * ((int)ipc_imem->rom_exit_code);
		goto psi_transfer_fail;
	}

	dev_dbg(ipc_imem->dev, "PSI image successfully downloaded");

	/* Wait psi_start_timeout milliseconds until the CP PSI image is
	 * running and updates the execution_stage field with
	 * IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage.
	 */
	do {
		exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);

		if (exec_stage == IPC_MEM_EXEC_STAGE_PSI)
			break;

		msleep(20);
		psi_start_timeout -= 20;
	} while (psi_start_timeout > 0);

	if (exec_stage != IPC_MEM_EXEC_STAGE_PSI)
		goto psi_transfer_fail; /* Unknown status of CP PSI process. */

	ipc_imem->phase = IPC_P_PSI;

	/* Enter the PSI phase. */
	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage);

	/* Request the RUNNING state from CP and wait until it was reached
	 * or timeout.
	 */
	ipc_imem_ipc_init_check(ipc_imem);

	ret = wait_for_completion_interruptible_timeout
		(&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
	if (ret <= 0) {
		dev_err(ipc_imem->dev,
			"Failed PSI RUNNING state on CP, Error-%d", ret);
		goto psi_transfer_fail;
	}

	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) !=
			IPC_MEM_DEVICE_IPC_RUNNING) {
		dev_err(ipc_imem->dev,
			"ch[%d] %s: unexpected CP IPC state %d, not RUNNING",
			channel->channel_id,
			ipc_imem_phase_get_string(ipc_imem->phase),
			ipc_mmio_get_ipc_state(ipc_imem->mmio));

		goto psi_transfer_fail;
	}

	/* Create the flash channel for the transfer of the images. */
	if (!ipc_imem_sys_devlink_open(ipc_imem)) {
		dev_err(ipc_imem->dev, "can't open flash_channel");
		goto psi_transfer_fail;
	}

	ret = 0;
psi_transfer_fail:
	/* The DMA mapping is released on both success and failure paths. */
	ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
pcie_addr_map_fail:
	return ret;
}
560 
/* Write a flash/devlink buffer to CP.
 *
 * In the ROM phase the buffer is the PSI image and is delivered through
 * the doorbell scratchpad path; in later phases it is copied into an skb
 * and sent over the flash channel's uplink list. Returns 0 on success or
 * a negative error.
 */
int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
			       unsigned char *buf, int count)
{
	struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
	struct ipc_mem_channel *channel;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;

	/* In the ROM phase the PSI image is passed to CP about a specific
	 *  shared memory area and doorbell scratchpad directly.
	 */
	if (ipc_imem->phase == IPC_P_ROM) {
		ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count);
		/* If the PSI transfer fails then send crash
		 * Signature. A positive return indicates the image reached
		 * CP but a later bring-up step failed (see
		 * ipc_imem_sys_psi_transfer()).
		 */
		if (ret > 0)
			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      false);
		goto out;
	}

	/* Allocate skb memory for the uplink buffer. */
	skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping,
				 DMA_TO_DEVICE, 0);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	memcpy(skb_put(skb, count), buf, count);

	/* Mark the skb so the completion path blocks the writer until CP
	 * confirms (ul_sem below).
	 */
	IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;

	/* Add skb to the uplink skbuf accumulator. */
	skb_queue_tail(&channel->ul_list, skb);

	/* Inform the IPC tasklet to pass uplink IP packets to CP. */
	if (!ipc_imem_call_cdev_write(ipc_imem)) {
		ret = wait_for_completion_interruptible(&channel->ul_sem);

		if (ret < 0) {
			dev_err(ipc_imem->dev,
				"ch[%d] no CP confirmation, status = %d",
				channel->channel_id, ret);
			ipc_pcie_kfree_skb(ipc_devlink->pcie, skb);
			goto out;
		}
	}
	ret = 0;
out:
	return ret;
}
618 
619 int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
620 			      u32 bytes_to_read, u32 *bytes_read)
621 {
622 	struct sk_buff *skb = NULL;
623 	int rc = 0;
624 
625 	/* check skb is available in rx_list or wait for skb */
626 	devlink->devlink_sio.devlink_read_pend = 1;
627 	while (!skb && !(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) {
628 		if (!wait_for_completion_interruptible_timeout
629 				(&devlink->devlink_sio.read_sem,
630 				 msecs_to_jiffies(IPC_READ_TIMEOUT))) {
631 			dev_err(devlink->dev, "Read timedout");
632 			rc =  -ETIMEDOUT;
633 			goto devlink_read_fail;
634 		}
635 	}
636 	devlink->devlink_sio.devlink_read_pend = 0;
637 	if (bytes_to_read < skb->len) {
638 		dev_err(devlink->dev, "Invalid size,expected len %d", skb->len);
639 		rc = -EINVAL;
640 		goto devlink_read_fail;
641 	}
642 	*bytes_read = skb->len;
643 	memcpy(data, skb->data, skb->len);
644 
645 devlink_read_fail:
646 	ipc_pcie_kfree_skb(devlink->pcie, skb);
647 	return rc;
648 }
649