// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_trace.h"
#include "iosm_ipc_debugfs.h"

/* Check whether the given channel is the WWAN IP channel. */
static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
{
	if (chnl)
		return chnl->ctype == IPC_CTYPE_WWAN &&
		       chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
	return false;
}

static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
{
	union ipc_msg_prep_args prep_args = {
		.sleep.target = 1,
		.sleep.state = state,
	};

	ipc_imem->device_sleep = state;

	return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					IPC_MSG_PREP_SLEEP, &prep_args, NULL);
}

static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
				  struct ipc_pipe *pipe)
{
	/* Limit the max. number of queued entries. */
	if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
		return false;

	return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
}

/* Timer handler that retries DL buffer allocation if a pipe has no free
 * buffers and rings the doorbell if new TDs are available.
 */
static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
				      void *msg, size_t size)
{
	bool new_buffers_available = false;
	bool retry_allocation = false;
	int i;

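	/* Refill only open DL pipes that have completely run out of buffers. */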
	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;

		if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
			continue;

		while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
			new_buffers_available = true;

		if (pipe->nr_of_queued_entries == 0)
			retry_allocation = true;
	}

	if (new_buffers_available)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
	return 0;
}

static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, td_alloc_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
				 0, false);
	return HRTIMER_NORESTART;
}

/* Fast update timer tasklet handler to trigger HP update */
static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					    void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_FAST_TD_UPD_TMR);

	return 0;
}

static enum hrtimer_restart
ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, fast_update_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	ipc_mux_ul_adb_finish(ipc_imem->mux);
	return 0;
}

static enum hrtimer_restart
ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, adb_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
					  struct ipc_mux_config *cfg)
{
	ipc_mmio_update_cp_capability(ipc_imem->mmio);

	if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
		dev_err(ipc_imem->dev, "Failed to get Mux capability.");
		return -EINVAL;
	}

	cfg->protocol = ipc_imem->mmio->mux_protocol;

	cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
			       MUX_UL_ON_CREDITS :
			       MUX_UL;

	/* The instance ID is the same as the channel ID because it is
	 * reused by the channel allocation function.
	 */
	cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;

	return 0;
}


void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
				   unsigned int reset_enable, bool atomic_ctx)
{
	union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
						      reset_enable };

	if (atomic_ctx)
		ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					 IPC_MSG_PREP_FEATURE_SET, &prep_args,
					 NULL);
	else
		ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				      IPC_MSG_PREP_FEATURE_SET, &prep_args);
}

/**
 * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not started.
 * @ipc_imem:                       Pointer to imem data-struct
 */
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
	/* Use the TD update timer only in the runtime phase */
	if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
		/* Trigger the doorbell irq on CP directly. */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_TD_UPD_TMR_START);
		return;
	}

	if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
{
	if (hrtimer_active(hr_timer))
		hrtimer_cancel(hr_timer);
}

/**
 * ipc_imem_adb_timer_start -	Starts the ADB timer if not already running.
 * @ipc_imem:			Pointer to imem data-struct
 */
void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
{
	if (!hrtimer_active(&ipc_imem->adb_timer)) {
		ipc_imem->hrtimer_period =
			ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
		hrtimer_start(&ipc_imem->adb_timer,
			      ipc_imem->hrtimer_period,
			      HRTIMER_MODE_REL);
	}
}

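/* Write TDs for all active channels. Returns true when IP data is still
 * pending, so that the caller can arm the TD update timer.
 */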
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	bool hpda_ctrl_pending = false;
	struct sk_buff_head *ul_list;
	bool hpda_pending = false;
	struct ipc_pipe *pipe;
	int i;

	/* Analyze the uplink pipe of all active channels. */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];

		if (channel->state != IMEM_CHANNEL_ACTIVE)
			continue;

		pipe = &channel->ul_pipe;

		/* Get the reference to the skbuf accumulator list. */
		ul_list = &channel->ul_list;

		/* Fill the transfer descriptor with the uplink buffer info. */
		if (!ipc_imem_check_wwan_ips(channel)) {
			hpda_ctrl_pending |=
				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);
		} else {
			hpda_pending |=
				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);
		}
	}

	/* A forced HP update is needed for the non-data channels. */
	if (hpda_ctrl_pending) {
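		/* The doorbell below also covers any pending IP data, so the
		 * caller no longer needs to trigger an HP update.
		 */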
		hpda_pending = false;
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_UL_WRITE_TD);
	}

	return hpda_pending;
}

void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_BOOT_TIMEOUT;

	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

	/* Trigger the CP interrupt to enter the init state. */
	ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
			  IPC_MEM_DEVICE_IPC_INIT);
	/* Wait for the CP update. */
	do {
		if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		    ipc_imem->ipc_requested_state) {
			/* Prepare the MMIO space */
			ipc_mmio_config(ipc_imem->mmio);

			/* Trigger the CP irq to enter the running state. */
			ipc_imem->ipc_requested_state =
				IPC_MEM_DEVICE_IPC_RUNNING;
			ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
					  IPC_MEM_DEVICE_IPC_RUNNING);

			return;
		}
		msleep(20);
	} while (--timeout);

	/* timeout */
	dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
		ipc_imem_phase_get_string(ipc_imem->phase),
		ipc_mmio_get_ipc_state(ipc_imem->mmio));

	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
}

/* Analyze the packet type and distribute it. */
static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
				    struct ipc_pipe *pipe, struct sk_buff *skb)
{
	u16 port_id;

	if (!skb)
		return;

	/* An AT/control or IP packet is expected. */
	switch (pipe->channel->ctype) {
	case IPC_CTYPE_CTRL:
		port_id = pipe->channel->channel_id;
		ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
				    IPC_CB(skb)->mapping,
				    IPC_CB(skb)->direction);
		if (port_id == IPC_MEM_CTRL_CHL_ID_7)
			ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
						       skb);
		else if (ipc_is_trace_channel(ipc_imem, port_id))
			ipc_trace_port_rx(ipc_imem, skb);
		else
			wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
				     skb);
		break;

	case IPC_CTYPE_WWAN:
		if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
			ipc_mux_dl_decode(ipc_imem->mux, skb);
		break;
	default:
		dev_err(ipc_imem->dev, "Invalid channel type");
		break;
	}
}

/* Process the downlink data and pass them to the char or net layer. */
static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	s32 cnt = 0, processed_td_cnt = 0;
	struct ipc_mem_channel *channel;
	u32 head = 0, tail = 0;
	bool processed = false;
	struct sk_buff *skb;

	channel = pipe->channel;

	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);
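	/* Count the TDs consumed by CP since the last pass; the index
	 * difference wraps around the circular TD ring.
	 */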
	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	processed_td_cnt = cnt;

	/* Seek for pipes with pending DL data. */
	while (cnt--) {
		skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);

		/* Analyze the packet type and distribute it. */
		ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
	}

	/* Try to allocate new empty DL SKBs from head..tail - 1. */
	while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
		processed = true;

	if (processed && !ipc_imem_check_wwan_ips(channel)) {
		/* Force HP update for non IP channels */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);
		processed = false;

		/* If the Fast Update timer is already running, stop it. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}

	/* Any control channel process will get an immediate HP update.
	 * Start the Fast update timer only for an IP channel if all the TDs
	 * were used in the last process.
	 */
	if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		hrtimer_start(&ipc_imem->fast_update_timer,
			      ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
	}

	if (ipc_imem->app_notify_dl_pend)
		complete(&ipc_imem->dl_pend_sem);
}

/* process open uplink pipe */
static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	struct ipc_mem_channel *channel;
	u32 tail = 0, head = 0;
	struct sk_buff *skb;
	s32 cnt = 0;

	channel = pipe->channel;

	/* Get the pipe's head and tail indices. */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);

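	/* Count the TDs consumed by CP since the last pass, accounting for
	 * wraparound of the circular TD ring.
	 */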
	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	/* Free UL buffers. */
	while (cnt--) {
		skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);

		if (!skb)
			continue;

		/* If the user app was suspended in the uplink direction
		 * (blocking write), resume it.
		 */
		if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
			complete(&channel->ul_sem);

		/* Free the skbuf element. */
		if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
			if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
				ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
			else
				dev_err(ipc_imem->dev,
					"OP Type is UL_MUX, unknown if_id %d",
					channel->if_id);
		} else {
			ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
		}
	}

	/* Trace channel stats for IP UL pipe. */
	if (ipc_imem_check_wwan_ips(pipe->channel))
		ipc_mux_check_n_restart_tx(ipc_imem->mux);

	if (ipc_imem->app_notify_ul_pend)
		complete(&ipc_imem->ul_pend_sem);
}

/* Execute the ROM doorbell irq: latch the ROM exit code and wake up the
 * waiting flash app.
 */
static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
	ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
	complete(&channel->ul_sem);
}

/* Execute the UL bundle timer actions, generating the doorbell irq. */
static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					  void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_TD_UPD_TMR);
	return 0;
}

/* Consider link power management in the runtime phase. */
static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
{
	/* The link will go down; test for pending UL packets. */
	if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
	    hrtimer_active(&ipc_imem->tdupdate_timer)) {
		/* Generate the doorbell irq. */
		ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
		/* Stop the TD update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
		/* Stop the fast update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}
}

/* Execute startup timer and wait for delayed start (e.g. NAND) */
static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
					void *msg, size_t size)
{
	/* Update & check the current operation phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
		return -EIO;

	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
	    IPC_MEM_DEVICE_IPC_UNINIT) {
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_INIT);

		ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
		/* reduce period to 100 ms to check for mmio init state */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	} else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		   IPC_MEM_DEVICE_IPC_INIT) {
		/* Startup complete - disable timer */
		ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);

		/* Prepare the MMIO space */
		ipc_mmio_config(ipc_imem->mmio);
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_RUNNING);
	}

	return 0;
}

static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
{
	enum hrtimer_restart result = HRTIMER_NORESTART;
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, startup_timer);

	if (ktime_to_ns(ipc_imem->hrtimer_period)) {
		hrtimer_forward_now(&ipc_imem->startup_timer,
				    ipc_imem->hrtimer_period);
		result = HRTIMER_RESTART;
	}

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
				 NULL, 0, false);
	return result;
}

/* Get the CP execution stage: from the protocol area once IPC is running,
 * otherwise from MMIO.
 */
static enum ipc_mem_exec_stage
ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
{
	return (ipc_imem->phase == IPC_P_RUN &&
		ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
		       ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
		       ipc_mmio_get_exec_stage(ipc_imem->mmio);
}

/* Callback to send the modem ready uevent */
static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	enum ipc_mem_exec_stage exec_stage =
		ipc_imem_get_exec_stage_buffered(ipc_imem);

	if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
		ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);

	return 0;
}

/* This function is executed in a task context via an ipc_worker object,
 * as the creation or removal of a device can't be done from tasklet
 * context.
 */
static void ipc_imem_run_state_worker(struct work_struct *instance)
{
	struct ipc_chnl_cfg chnl_cfg_port = { 0 };
	struct ipc_mux_config mux_cfg;
	struct iosm_imem *ipc_imem;
	u8 ctrl_chl_idx = 0;
	int ret;

	ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);

	if (ipc_imem->phase != IPC_P_RUN) {
		dev_err(ipc_imem->dev,
			"Modem link down. Exit run state worker.");
		goto err_out;
	}

	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	ret = ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg);
	if (ret < 0)
		goto err_out;

	ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
	if (!ipc_imem->mux)
		goto err_out;

	ret = ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
	if (ret < 0)
		goto err_ipc_mux_deinit;

	ipc_imem->mux->wwan = ipc_imem->wwan;

	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
			ipc_imem->ipc_port[ctrl_chl_idx] = NULL;

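			/* Skip control port types that the detected modem
			 * device does not support.
			 */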
			if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID &&
			    chnl_cfg_port.wwan_port_type == WWAN_PORT_XMMRPC) {
				ctrl_chl_idx++;
				continue;
			}

			if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
			    chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
				ctrl_chl_idx++;
				continue;
			}
			if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
				ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
						      chnl_cfg_port,
						      IRQ_MOD_OFF);
				ipc_imem->ipc_port[ctrl_chl_idx] =
					ipc_port_init(ipc_imem, chnl_cfg_port);
			}
		}
		ctrl_chl_idx++;
	}

	ipc_debugfs_init(ipc_imem);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
				 false);

	/* Complete all memory stores before setting bit */
	smp_mb__before_atomic();

	set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);

	/* Complete all memory stores after setting bit */
	smp_mb__after_atomic();

	if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID) {
		pm_runtime_mark_last_busy(ipc_imem->dev);
		pm_runtime_put_autosuspend(ipc_imem->dev);
	}

	return;

err_ipc_mux_deinit:
	ipc_mux_deinit(ipc_imem->mux);
err_out:
	ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
}

static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
{
	enum ipc_mem_device_ipc_state curr_ipc_status;
	enum ipc_phase old_phase, phase;
	bool retry_allocation = false;
	bool ul_pending = false;
	int i;

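	/* Re-arm the irq debounce flag set in ipc_imem_irq_process(). */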
	if (irq != IMEM_IRQ_DONT_CARE)
		ipc_imem->ev_irq_pending[irq] = false;

	/* Get the internal phase. */
	old_phase = ipc_imem->phase;

	if (old_phase == IPC_P_OFF_REQ) {
		dev_dbg(ipc_imem->dev,
			"[%s]: Ignoring MSI. Deinit sequence in progress!",
			ipc_imem_phase_get_string(old_phase));
		return;
	}

	/* Update the phase controlled by CP. */
	phase = ipc_imem_phase_update(ipc_imem);

	switch (phase) {
	case IPC_P_RUN:
		if (!ipc_imem->enter_runtime) {
			/* Execute the transition from flash/boot to runtime. */
			ipc_imem->enter_runtime = 1;

			/* allow device to sleep, default value is
			 * IPC_HOST_SLEEP_ENTER_SLEEP
			 */
			ipc_imem_msg_send_device_sleep(ipc_imem,
						       ipc_imem->device_sleep);

			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      true);
		}

		curr_ipc_status =
			ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);

		/* check ipc_status change */
		if (ipc_imem->ipc_status != curr_ipc_status) {
			ipc_imem->ipc_status = curr_ipc_status;

			if (ipc_imem->ipc_status ==
			    IPC_MEM_DEVICE_IPC_RUNNING) {
				schedule_work(&ipc_imem->run_state_worker);
			}
		}

		/* Consider power management in the runtime phase. */
		ipc_imem_slp_control_exec(ipc_imem);
		break; /* Continue with skbuf processing. */

		/* Unexpected phases. */
	case IPC_P_OFF:
	case IPC_P_OFF_REQ:
		dev_err(ipc_imem->dev, "confused phase %s",
			ipc_imem_phase_get_string(phase));
		return;

	case IPC_P_PSI:
		if (old_phase != IPC_P_ROM)
			break;

		fallthrough;
		/* On CP the PSI phase is already active. */

	case IPC_P_ROM:
		/* Before CP ROM driver starts the PSI image, it sets
		 * the exit_code field on the doorbell scratchpad and
		 * triggers the irq.
		 */
		ipc_imem_rom_irq_exec(ipc_imem);
		return;

	default:
		break;
	}

	/* process message ring */
	ipc_protocol_msg_process(ipc_imem, irq);

	/* process all open pipes */
	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
		struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;

		if (dl_pipe->is_open &&
		    (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
			ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);

			if (dl_pipe->nr_of_queued_entries == 0)
				retry_allocation = true;
		}

		if (ul_pipe->is_open)
			ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
	}

	/* Try to generate new ADB or ADGH. */
	if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
		ipc_imem_td_update_timer_start(ipc_imem);
		if (ipc_imem->mux->protocol == MUX_AGGREGATION)
			ipc_imem_adb_timer_start(ipc_imem);
	}

	/* Continue the send procedure with accumulated SIO or NETIF packets.
	 * Reset the debounce flags.
	 */
	ul_pending |= ipc_imem_ul_write_td(ipc_imem);

	/* if UL data is pending restart TD update timer */
	if (ul_pending) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}

	/* If CP has executed the transition
	 * from IPC_INIT to IPC_RUNNING in the PSI
	 * phase, wake up the flash app to open the pipes.
	 */
	if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
	    ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
	    ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
						IPC_MEM_DEVICE_IPC_RUNNING) {
		complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
	}

	/* Reset the expected CP state. */
	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

/* Callback by tasklet for handling interrupt events. */
static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
			      size_t size)
{
	ipc_imem_handle_irq(ipc_imem, arg);

	return 0;
}

void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
{
	/* start doorbell irq delay timer if UL is pending */
	if (ipc_imem_ul_write_td(ipc_imem))
		ipc_imem_td_update_timer_start(ipc_imem);
}

/* Check the execution stage and update the AP phase */
static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
						  enum ipc_mem_exec_stage stage)
{
	switch (stage) {
	case IPC_MEM_EXEC_STAGE_BOOT:
		if (ipc_imem->phase != IPC_P_ROM) {
			/* Send this event only once */
			ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
		}

		ipc_imem->phase = IPC_P_ROM;
		break;

	case IPC_MEM_EXEC_STAGE_PSI:
		ipc_imem->phase = IPC_P_PSI;
		break;

	case IPC_MEM_EXEC_STAGE_EBL:
		ipc_imem->phase = IPC_P_EBL;
		break;

	case IPC_MEM_EXEC_STAGE_RUN:
		if (ipc_imem->phase != IPC_P_RUN &&
		    ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
			ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
		}
		ipc_imem->phase = IPC_P_RUN;
		break;

	case IPC_MEM_EXEC_STAGE_CRASH:
		if (ipc_imem->phase != IPC_P_CRASH)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);

		ipc_imem->phase = IPC_P_CRASH;
		break;

	case IPC_MEM_EXEC_STAGE_CD_READY:
		if (ipc_imem->phase != IPC_P_CD_READY)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
		ipc_imem->phase = IPC_P_CD_READY;
		break;

	default:
		/* unknown exec stage:
		 * assume that link is down and send info to listeners
		 */
		ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
		break;
	}

	return ipc_imem->phase;
}

/* Send msg to device to open pipe */
static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
			       struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = {
		.pipe_open.pipe = pipe,
	};

	if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				  IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
		pipe->is_open = true;

	return pipe->is_open;
}

/* Allocates the TDs for the given pipe along with firing HP update DB. */
static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
				     void *msg, size_t size)
{
	struct ipc_pipe *dl_pipe = msg;
	bool processed = false;
	int i;

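	/* Fill all but one ring entry; keeping one TD free is a common way to
	 * tell a full ring apart from an empty one.
	 */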
	for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
		processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);

	/* Trigger the doorbell irq to inform CP that new downlink buffers are
	 * available.
	 */
	if (processed)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);

	return 0;
}

static enum hrtimer_restart
ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, tdupdate_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

/* Get the CP execution state and map it to the AP phase. */
enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage exec_stage =
				ipc_imem_get_exec_stage_buffered(ipc_imem);
	/* While a shutdown is requested, return the internal precalculated
	 * phase; otherwise derive the phase from the CP execution stage.
	 */
	return ipc_imem->phase == IPC_P_OFF_REQ ?
		       ipc_imem->phase :
		       ipc_imem_phase_update_check(ipc_imem, exec_stage);
}

const char *ipc_imem_phase_get_string(enum ipc_phase phase)
{
	switch (phase) {
	case IPC_P_RUN:
		return "A-RUN";

	case IPC_P_OFF:
		return "A-OFF";

	case IPC_P_ROM:
		return "A-ROM";

	case IPC_P_PSI:
		return "A-PSI";

	case IPC_P_EBL:
		return "A-EBL";

	case IPC_P_CRASH:
		return "A-CRASH";

	case IPC_P_CD_READY:
		return "A-CD_READY";

	case IPC_P_OFF_REQ:
		return "A-OFF_REQ";

	default:
		return "A-???";
	}
}

void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };

	pipe->is_open = false;
	ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
			      &prep_args);

	ipc_imem_pipe_cleanup(ipc_imem, pipe);
}

void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
		return;
	}

	channel = &ipc_imem->channels[channel_id];

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel_id, channel->state);
		return;
	}

	/* Free only the channel id in the CP power off mode. */
	if (channel->state == IMEM_CHANNEL_RESERVED)
		/* Release only the channel id. */
		goto channel_free;

	if (ipc_imem->phase == IPC_P_RUN) {
		ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
		ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
	}

	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);

channel_free:
	ipc_imem_channel_free(channel);
}

struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
					      int channel_id, u32 db_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
		return NULL;
	}

	channel = &ipc_imem->channels[channel_id];

	channel->state = IMEM_CHANNEL_ACTIVE;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
		goto ul_pipe_err;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
		goto dl_pipe_err;

	/* Allocate the downlink buffers in tasklet context. */
	if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
				     &channel->dl_pipe, 0, false)) {
		dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
		goto task_failed;
	}

	/* Active channel. */
	return channel;
task_failed:
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
dl_pipe_err:
	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ul_pipe_err:
	ipc_imem_channel_free(channel);
	return NULL;
}

void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
{
	ipc_protocol_suspend(ipc_imem->ipc_protocol);
}

void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
{
	ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
}

void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage stage;

	if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
		stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		ipc_imem_phase_update_check(ipc_imem, stage);
	}
}

void ipc_imem_channel_free(struct ipc_mem_channel *channel)
{
	/* Reset dynamic channel elements. */
	channel->state = IMEM_CHANNEL_FREE;
}

int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
			   enum ipc_ctype ctype)
{
	struct ipc_mem_channel *channel;
	int i;

	/* Find channel of given type/index */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];
		if (channel->ctype == ctype && channel->index == index)
			break;
	}

	if (i >= ipc_imem->nr_of_channels) {
		dev_dbg(ipc_imem->dev,
			"no channel definition for index=%d ctype=%d", index,
			ctype);
		return -ECHRNG;
	}

	if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
		dev_dbg(ipc_imem->dev, "channel is in use");
		return -EBUSY;
	}

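	/* Only the WWAN IP MUX channel carries an interface ID here. */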
	if (channel->ctype == IPC_CTYPE_WWAN &&
	    index == IPC_MEM_MUX_IP_CH_IF_ID)
		channel->if_id = index;

	channel->channel_id = index;
	channel->state = IMEM_CHANNEL_RESERVED;

	return i;
}

void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
			   struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
	    chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
		dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
			chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
		return;
	}

	if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "too many channels");
		return;
	}

	channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
	channel->channel_id = ipc_imem->nr_of_channels;
	channel->ctype = ctype;
	channel->index = chnl_cfg.id;
	channel->net_err_count = 0;
	channel->state = IMEM_CHANNEL_FREE;
	ipc_imem->nr_of_channels++;

	ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
				IRQ_MOD_OFF);

	skb_queue_head_init(&channel->ul_list);

	init_completion(&channel->ul_sem);
}

void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
			     struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (id < 0 || id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", id);
		return;
	}

	channel = &ipc_imem->channels[id];

	if (channel->state != IMEM_CHANNEL_FREE &&
	    channel->state != IMEM_CHANNEL_RESERVED) {
		dev_err(ipc_imem->dev, "invalid channel state %d",
			channel->state);
		return;
	}

	channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
	channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
	channel->ul_pipe.is_open = false;
	channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
	channel->ul_pipe.channel = channel;
	channel->ul_pipe.dir = IPC_MEM_DIR_UL;
	channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->ul_pipe.irq_moderation = irq_moderation;
	channel->ul_pipe.buf_size = 0;

	channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
	channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
	channel->dl_pipe.is_open = false;
	channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
	channel->dl_pipe.channel = channel;
	channel->dl_pipe.dir = IPC_MEM_DIR_DL;
	channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->dl_pipe.irq_moderation = irq_moderation;
	channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
}

static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
{
	int i;

	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		struct ipc_mem_channel *channel;

		channel = &ipc_imem->channels[i];

		ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
		ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);

		ipc_imem_channel_free(channel);
	}
}

void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	struct sk_buff *skb;

	/* Force pipe to closed state also when not explicitly closed through
	 * ipc_imem_pipe_close()
	 */
	pipe->is_open = false;

	/* Empty the uplink skb accumulator. */
	while ((skb = skb_dequeue(&pipe->channel->ul_list)))
		ipc_pcie_kfree_skb(ipc_imem->pcie, skb);

	ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
}

/* Send IPC protocol uninit to the modem when the link is active. */
static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
	enum ipc_mem_device_ipc_state ipc_state;

	/* When the PCIe link is up, set the modem to IPC_UNINIT; when the
	 * PCIe link is down, skip this step.
	 */
	if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
		/* set modem to UNINIT
		 * (in case we want to reload the AP driver without resetting
		 * the modem)
		 */
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_UNINIT);
		ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);

		/* Wait for a maximum of 30 ms to allow the modem to
		 * uninitialize the protocol.
		 */
		while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
		       (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
		       (timeout > 0)) {
			usleep_range(1000, 1250);
			timeout--;
			ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
		}
	}
}

void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
{
	ipc_imem->phase = IPC_P_OFF_REQ;

	/* forward MDM_NOT_READY to listeners */
	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);
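	/* Keep the device resumed while the teardown runs. */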
	pm_runtime_get_sync(ipc_imem->dev);

	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);

	/* cancel the workqueue */
	cancel_work_sync(&ipc_imem->run_state_worker);

	if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
		ipc_mux_deinit(ipc_imem->mux);
		ipc_debugfs_deinit(ipc_imem);
		ipc_wwan_deinit(ipc_imem->wwan);
		ipc_port_deinit(ipc_imem->ipc_port);
	}

	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	ipc_imem_device_ipc_uninit(ipc_imem);
	ipc_imem_channel_reset(ipc_imem);

	ipc_protocol_deinit(ipc_imem->ipc_protocol);
	ipc_task_deinit(ipc_imem->ipc_task);

	kfree(ipc_imem->ipc_task);
	kfree(ipc_imem->mmio);

	ipc_imem->phase = IPC_P_OFF;
}

/* After CP has unblocked the PCIe link, save the start address of the doorbell
 * scratchpad and prepare the shared memory region. If the flashing to RAM
 * procedure shall be executed, copy the chip information from the doorbell
 * scratchpad to the application buffer and wake up the flash app.
 */
static int ipc_imem_config(struct iosm_imem *ipc_imem)
{
	enum ipc_phase phase;

	/* Initialize the semaphore for the blocking read UL/DL transfer. */
	init_completion(&ipc_imem->ul_pend_sem);

	init_completion(&ipc_imem->dl_pend_sem);

	/* clear internal flags */
	ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
	ipc_imem->enter_runtime = 0;

	phase = ipc_imem_phase_update(ipc_imem);

	/* Either CP shall be in the power off or power on phase. */
	switch (phase) {
	case IPC_P_ROM:
		ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
		/* poll execution stage (for delayed start, e.g. NAND) */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
		return 0;

	case IPC_P_PSI:
	case IPC_P_EBL:
	case IPC_P_RUN:
		/* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;

		/* Verify the expected initial state. */
		if (ipc_imem->ipc_requested_state ==
		    ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
			ipc_imem_ipc_init_check(ipc_imem);

			return 0;
		}
		dev_err(ipc_imem->dev,
			"ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
			ipc_mmio_get_ipc_state(ipc_imem->mmio));
		break;
	case IPC_P_CRASH:
	case IPC_P_CD_READY:
		dev_dbg(ipc_imem->dev,
			"Modem is in phase %d, reset Modem to collect CD",
			phase);
		return 0;
	default:
		dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
		break;
	}

	complete(&ipc_imem->dl_pend_sem);
	complete(&ipc_imem->ul_pend_sem);
	ipc_imem->phase = IPC_P_OFF;
	return -EIO;
}

/* Pass the dev ptr to the shared memory driver and request the entry points */
struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
				void __iomem *mmio, struct device *dev)
{
	struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
	enum ipc_mem_exec_stage stage;

	if (!ipc_imem)
		return NULL;

	/* Save the device address. */
	ipc_imem->pcie = pcie;
	ipc_imem->dev = dev;

	ipc_imem->pci_device_id = device_id;

	ipc_imem->cp_version = 0;
	ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;

	/* Reset the max number of configured channels */
	ipc_imem->nr_of_channels = 0;

	/* allocate IPC MMIO */
	ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
	if (!ipc_imem->mmio) {
		dev_err(ipc_imem->dev, "failed to initialize mmio region");
		goto mmio_init_fail;
	}

	ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
				     GFP_KERNEL);

	/* Create a tasklet for event handling. */
	if (!ipc_imem->ipc_task)
		goto ipc_task_fail;

	if (ipc_task_init(ipc_imem->ipc_task))
		goto ipc_task_init_fail;

	ipc_imem->ipc_task->dev = ipc_imem->dev;

	INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);

	ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);

	if (!ipc_imem->ipc_protocol)
		goto protocol_init_fail;

	/* The phase is set to power off. */
	ipc_imem->phase = IPC_P_OFF;

	hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;

	hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;

	hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;

	hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;

	hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;

	if (ipc_imem_config(ipc_imem)) {
		dev_err(ipc_imem->dev, "failed to initialize the imem");
		goto imem_config_fail;
	}

	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
		/* Alloc and Register devlink */
		ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
		if (!ipc_imem->ipc_devlink) {
			dev_err(ipc_imem->dev, "Devlink register failed");
			goto imem_config_fail;
		}

		if (ipc_flash_link_establish(ipc_imem))
			goto devlink_channel_fail;

		set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
	}

	if (!pm_runtime_enabled(ipc_imem->dev))
		pm_runtime_enable(ipc_imem->dev);

	pm_runtime_set_autosuspend_delay(ipc_imem->dev,
					 IPC_MEM_AUTO_SUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(ipc_imem->dev);
	pm_runtime_allow(ipc_imem->dev);
	pm_runtime_mark_last_busy(ipc_imem->dev);

	return ipc_imem;
devlink_channel_fail:
	ipc_devlink_deinit(ipc_imem->ipc_devlink);
imem_config_fail:
	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);
protocol_init_fail:
	cancel_work_sync(&ipc_imem->run_state_worker);
	ipc_task_deinit(ipc_imem->ipc_task);
ipc_task_init_fail:
	kfree(ipc_imem->ipc_task);
ipc_task_fail:
	kfree(ipc_imem->mmio);
mmio_init_fail:
	kfree(ipc_imem);
	return NULL;
}

void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
{
	/* Debounce IPC_EV_IRQ. */
	if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
		ipc_imem->ev_irq_pending[irq] = true;
		ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
					 NULL, 0, false);
	}
}

void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
	ipc_imem->td_update_timer_suspended = suspend;
}

/* Verify the CP execution state, copy the chip info,
 * change the execution phase to ROM
 */
static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
						 int arg, void *msg,
						 size_t msgsize)
{
	enum ipc_mem_exec_stage stage;
	struct sk_buff *skb;
	int rc = -EINVAL;
	size_t size;

	/* Test the CP execution state. */
	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
		dev_err(ipc_imem->dev,
			"Execution_stage: expected BOOT, received = %X", stage);
		goto trigger_chip_info_fail;
	}
	/* Allocate a new sk buf for the chip info. */
	size = ipc_imem->mmio->chip_info_size;
	if (size > IOSM_CHIP_INFO_SIZE_MAX)
		goto trigger_chip_info_fail;

	skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
	if (!skb) {
		dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
		rc = -ENOMEM;
		goto trigger_chip_info_fail;
	}
	/* Copy the chip info characters into the ipc_skb. */
	ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
	/* First change to the ROM boot phase. */
	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
	ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
	ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
	rc = 0;
trigger_chip_info_fail:
	return rc;
}

int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
{
	return ipc_task_queue_send_task(ipc_imem,
					ipc_imem_devlink_trigger_chip_info_cb,
					0, NULL, 0, true);
}