// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_trace.h"
#include "iosm_ipc_debugfs.h"

/* Check if the channel is a valid WWAN IP channel. */
static bool ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
{
	if (chnl)
		return chnl->ctype == IPC_CTYPE_WWAN &&
		       chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
	return false;
}

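/* Send a device sleep message to CP and cache the requested sleep state. */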
static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
{
	union ipc_msg_prep_args prep_args = {
		.sleep.target = 1,
		.sleep.state = state,
	};

	ipc_imem->device_sleep = state;

	return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					IPC_MSG_PREP_SLEEP, &prep_args, NULL);
}

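/* Prepare one new DL transfer descriptor for the pipe, unless the pipe
 * already holds the maximum number of queued entries.
 */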
static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
				  struct ipc_pipe *pipe)
{
	/* limit max. nr of entries */
	if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
		return false;

	return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
}

/* Timer handler: retry the DL buffer allocation for any pipe that has no free
 * buffers and ring the doorbell if new TDs became available.
 */
static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
				      void *msg, size_t size)
{
	bool new_buffers_available = false;
	bool retry_allocation = false;
	int i;

	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;

		if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
			continue;

		while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
			new_buffers_available = true;

		if (pipe->nr_of_queued_entries == 0)
			retry_allocation = true;
	}

	if (new_buffers_available)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
	return 0;
}

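/* TD allocation timer expiry: defer the allocation retry into the tasklet
 * context.
 */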
static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, td_alloc_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
				 0, false);
	return HRTIMER_NORESTART;
}

/* Fast update timer tasklet handler to trigger HP update */
static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					    void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_FAST_TD_UPD_TMR);

	return 0;
}

static enum hrtimer_restart
ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, fast_update_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

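/* Read the CP capabilities from MMIO and derive the MUX configuration. */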
static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
					  struct ipc_mux_config *cfg)
{
	ipc_mmio_update_cp_capability(ipc_imem->mmio);

	if (!ipc_imem->mmio->has_mux_lite) {
		dev_err(ipc_imem->dev, "Failed to get Mux capability.");
		return -EINVAL;
	}

	cfg->protocol = MUX_LITE;

	cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
			       MUX_UL_ON_CREDITS :
			       MUX_UL;

	/* The instance ID is the same as the channel ID because it is
	 * reused by the channel allocation function.
	 */
	cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;

	return 0;
}

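/* Send the feature set message to CP, either from tasklet or task context. */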
void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
				   unsigned int reset_enable, bool atomic_ctx)
{
	union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
						      reset_enable };

	if (atomic_ctx)
		ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					 IPC_MSG_PREP_FEATURE_SET, &prep_args,
					 NULL);
	else
		ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				      IPC_MSG_PREP_FEATURE_SET, &prep_args);
}

void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
	/* Use the TD update timer only in the runtime phase */
	if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
		/* trigger the doorbell irq on CP directly. */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_TD_UPD_TMR_START);
		return;
	}

	if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

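/* Cancel the given hrtimer if it is currently armed. */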
void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
{
	if (hrtimer_active(hr_timer))
		hrtimer_cancel(hr_timer);
}

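/* Push the queued UL skbs of all active channels to the TD rings. Returns
 * true if an HP update doorbell is still needed for the IP data channels.
 */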
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	bool hpda_ctrl_pending = false;
	struct sk_buff_head *ul_list;
	bool hpda_pending = false;
	struct ipc_pipe *pipe;
	int i;

	/* Analyze the uplink pipe of all active channels. */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];

		if (channel->state != IMEM_CHANNEL_ACTIVE)
			continue;

		pipe = &channel->ul_pipe;

		/* Get the reference to the skbuf accumulator list. */
		ul_list = &channel->ul_list;

		/* Fill the transfer descriptor with the uplink buffer info. */
		if (!ipc_imem_check_wwan_ips(channel)) {
			hpda_ctrl_pending |=
				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);
		} else {
			hpda_pending |=
				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);
		}
	}

	/* forced HP update needed for non data channels */
	if (hpda_ctrl_pending) {
		hpda_pending = false;
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_UL_WRITE_TD);
	}

	return hpda_pending;
}

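/* Request IPC_INIT from CP, poll the MMIO IPC state until CP confirms it,
 * then request IPC_RUNNING; send a modem timeout uevent if CP never responds.
 */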
void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_BOOT_TIMEOUT;

	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

	/* Trigger the CP interrupt to enter the init state. */
	ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
			  IPC_MEM_DEVICE_IPC_INIT);
	/* Wait for the CP update. */
	do {
		if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		    ipc_imem->ipc_requested_state) {
			/* Prepare the MMIO space */
			ipc_mmio_config(ipc_imem->mmio);

			/* Trigger the CP irq to enter the running state. */
			ipc_imem->ipc_requested_state =
				IPC_MEM_DEVICE_IPC_RUNNING;
			ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
					  IPC_MEM_DEVICE_IPC_RUNNING);

			return;
		}
		msleep(20);
	} while (--timeout);

	/* timeout */
	dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
		ipc_imem_phase_get_string(ipc_imem->phase),
		ipc_mmio_get_ipc_state(ipc_imem->mmio));

	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
}

/* Analyze the packet type and distribute it. */
static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
				    struct ipc_pipe *pipe, struct sk_buff *skb)
{
	u16 port_id;

	if (!skb)
		return;

	/* An AT/control or IP packet is expected. */
	switch (pipe->channel->ctype) {
	case IPC_CTYPE_CTRL:
		port_id = pipe->channel->channel_id;
		ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
				    IPC_CB(skb)->mapping,
				    IPC_CB(skb)->direction);
		if (port_id == IPC_MEM_CTRL_CHL_ID_7)
			ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
						       skb);
		else if (ipc_is_trace_channel(ipc_imem, port_id))
			ipc_trace_port_rx(ipc_imem, skb);
		else
			wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
				     skb);
		break;

	case IPC_CTYPE_WWAN:
		if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
			ipc_mux_dl_decode(ipc_imem->mux, skb);
		break;
	default:
		dev_err(ipc_imem->dev, "Invalid channel type");
		break;
	}
}

/* Process the downlink data and pass them to the char or net layer. */
static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	s32 cnt = 0, processed_td_cnt = 0;
	struct ipc_mem_channel *channel;
	u32 head = 0, tail = 0;
	bool processed = false;
	struct sk_buff *skb;

	channel = pipe->channel;

	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);
	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	processed_td_cnt = cnt;

	/* Process the pending DL TDs of this pipe. */
	while (cnt--) {
		skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);

		/* Analyze the packet type and distribute it. */
		ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
	}

	/* Try to allocate new empty DL SKBs from head..tail - 1. */
	while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
		processed = true;

	if (processed && !ipc_imem_check_wwan_ips(channel)) {
		/* Force HP update for non IP channels */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);
		processed = false;

		/* If Fast Update timer is already running then stop */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}

	/* Any control channel process will get immediate HP update.
	 * Start Fast update timer only for IP channel if all the TDs were
	 * used in last process.
	 */
	if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		hrtimer_start(&ipc_imem->fast_update_timer,
			      ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
	}

	if (ipc_imem->app_notify_dl_pend)
		complete(&ipc_imem->dl_pend_sem);
}

/* process open uplink pipe */
static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	struct ipc_mem_channel *channel;
	u32 tail = 0, head = 0;
	struct sk_buff *skb;
	s32 cnt = 0;

	channel = pipe->channel;

	/* Get the pipe head and tail index. */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);

	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	/* Free UL buffers. */
	while (cnt--) {
		skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);

		if (!skb)
			continue;

		/* If the user app was suspended in uplink direction - blocking
		 * write, resume it.
		 */
		if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
			complete(&channel->ul_sem);

		/* Free the skbuf element. */
		if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
			if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
				ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
			else
				dev_err(ipc_imem->dev,
					"OP Type is UL_MUX, unknown if_id %d",
					channel->if_id);
		} else {
			ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
		}
	}

	/* For the IP UL pipe, check flow control and restart MUX TX if needed. */
	if (ipc_imem_check_wwan_ips(pipe->channel))
		ipc_mux_check_n_restart_tx(ipc_imem->mux);

	if (ipc_imem->app_notify_ul_pend)
		complete(&ipc_imem->ul_pend_sem);
}

/* Execute the ROM irq: latch the ROM exit code and wake up the flash app. */
static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
	ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
	complete(&channel->ul_sem);
}

/* Execute the UL bundle timer actions, generating the doorbell irq. */
static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					  void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_TD_UPD_TMR);
	return 0;
}

/* Consider link power management in the runtime phase. */
static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
{
	/* The link is about to go down; handle pending UL packets first. */
	if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
	    hrtimer_active(&ipc_imem->tdupdate_timer)) {
		/* Generate the doorbell irq. */
		ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
		/* Stop the TD update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
		/* Stop the fast update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}
}

/* Execute startup timer and wait for delayed start (e.g. NAND) */
static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
					void *msg, size_t size)
{
	/* Update & check the current operation phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
		return -EIO;

	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
	    IPC_MEM_DEVICE_IPC_UNINIT) {
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_INIT);

		ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
		/* reduce period to 100 ms to check for mmio init state */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	} else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		   IPC_MEM_DEVICE_IPC_INIT) {
		/* Startup complete - disable timer */
		ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);

		/* Prepare the MMIO space */
		ipc_mmio_config(ipc_imem->mmio);
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_RUNNING);
	}

	return 0;
}

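/* Startup timer expiry: re-arm the timer while a period is configured and
 * defer the state machine step into the tasklet context.
 */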
static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
{
	enum hrtimer_restart result = HRTIMER_NORESTART;
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, startup_timer);

	if (ktime_to_ns(ipc_imem->hrtimer_period)) {
		hrtimer_forward_now(&ipc_imem->startup_timer,
				    ipc_imem->hrtimer_period);
		result = HRTIMER_RESTART;
	}

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
				 NULL, 0, false);
	return result;
}

/* Get the CP execution stage */
static enum ipc_mem_exec_stage
ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
{
	return (ipc_imem->phase == IPC_P_RUN &&
		ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
		       ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
		       ipc_mmio_get_exec_stage(ipc_imem->mmio);
}

/* Callback to send the modem ready uevent */
static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	enum ipc_mem_exec_stage exec_stage =
		ipc_imem_get_exec_stage_buffered(ipc_imem);

	if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
		ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);

	return 0;
}

/* This function is executed in a task context via an ipc_worker object,
 * as the creation or removal of devices can't be done from tasklet context.
 */
static void ipc_imem_run_state_worker(struct work_struct *instance)
{
	struct ipc_chnl_cfg chnl_cfg_port = { 0 };
	struct ipc_mux_config mux_cfg;
	struct iosm_imem *ipc_imem;
	u8 ctrl_chl_idx = 0;

	ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);

	if (ipc_imem->phase != IPC_P_RUN) {
		dev_err(ipc_imem->dev,
			"Modem link down. Exit run state worker.");
		return;
	}

	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
		ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);

	ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
	if (ipc_imem->mux)
		ipc_imem->mux->wwan = ipc_imem->wwan;

	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
			ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
			if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
				ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
						      chnl_cfg_port,
						      IRQ_MOD_OFF);
				ipc_imem->ipc_port[ctrl_chl_idx] =
					ipc_port_init(ipc_imem, chnl_cfg_port);
			}
		}
		ctrl_chl_idx++;
	}

	ipc_debugfs_init(ipc_imem);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
				 false);

	/* Complete all memory stores before setting bit */
	smp_mb__before_atomic();

	set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);

	/* Complete all memory stores after setting bit */
	smp_mb__after_atomic();
}

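/* Central MSI handler: update the CP controlled phase, process the message
 * ring and all open pipes, and re-arm the TD timers as needed.
 */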
static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
{
	enum ipc_mem_device_ipc_state curr_ipc_status;
	enum ipc_phase old_phase, phase;
	bool retry_allocation = false;
	bool ul_pending = false;
	int i;

	if (irq != IMEM_IRQ_DONT_CARE)
		ipc_imem->ev_irq_pending[irq] = false;

	/* Get the internal phase. */
	old_phase = ipc_imem->phase;

	if (old_phase == IPC_P_OFF_REQ) {
		dev_dbg(ipc_imem->dev,
			"[%s]: Ignoring MSI. Deinit sequence in progress!",
			ipc_imem_phase_get_string(old_phase));
		return;
	}

	/* Update the phase controlled by CP. */
	phase = ipc_imem_phase_update(ipc_imem);

	switch (phase) {
	case IPC_P_RUN:
		if (!ipc_imem->enter_runtime) {
			/* Execute the transition from flash/boot to runtime. */
			ipc_imem->enter_runtime = 1;

			/* allow device to sleep, default value is
			 * IPC_HOST_SLEEP_ENTER_SLEEP
			 */
			ipc_imem_msg_send_device_sleep(ipc_imem,
						       ipc_imem->device_sleep);

			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      true);
		}

		curr_ipc_status =
			ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);

		/* check ipc_status change */
		if (ipc_imem->ipc_status != curr_ipc_status) {
			ipc_imem->ipc_status = curr_ipc_status;

			if (ipc_imem->ipc_status ==
			    IPC_MEM_DEVICE_IPC_RUNNING) {
				schedule_work(&ipc_imem->run_state_worker);
			}
		}

		/* Consider power management in the runtime phase. */
		ipc_imem_slp_control_exec(ipc_imem);
		break; /* Continue with skbuf processing. */

		/* Unexpected phases. */
	case IPC_P_OFF:
	case IPC_P_OFF_REQ:
		dev_err(ipc_imem->dev, "confused phase %s",
			ipc_imem_phase_get_string(phase));
		return;

	case IPC_P_PSI:
		if (old_phase != IPC_P_ROM)
			break;

		fallthrough;
		/* On CP the PSI phase is already active. */

	case IPC_P_ROM:
		/* Before CP ROM driver starts the PSI image, it sets
		 * the exit_code field on the doorbell scratchpad and
		 * triggers the irq.
		 */
		ipc_imem_rom_irq_exec(ipc_imem);
		return;

	default:
		break;
	}

	/* process message ring */
	ipc_protocol_msg_process(ipc_imem, irq);

	/* process all open pipes */
	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
		struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;

		if (dl_pipe->is_open &&
		    (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
			ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);

			if (dl_pipe->nr_of_queued_entries == 0)
				retry_allocation = true;
		}

		if (ul_pipe->is_open)
			ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
	}

	/* Try to generate new ADB or ADGH. */
	if (ipc_mux_ul_data_encode(ipc_imem->mux))
		ipc_imem_td_update_timer_start(ipc_imem);

	/* Continue the send procedure with accumulated SIO or NETIF packets.
	 * Reset the debounce flags.
	 */
	ul_pending |= ipc_imem_ul_write_td(ipc_imem);

	/* if UL data is pending restart TD update timer */
	if (ul_pending) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}

	/* If CP has executed the transition
	 * from IPC_INIT to IPC_RUNNING in the PSI
	 * phase, wake up the flash app to open the pipes.
	 */
	if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
	    ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
	    ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
						IPC_MEM_DEVICE_IPC_RUNNING) {
		complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
	}

	/* Reset the expected CP state. */
	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

/* Callback by tasklet for handling interrupt events. */
static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
			      size_t size)
{
	ipc_imem_handle_irq(ipc_imem, arg);

	return 0;
}

void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
{
	/* start doorbell irq delay timer if UL is pending */
	if (ipc_imem_ul_write_td(ipc_imem))
		ipc_imem_td_update_timer_start(ipc_imem);
}

/* Check the execution stage and update the AP phase */
static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
						  enum ipc_mem_exec_stage stage)
{
	switch (stage) {
	case IPC_MEM_EXEC_STAGE_BOOT:
		if (ipc_imem->phase != IPC_P_ROM) {
			/* Send this event only once */
			ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
		}

		ipc_imem->phase = IPC_P_ROM;
		break;

	case IPC_MEM_EXEC_STAGE_PSI:
		ipc_imem->phase = IPC_P_PSI;
		break;

	case IPC_MEM_EXEC_STAGE_EBL:
		ipc_imem->phase = IPC_P_EBL;
		break;

	case IPC_MEM_EXEC_STAGE_RUN:
		if (ipc_imem->phase != IPC_P_RUN &&
		    ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
			ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
		}
		ipc_imem->phase = IPC_P_RUN;
		break;

	case IPC_MEM_EXEC_STAGE_CRASH:
		if (ipc_imem->phase != IPC_P_CRASH)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);

		ipc_imem->phase = IPC_P_CRASH;
		break;

	case IPC_MEM_EXEC_STAGE_CD_READY:
		if (ipc_imem->phase != IPC_P_CD_READY)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
		ipc_imem->phase = IPC_P_CD_READY;
		break;

	default:
		/* unknown exec stage:
		 * assume that link is down and send info to listeners
		 */
		ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
		break;
	}

	return ipc_imem->phase;
}

/* Send msg to device to open pipe */
static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
			       struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = {
		.pipe_open.pipe = pipe,
	};

	if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				  IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
		pipe->is_open = true;

	return pipe->is_open;
}

/* Allocates the TDs for the given pipe along with firing HP update DB. */
static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
				     void *msg, size_t size)
{
	struct ipc_pipe *dl_pipe = msg;
	bool processed = false;
	int i;

	for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
		processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);

	/* Trigger the doorbell irq to inform CP that new downlink buffers are
	 * available.
	 */
	if (processed)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);

	return 0;
}

static enum hrtimer_restart
ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, tdupdate_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

/* Get the CP execution state and map it to the AP phase. */
enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage exec_stage =
				ipc_imem_get_exec_stage_buffered(ipc_imem);
	/* If the CP stage is undef, return the internal precalculated phase. */
	return ipc_imem->phase == IPC_P_OFF_REQ ?
		       ipc_imem->phase :
		       ipc_imem_phase_update_check(ipc_imem, exec_stage);
}

const char *ipc_imem_phase_get_string(enum ipc_phase phase)
{
	switch (phase) {
	case IPC_P_RUN:
		return "A-RUN";

	case IPC_P_OFF:
		return "A-OFF";

	case IPC_P_ROM:
		return "A-ROM";

	case IPC_P_PSI:
		return "A-PSI";

	case IPC_P_EBL:
		return "A-EBL";

	case IPC_P_CRASH:
		return "A-CRASH";

	case IPC_P_CD_READY:
		return "A-CD_READY";

	case IPC_P_OFF_REQ:
		return "A-OFF_REQ";

	default:
		return "A-???";
	}
}

void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };

	pipe->is_open = false;
	ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
			      &prep_args);

	ipc_imem_pipe_cleanup(ipc_imem, pipe);
}

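/* Close the pipes of the channel (if the modem is running) and release the
 * channel resources.
 */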
void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
		return;
	}

	channel = &ipc_imem->channels[channel_id];

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel_id, channel->state);
		return;
	}

	/* Free only the channel id in the CP power off mode. */
	if (channel->state == IMEM_CHANNEL_RESERVED)
		/* Release only the channel id. */
		goto channel_free;

	if (ipc_imem->phase == IPC_P_RUN) {
		ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
		ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
	}

	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);

channel_free:
	ipc_imem_channel_free(channel);
}

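/* Open the UL and DL pipes of a reserved channel and pre-allocate the DL
 * buffers in tasklet context; returns the active channel or NULL on failure.
 */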
struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
					      int channel_id, u32 db_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
		return NULL;
	}

	channel = &ipc_imem->channels[channel_id];

	channel->state = IMEM_CHANNEL_ACTIVE;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
		goto ul_pipe_err;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
		goto dl_pipe_err;

	/* Allocate the downlink buffers in tasklet context. */
	if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
				     &channel->dl_pipe, 0, false)) {
		dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
		goto task_failed;
	}

	/* Active channel. */
	return channel;
task_failed:
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
dl_pipe_err:
	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ul_pipe_err:
	ipc_imem_channel_free(channel);
	return NULL;
}

void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
{
	ipc_protocol_suspend(ipc_imem->ipc_protocol);
}

void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
{
	ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
}

void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage stage;

	if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
		stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		ipc_imem_phase_update_check(ipc_imem, stage);
	}
}

void ipc_imem_channel_free(struct ipc_mem_channel *channel)
{
	/* Reset dynamic channel elements. */
	channel->state = IMEM_CHANNEL_FREE;
}

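/* Reserve the channel that matches the given type and index; returns the
 * channel array index or a negative errno.
 */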
int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
			   enum ipc_ctype ctype)
{
	struct ipc_mem_channel *channel;
	int i;

	/* Find channel of given type/index */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];
		if (channel->ctype == ctype && channel->index == index)
			break;
	}

	if (i >= ipc_imem->nr_of_channels) {
		dev_dbg(ipc_imem->dev,
			"no channel definition for index=%d ctype=%d", index,
			ctype);
		return -ECHRNG;
	}

	if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
		dev_dbg(ipc_imem->dev, "channel is in use");
		return -EBUSY;
	}

	if (channel->ctype == IPC_CTYPE_WWAN &&
	    index == IPC_MEM_MUX_IP_CH_IF_ID)
		channel->if_id = index;

	channel->channel_id = index;
	channel->state = IMEM_CHANNEL_RESERVED;

	return i;
}

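/* Populate the next free channel slot from the given channel configuration. */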
void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
			   struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
	    chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
		dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
			chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
		return;
	}

	if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "too many channels");
		return;
	}

	channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
	channel->channel_id = ipc_imem->nr_of_channels;
	channel->ctype = ctype;
	channel->index = chnl_cfg.id;
	channel->net_err_count = 0;
	channel->state = IMEM_CHANNEL_FREE;
	ipc_imem->nr_of_channels++;

	ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
				IRQ_MOD_OFF);

	skb_queue_head_init(&channel->ul_list);

	init_completion(&channel->ul_sem);
}

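/* Program the UL and DL pipe parameters of a free or reserved channel. */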
void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
			     struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (id < 0 || id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", id);
		return;
	}

	channel = &ipc_imem->channels[id];

	if (channel->state != IMEM_CHANNEL_FREE &&
	    channel->state != IMEM_CHANNEL_RESERVED) {
		dev_err(ipc_imem->dev, "invalid channel state %d",
			channel->state);
		return;
	}

	channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
	channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
	channel->ul_pipe.is_open = false;
	channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
	channel->ul_pipe.channel = channel;
	channel->ul_pipe.dir = IPC_MEM_DIR_UL;
	channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->ul_pipe.irq_moderation = irq_moderation;
	channel->ul_pipe.buf_size = 0;

	channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
	channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
	channel->dl_pipe.is_open = false;
	channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
	channel->dl_pipe.channel = channel;
	channel->dl_pipe.dir = IPC_MEM_DIR_DL;
	channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->dl_pipe.irq_moderation = irq_moderation;
	channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
}

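/* Clean up the pipes of all channels and return them to the free state. */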
static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
{
	int i;

	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		struct ipc_mem_channel *channel;

		channel = &ipc_imem->channels[i];

		ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
		ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);

		ipc_imem_channel_free(channel);
	}
}

void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	struct sk_buff *skb;

	/* Force pipe to closed state also when not explicitly closed through
	 * ipc_imem_pipe_close()
	 */
	pipe->is_open = false;

	/* Empty the uplink skb accumulator. */
	while ((skb = skb_dequeue(&pipe->channel->ul_list)))
		ipc_pcie_kfree_skb(ipc_imem->pcie, skb);

	ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
}

/* Send IPC protocol uninit to the modem when Link is active. */
static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
	enum ipc_mem_device_ipc_state ipc_state;

	/* If the PCIe link is up, set the modem to IPC_UNINIT; otherwise,
	 * when the PCIe link is down, skip it.
	 */
	if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
		/* set modem to UNINIT
		 * (in case we want to reload the AP driver without resetting
		 * the modem)
		 */
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_UNINIT);
		ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);

		/* Wait for maximum 30ms to allow the Modem to uninitialize the
		 * protocol.
		 */
		while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
		       (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
		       (timeout > 0)) {
			usleep_range(1000, 1250);
			timeout--;
			ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
		}
	}
}

void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
{
	ipc_imem->phase = IPC_P_OFF_REQ;

	/* forward MDM_NOT_READY to listeners */
	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);

	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);

	/* cancel the workqueue */
	cancel_work_sync(&ipc_imem->run_state_worker);

	if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
		ipc_mux_deinit(ipc_imem->mux);
		ipc_debugfs_deinit(ipc_imem);
		ipc_wwan_deinit(ipc_imem->wwan);
		ipc_port_deinit(ipc_imem->ipc_port);
	}

	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	ipc_imem_device_ipc_uninit(ipc_imem);
	ipc_imem_channel_reset(ipc_imem);

	ipc_protocol_deinit(ipc_imem->ipc_protocol);
	ipc_task_deinit(ipc_imem->ipc_task);

	kfree(ipc_imem->ipc_task);
	kfree(ipc_imem->mmio);

	ipc_imem->phase = IPC_P_OFF;
}

/* After CP has unblocked the PCIe link, save the start address of the doorbell
 * scratchpad and prepare the shared memory region. If the flashing to RAM
 * procedure shall be executed, copy the chip information from the doorbell
 * scratchpad to the application buffer and wake up the flash app.
 */
static int ipc_imem_config(struct iosm_imem *ipc_imem)
{
	enum ipc_phase phase;

	/* Initialize the semaphore for the blocking read UL/DL transfer. */
	init_completion(&ipc_imem->ul_pend_sem);

	init_completion(&ipc_imem->dl_pend_sem);

	/* clear internal flags */
	ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
	ipc_imem->enter_runtime = 0;

	phase = ipc_imem_phase_update(ipc_imem);

	/* Either CP shall be in the power off or power on phase. */
	switch (phase) {
	case IPC_P_ROM:
		ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
		/* poll execution stage (for delayed start, e.g. NAND) */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
		return 0;

	case IPC_P_PSI:
	case IPC_P_EBL:
	case IPC_P_RUN:
		/* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;

		/* Verify the expected initial state. */
		if (ipc_imem->ipc_requested_state ==
		    ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
			ipc_imem_ipc_init_check(ipc_imem);

			return 0;
		}
		dev_err(ipc_imem->dev,
			"ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
			ipc_mmio_get_ipc_state(ipc_imem->mmio));
		break;
	case IPC_P_CRASH:
	case IPC_P_CD_READY:
		dev_dbg(ipc_imem->dev,
			"Modem is in phase %d, reset Modem to collect CD",
			phase);
		return 0;
	default:
		dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
		break;
	}

	complete(&ipc_imem->dl_pend_sem);
	complete(&ipc_imem->ul_pend_sem);
	ipc_imem->phase = IPC_P_OFF;
	return -EIO;
}

/* Pass the dev ptr to the shared memory driver and request the entry points */
struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
				void __iomem *mmio, struct device *dev)
{
	struct iosm_imem *ipc_imem = kzalloc(sizeof(*ipc_imem), GFP_KERNEL);
	enum ipc_mem_exec_stage stage;

	if (!ipc_imem)
		return NULL;

	/* Save the device address. */
	ipc_imem->pcie = pcie;
	ipc_imem->dev = dev;

	ipc_imem->pci_device_id = device_id;

	ipc_imem->cp_version = 0;
	ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;

	/* Reset the max number of configured channels */
	ipc_imem->nr_of_channels = 0;

	/* allocate IPC MMIO */
	ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
	if (!ipc_imem->mmio) {
		dev_err(ipc_imem->dev, "failed to initialize mmio region");
		goto mmio_init_fail;
	}

	ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
				     GFP_KERNEL);

	/* Create tasklet for event handling. */
	if (!ipc_imem->ipc_task)
		goto ipc_task_fail;

	if (ipc_task_init(ipc_imem->ipc_task))
		goto ipc_task_init_fail;

	ipc_imem->ipc_task->dev = ipc_imem->dev;

	INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);

	ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);

	if (!ipc_imem->ipc_protocol)
		goto protocol_init_fail;

	/* The phase is set to power off. */
	ipc_imem->phase = IPC_P_OFF;

	hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;

	hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;

	hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;

	hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;

	if (ipc_imem_config(ipc_imem)) {
		dev_err(ipc_imem->dev, "failed to initialize the imem");
		goto imem_config_fail;
	}

	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
		/* Alloc and Register devlink */
		ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
		if (!ipc_imem->ipc_devlink) {
			dev_err(ipc_imem->dev, "Devlink register failed");
			goto imem_config_fail;
		}

		if (ipc_flash_link_establish(ipc_imem))
			goto devlink_channel_fail;

		set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
	}
	return ipc_imem;
devlink_channel_fail:
	ipc_devlink_deinit(ipc_imem->ipc_devlink);
imem_config_fail:
	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);
protocol_init_fail:
	cancel_work_sync(&ipc_imem->run_state_worker);
	ipc_task_deinit(ipc_imem->ipc_task);
ipc_task_init_fail:
	kfree(ipc_imem->ipc_task);
ipc_task_fail:
	kfree(ipc_imem->mmio);
mmio_init_fail:
	kfree(ipc_imem);
	return NULL;
}

void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
{
	/* Debounce IPC_EV_IRQ. */
	if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
		ipc_imem->ev_irq_pending[irq] = true;
		ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
					 NULL, 0, false);
	}
}

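/* Suspend or resume the use of the TD update timer. */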
void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
	ipc_imem->td_update_timer_suspended = suspend;
}

/* Verify the CP execution state, copy the chip info,
 * change the execution phase to ROM
 */
static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
						 int arg, void *msg,
						 size_t msgsize)
{
	enum ipc_mem_exec_stage stage;
	struct sk_buff *skb;
	int rc = -EINVAL;
	size_t size;

	/* Test the CP execution state. */
	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
		dev_err(ipc_imem->dev,
			"Execution_stage: expected BOOT, received = %X", stage);
		goto trigger_chip_info_fail;
	}
	/* Allocate a new sk buf for the chip info. */
	size = ipc_imem->mmio->chip_info_size;
	if (size > IOSM_CHIP_INFO_SIZE_MAX)
		goto trigger_chip_info_fail;

	skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
	if (!skb) {
		dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
		rc = -ENOMEM;
		goto trigger_chip_info_fail;
	}
	/* Copy the chip info characters into the ipc_skb. */
	ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
	/* First change to the ROM boot phase. */
	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
	ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
	ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
	rc = 0;
trigger_chip_info_fail:
	return rc;
}

int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
{
	return ipc_task_queue_send_task(ipc_imem,
					ipc_imem_devlink_trigger_chip_info_cb,
					0, NULL, 0, true);
}