// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"

/* Check whether the given channel is the WWAN IP channel. */
static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
{
	if (chnl)
		return chnl->ctype == IPC_CTYPE_WWAN &&
		       chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
	return false;
}

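/* Send the device-sleep message to CP and cache the requested state. */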
static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
{
	union ipc_msg_prep_args prep_args = {
		.sleep.target = 1,
		.sleep.state = state,
	};

	ipc_imem->device_sleep = state;

	return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					IPC_MSG_PREP_SLEEP, &prep_args, NULL);
}

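/* Prepare one new downlink buffer/TD for the pipe, unless its queue limit
 * has been reached.
 */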
static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
				  struct ipc_pipe *pipe)
{
	/* limit max. nr of entries */
	if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
		return false;

	return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
}

/* This timer handler retries the DL buffer allocation for pipes that have run
 * out of free buffers and rings the doorbell if new TDs became available.
 */
static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
				      void *msg, size_t size)
{
	bool new_buffers_available = false;
	bool retry_allocation = false;
	int i;

	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;

		if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
			continue;

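		/* Refill the empty pipe with as many fresh DL buffers as
		 * the protocol layer accepts.
		 */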
		while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
			new_buffers_available = true;

		if (pipe->nr_of_queued_entries == 0)
			retry_allocation = true;
	}

	if (new_buffers_available)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
	return 0;
}

static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, td_alloc_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
				 0, false);
	return HRTIMER_NORESTART;
}

/* Fast update timer tasklet handler to trigger HP update */
static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					    void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_FAST_TD_UPD_TMR);

	return 0;
}

static enum hrtimer_restart
ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, fast_update_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

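/* Read CP's MUX capability from MMIO and derive the MUX configuration. */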
static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
					  struct ipc_mux_config *cfg)
{
	ipc_mmio_update_cp_capability(ipc_imem->mmio);

	if (!ipc_imem->mmio->has_mux_lite) {
		dev_err(ipc_imem->dev, "Failed to get Mux capability.");
		return -EINVAL;
	}

	cfg->protocol = MUX_LITE;

	cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
			       MUX_UL_ON_CREDITS :
			       MUX_UL;

	/* The instance ID is the same as the channel ID because it is reused
	 * by the channel allocation function.
	 */
	cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;
	cfg->nr_sessions = IPC_MEM_MUX_IP_SESSION_ENTRIES;

	return 0;
}

void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
				   unsigned int reset_enable, bool atomic_ctx)
{
	union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
						      reset_enable };

	if (atomic_ctx)
		ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					 IPC_MSG_PREP_FEATURE_SET, &prep_args,
					 NULL);
	else
		ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				      IPC_MSG_PREP_FEATURE_SET, &prep_args);
}

void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
	/* Use the TD update timer only in the runtime phase */
	if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
		/* trigger the doorbell irq on CP directly. */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_TD_UPD_TMR_START);
		return;
	}

	if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		hrtimer_start(&ipc_imem->tdupdate_timer,
			      ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
	}
}

void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
{
	if (hrtimer_active(hr_timer))
		hrtimer_cancel(hr_timer);
}

bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	struct sk_buff_head *ul_list;
	bool hpda_pending = false;
	bool forced_hpdu = false;
	struct ipc_pipe *pipe;
	int i;

	/* Analyze the uplink pipe of all active channels. */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];

		if (channel->state != IMEM_CHANNEL_ACTIVE)
			continue;

		pipe = &channel->ul_pipe;

		/* Get the reference to the skbuf accumulator list. */
		ul_list = &channel->ul_list;

		/* Fill the transfer descriptor with the uplink buffer info. */
		hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);

		/* A forced HP update is needed for non-data channels. */
		if (hpda_pending && !ipc_imem_check_wwan_ips(channel))
			forced_hpdu = true;
	}

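	/* The doorbell below informs CP immediately, so no HP update remains
	 * pending for the timer path.
	 */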
	if (forced_hpdu) {
		hpda_pending = false;
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_UL_WRITE_TD);
	}

	return hpda_pending;
}

void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_BOOT_TIMEOUT;

	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

	/* Trigger the CP interrupt to enter the init state. */
	ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
			  IPC_MEM_DEVICE_IPC_INIT);
	/* Wait for the CP update. */
	do {
		if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		    ipc_imem->ipc_requested_state) {
			/* Prepare the MMIO space */
			ipc_mmio_config(ipc_imem->mmio);

			/* Trigger the CP irq to enter the running state. */
			ipc_imem->ipc_requested_state =
				IPC_MEM_DEVICE_IPC_RUNNING;
			ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
					  IPC_MEM_DEVICE_IPC_RUNNING);

			return;
		}
		msleep(20);
	} while (--timeout);

	/* timeout */
	dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
		ipc_imem_phase_get_string(ipc_imem->phase),
		ipc_mmio_get_ipc_state(ipc_imem->mmio));

	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
}

/* Analyze the packet type and distribute it. */
static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
				    struct ipc_pipe *pipe, struct sk_buff *skb)
{
	u16 port_id;

	if (!skb)
		return;

	/* An AT/control or IP packet is expected. */
	switch (pipe->channel->ctype) {
	case IPC_CTYPE_CTRL:
		port_id = pipe->channel->channel_id;
		if (port_id == IPC_MEM_CTRL_CHL_ID_7)
			ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
						       skb);
		else
			wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
				     skb);
		break;

	case IPC_CTYPE_WWAN:
		if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
			ipc_mux_dl_decode(ipc_imem->mux, skb);
		break;
	default:
		dev_err(ipc_imem->dev, "Invalid channel type");
		break;
	}
}

/* Process the downlink data and pass them to the char or net layer. */
static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	s32 cnt = 0, processed_td_cnt = 0;
	struct ipc_mem_channel *channel;
	u32 head = 0, tail = 0;
	bool processed = false;
	struct sk_buff *skb;

	channel = pipe->channel;

	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);
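	/* Count the TDs completed by CP since the last pass, taking the
	 * wrap-around of the circular TD ring into account.
	 */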
	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	processed_td_cnt = cnt;

	/* Process the pending TDs of this pipe. */
	while (cnt--) {
		skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);

		/* Analyze the packet type and distribute it. */
		ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
	}

	/* Try to allocate new empty DL SKBs from head..tail - 1. */
	while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
		processed = true;


	if (processed && !ipc_imem_check_wwan_ips(channel)) {
		/* Force HP update for non IP channels */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);
		processed = false;

		/* If the fast update timer is already running, stop it. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}

	/* Any control channel process will get an immediate HP update.
	 * Start the fast update timer only for an IP channel if all the TDs
	 * were used in the last process.
	 */
	if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		hrtimer_start(&ipc_imem->fast_update_timer,
			      ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
	}

	if (ipc_imem->app_notify_dl_pend)
		complete(&ipc_imem->dl_pend_sem);
}

/* Process an open uplink pipe. */
static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	struct ipc_mem_channel *channel;
	u32 tail = 0, head = 0;
	struct sk_buff *skb;
	s32 cnt = 0;

	channel = pipe->channel;

	/* Get the head and tail indices of the UL pipe. */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);

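	/* Count the TDs already processed by CP, taking ring wrap-around
	 * into account.
	 */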
	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	/* Free UL buffers. */
	while (cnt--) {
		skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);

		if (!skb)
			continue;

		/* If the user app was suspended in uplink direction - blocking
		 * write, resume it.
		 */
		if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
			complete(&channel->ul_sem);

		/* Free the skbuf element. */
		if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
			if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
				ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
			else
				dev_err(ipc_imem->dev,
					"OP Type is UL_MUX, unknown if_id %d",
					channel->if_id);
		} else {
			ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
		}
	}

	/* For the IP UL pipe, check the flow-control state and restart TX if
	 * credits allow it.
	 */
	if (ipc_imem_check_wwan_ips(pipe->channel))
		ipc_mux_check_n_restart_tx(ipc_imem->mux);

	if (ipc_imem->app_notify_ul_pend)
		complete(&ipc_imem->ul_pend_sem);
}

/* Handle the ROM irq: latch the CP exit code and wake up the flash app. */
static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
	ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
	complete(&channel->ul_sem);
}

/* Execute the UL bundle timer actions, generating the doorbell irq. */
static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					  void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_TD_UPD_TMR);
	return 0;
}

/* Consider link power management in the runtime phase. */
static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
{
	/* The link will go down; test for pending UL packets. */
	if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
	    hrtimer_active(&ipc_imem->tdupdate_timer)) {
		/* Generate the doorbell irq. */
		ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
		/* Stop the TD update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
		/* Stop the fast update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}
}

/* Execute the startup timer and wait for the delayed start (e.g. NAND). */
static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
					void *msg, size_t size)
{
	/* Update & check the current operation phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
		return -EIO;

	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
	    IPC_MEM_DEVICE_IPC_UNINIT) {
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_INIT);

		/* Reduce the period to 100 ms to check for mmio init state. */
		ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	} else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		   IPC_MEM_DEVICE_IPC_INIT) {
		/* Startup complete - disable the timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);

		/* Prepare the MMIO space */
		ipc_mmio_config(ipc_imem->mmio);
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_RUNNING);
	}

	return 0;
}

static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
{
	enum hrtimer_restart result = HRTIMER_NORESTART;
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, startup_timer);

	if (ktime_to_ns(ipc_imem->hrtimer_period)) {
		hrtimer_forward_now(&ipc_imem->startup_timer,
				    ipc_imem->hrtimer_period);
		result = HRTIMER_RESTART;
	}

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
				 NULL, 0, false);
	return result;
}

/* Get the CP execution stage */
static enum ipc_mem_exec_stage
ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
{
	return (ipc_imem->phase == IPC_P_RUN &&
		ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
		       ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
		       ipc_mmio_get_exec_stage(ipc_imem->mmio);
}

/* Callback to send the modem ready uevent */
static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	enum ipc_mem_exec_stage exec_stage =
		ipc_imem_get_exec_stage_buffered(ipc_imem);

	if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
		ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);

	return 0;
}

/* This function is executed in a task context via an ipc_worker object,
 * as the creation or removal of a device can't be done from tasklet context.
 */
static void ipc_imem_run_state_worker(struct work_struct *instance)
{
	struct ipc_chnl_cfg chnl_cfg_port = { 0 };
	struct ipc_mux_config mux_cfg;
	struct iosm_imem *ipc_imem;
	u8 ctrl_chl_idx = 0;

	ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);

	if (ipc_imem->phase != IPC_P_RUN) {
		dev_err(ipc_imem->dev,
			"Modem link down. Exit run state worker.");
		return;
	}

	if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
		ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);

	ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
	if (ipc_imem->mux)
		ipc_imem->mux->wwan = ipc_imem->wwan;

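	/* Walk all control channel configurations and register a WWAN port
	 * for every entry with a known port type.
	 */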
	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
			ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
			if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
				ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
						      chnl_cfg_port,
						      IRQ_MOD_OFF);
				ipc_imem->ipc_port[ctrl_chl_idx] =
					ipc_port_init(ipc_imem, chnl_cfg_port);
			}
		}
		ctrl_chl_idx++;
	}

	ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
				 false);

	/* Complete all memory stores before setting bit */
	smp_mb__before_atomic();

	set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);

	/* Complete all memory stores after setting bit */
	smp_mb__after_atomic();
}

static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
{
	enum ipc_mem_device_ipc_state curr_ipc_status;
	enum ipc_phase old_phase, phase;
	bool retry_allocation = false;
	bool ul_pending = false;
	int i;

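	/* Reset the debounce flag so further MSIs on this vector are handled
	 * again (see ipc_imem_irq_process()).
	 */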
	if (irq != IMEM_IRQ_DONT_CARE)
		ipc_imem->ev_irq_pending[irq] = false;

	/* Get the internal phase. */
	old_phase = ipc_imem->phase;

	if (old_phase == IPC_P_OFF_REQ) {
		dev_dbg(ipc_imem->dev,
			"[%s]: Ignoring MSI. Deinit sequence in progress!",
			ipc_imem_phase_get_string(old_phase));
		return;
	}

	/* Update the phase controlled by CP. */
	phase = ipc_imem_phase_update(ipc_imem);

	switch (phase) {
	case IPC_P_RUN:
		if (!ipc_imem->enter_runtime) {
			/* Execute the transition from flash/boot to runtime. */
			ipc_imem->enter_runtime = 1;

			/* allow device to sleep, default value is
			 * IPC_HOST_SLEEP_ENTER_SLEEP
			 */
			ipc_imem_msg_send_device_sleep(ipc_imem,
						       ipc_imem->device_sleep);

			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      true);
		}

		curr_ipc_status =
			ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);

		/* check ipc_status change */
		if (ipc_imem->ipc_status != curr_ipc_status) {
			ipc_imem->ipc_status = curr_ipc_status;

			if (ipc_imem->ipc_status ==
			    IPC_MEM_DEVICE_IPC_RUNNING) {
				schedule_work(&ipc_imem->run_state_worker);
			}
		}

		/* Consider power management in the runtime phase. */
		ipc_imem_slp_control_exec(ipc_imem);
		break; /* Continue with skbuf processing. */

		/* Unexpected phases. */
	case IPC_P_OFF:
	case IPC_P_OFF_REQ:
		dev_err(ipc_imem->dev, "confused phase %s",
			ipc_imem_phase_get_string(phase));
		return;

	case IPC_P_PSI:
		if (old_phase != IPC_P_ROM)
			break;

		fallthrough;
		/* On CP the PSI phase is already active. */

	case IPC_P_ROM:
		/* Before CP ROM driver starts the PSI image, it sets
		 * the exit_code field on the doorbell scratchpad and
		 * triggers the irq.
		 */
		ipc_imem_rom_irq_exec(ipc_imem);
		return;

	default:
		break;
	}

	/* process message ring */
	ipc_protocol_msg_process(ipc_imem, irq);

	/* process all open pipes */
	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
		struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;

		if (dl_pipe->is_open &&
		    (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
			ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);

			if (dl_pipe->nr_of_queued_entries == 0)
				retry_allocation = true;
		}

		if (ul_pipe->is_open)
			ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
	}

	/* Try to generate new ADB or ADGH. */
	if (ipc_mux_ul_data_encode(ipc_imem->mux))
		ipc_imem_td_update_timer_start(ipc_imem);

	/* Continue the send procedure with accumulated SIO or NETIF packets.
	 * Reset the debounce flags.
	 */
	ul_pending |= ipc_imem_ul_write_td(ipc_imem);

	/* if UL data is pending restart TD update timer */
	if (ul_pending) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}

	/* If CP has executed the transition
	 * from IPC_INIT to IPC_RUNNING in the PSI
	 * phase, wake up the flash app to open the pipes.
	 */
	if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
	    ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
	    ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
						IPC_MEM_DEVICE_IPC_RUNNING) {
		complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
	}

	/* Reset the expected CP state. */
	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

/* Callback by tasklet for handling interrupt events. */
static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
			      size_t size)
{
	ipc_imem_handle_irq(ipc_imem, arg);

	return 0;
}

void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
{
	/* start doorbell irq delay timer if UL is pending */
	if (ipc_imem_ul_write_td(ipc_imem))
		ipc_imem_td_update_timer_start(ipc_imem);
}

/* Check the execution stage and update the AP phase */
static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
						  enum ipc_mem_exec_stage stage)
{
	switch (stage) {
	case IPC_MEM_EXEC_STAGE_BOOT:
		if (ipc_imem->phase != IPC_P_ROM) {
			/* Send this event only once */
			ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
		}

		ipc_imem->phase = IPC_P_ROM;
		break;

	case IPC_MEM_EXEC_STAGE_PSI:
		ipc_imem->phase = IPC_P_PSI;
		break;

	case IPC_MEM_EXEC_STAGE_EBL:
		ipc_imem->phase = IPC_P_EBL;
		break;

	case IPC_MEM_EXEC_STAGE_RUN:
		if (ipc_imem->phase != IPC_P_RUN &&
		    ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
			ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
		}
		ipc_imem->phase = IPC_P_RUN;
		break;

	case IPC_MEM_EXEC_STAGE_CRASH:
		if (ipc_imem->phase != IPC_P_CRASH)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);

		ipc_imem->phase = IPC_P_CRASH;
		break;

	case IPC_MEM_EXEC_STAGE_CD_READY:
		if (ipc_imem->phase != IPC_P_CD_READY)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
		ipc_imem->phase = IPC_P_CD_READY;
		break;

	default:
		/* unknown exec stage:
		 * assume that link is down and send info to listeners
		 */
		ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
		break;
	}

	return ipc_imem->phase;
}

/* Send msg to device to open pipe */
static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
			       struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = {
		.pipe_open.pipe = pipe,
	};

	if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				  IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
		pipe->is_open = true;

	return pipe->is_open;
}

/* Allocates the TDs for the given pipe along with firing HP update DB. */
static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
				     void *msg, size_t size)
{
	struct ipc_pipe *dl_pipe = msg;
	bool processed = false;
	int i;

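	/* Fill all but one entry; presumably one slot is kept free so that a
	 * full ring (head == tail) stays distinguishable from an empty one,
	 * a common circular-buffer convention (assumption, not spelled out
	 * in this file).
	 */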
	for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
		processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);

	/* Trigger the doorbell irq to inform CP that new downlink buffers are
	 * available.
	 */
	if (processed)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);

	return 0;
}

static enum hrtimer_restart
ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, tdupdate_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

/* Get the CP execution state and map it to the AP phase. */
enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage exec_stage =
				ipc_imem_get_exec_stage_buffered(ipc_imem);
	/* During deinit (IPC_P_OFF_REQ) keep the internal phase; otherwise
	 * map the CP execution stage to the AP phase.
	 */
	return ipc_imem->phase == IPC_P_OFF_REQ ?
		       ipc_imem->phase :
		       ipc_imem_phase_update_check(ipc_imem, exec_stage);
}

const char *ipc_imem_phase_get_string(enum ipc_phase phase)
{
	switch (phase) {
	case IPC_P_RUN:
		return "A-RUN";

	case IPC_P_OFF:
		return "A-OFF";

	case IPC_P_ROM:
		return "A-ROM";

	case IPC_P_PSI:
		return "A-PSI";

	case IPC_P_EBL:
		return "A-EBL";

	case IPC_P_CRASH:
		return "A-CRASH";

	case IPC_P_CD_READY:
		return "A-CD_READY";

	case IPC_P_OFF_REQ:
		return "A-OFF_REQ";

	default:
		return "A-???";
	}
}

void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };

	pipe->is_open = false;
	ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
			      &prep_args);

	ipc_imem_pipe_cleanup(ipc_imem, pipe);
}

void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
		return;
	}

	channel = &ipc_imem->channels[channel_id];

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel_id, channel->state);
		return;
	}

	/* Free only the channel id in the CP power off mode. */
	if (channel->state == IMEM_CHANNEL_RESERVED)
		/* Release only the channel id. */
		goto channel_free;

	if (ipc_imem->phase == IPC_P_RUN) {
		ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
		ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
	}

	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);

channel_free:
	ipc_imem_channel_free(channel);
}

struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
					      int channel_id, u32 db_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
		return NULL;
	}

	channel = &ipc_imem->channels[channel_id];

	channel->state = IMEM_CHANNEL_ACTIVE;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
		goto ul_pipe_err;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
		goto dl_pipe_err;

	/* Allocate the downlink buffers in tasklet context. */
	if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
				     &channel->dl_pipe, 0, false)) {
		dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
		goto task_failed;
	}

	/* Active channel. */
	return channel;
task_failed:
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
dl_pipe_err:
	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ul_pipe_err:
	ipc_imem_channel_free(channel);
	return NULL;
}
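
/* A minimal usage sketch (assuming a caller that already allocated the
 * channel and owns a valid channel_id and doorbell id db_id):
 *
 *	channel = ipc_imem_channel_open(ipc_imem, channel_id, db_id);
 *	if (!channel)
 *		return -EIO;
 *	...
 *	ipc_imem_channel_close(ipc_imem, channel_id);
 */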

void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
{
	ipc_protocol_suspend(ipc_imem->ipc_protocol);
}

void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
{
	ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
}

void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage stage;

	if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
		stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		ipc_imem_phase_update_check(ipc_imem, stage);
	}
}

void ipc_imem_channel_free(struct ipc_mem_channel *channel)
{
	/* Reset dynamic channel elements. */
	channel->state = IMEM_CHANNEL_FREE;
}

int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
			   enum ipc_ctype ctype)
{
	struct ipc_mem_channel *channel;
	int i;

	/* Find the channel of the given type/index. */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];
		if (channel->ctype == ctype && channel->index == index)
			break;
	}

	if (i >= ipc_imem->nr_of_channels) {
		dev_dbg(ipc_imem->dev,
			"no channel definition for index=%d ctype=%d", index,
			ctype);
		return -ECHRNG;
	}

	if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
		dev_dbg(ipc_imem->dev, "channel is in use");
		return -EBUSY;
	}

	if (channel->ctype == IPC_CTYPE_WWAN &&
	    index == IPC_MEM_MUX_IP_CH_IF_ID)
		channel->if_id = index;

	channel->channel_id = index;
	channel->state = IMEM_CHANNEL_RESERVED;

	return i;
}

void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
			   struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
	    chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
		dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
			chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
		return;
	}

	if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "too many channels");
		return;
	}

	channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
	channel->channel_id = ipc_imem->nr_of_channels;
	channel->ctype = ctype;
	channel->index = chnl_cfg.id;
	channel->net_err_count = 0;
	channel->state = IMEM_CHANNEL_FREE;
	ipc_imem->nr_of_channels++;

	ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
				IRQ_MOD_OFF);

	skb_queue_head_init(&channel->ul_list);

	init_completion(&channel->ul_sem);
}

void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
			     struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (id < 0 || id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", id);
		return;
	}

	channel = &ipc_imem->channels[id];

	if (channel->state != IMEM_CHANNEL_FREE &&
	    channel->state != IMEM_CHANNEL_RESERVED) {
		dev_err(ipc_imem->dev, "invalid channel state %d",
			channel->state);
		return;
	}

	channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
	channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
	channel->ul_pipe.is_open = false;
	channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
	channel->ul_pipe.channel = channel;
	channel->ul_pipe.dir = IPC_MEM_DIR_UL;
	channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->ul_pipe.irq_moderation = irq_moderation;
	channel->ul_pipe.buf_size = 0;

	channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
	channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
	channel->dl_pipe.is_open = false;
	channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
	channel->dl_pipe.channel = channel;
	channel->dl_pipe.dir = IPC_MEM_DIR_DL;
	channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->dl_pipe.irq_moderation = irq_moderation;
	channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
}

static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
{
	int i;

	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		struct ipc_mem_channel *channel;

		channel = &ipc_imem->channels[i];

		ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
		ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);

		ipc_imem_channel_free(channel);
	}
}

void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	struct sk_buff *skb;

	/* Force pipe to closed state also when not explicitly closed through
	 * ipc_imem_pipe_close()
	 */
	pipe->is_open = false;

	/* Empty the uplink skb accumulator. */
	while ((skb = skb_dequeue(&pipe->channel->ul_list)))
		ipc_pcie_kfree_skb(ipc_imem->pcie, skb);

	ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
}

/* Send IPC protocol uninit to the modem when the link is active. */
static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
	enum ipc_mem_device_ipc_state ipc_state;

	/* If the PCIe link is up, set the modem to IPC_UNINIT; otherwise skip
	 * it, since the link is already down.
	 */
	if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
		/* set modem to UNINIT
		 * (in case we want to reload the AP driver without resetting
		 * the modem)
		 */
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_UNINIT);
		ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);

		/* Wait for a maximum of 30ms to allow the modem to
		 * uninitialize the protocol.
		 */
		while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
		       (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
		       (timeout > 0)) {
			usleep_range(1000, 1250);
			timeout--;
			ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
		}
	}
}

void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
{
	ipc_imem->phase = IPC_P_OFF_REQ;

	/* forward MDM_NOT_READY to listeners */
	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);

	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);

	/* cancel the workqueue */
	cancel_work_sync(&ipc_imem->run_state_worker);

	if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
		ipc_mux_deinit(ipc_imem->mux);
		ipc_wwan_deinit(ipc_imem->wwan);
		ipc_port_deinit(ipc_imem->ipc_port);
	}

	if (ipc_imem->ipc_devlink)
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	ipc_imem_device_ipc_uninit(ipc_imem);
	ipc_imem_channel_reset(ipc_imem);

	ipc_protocol_deinit(ipc_imem->ipc_protocol);
	ipc_task_deinit(ipc_imem->ipc_task);

	kfree(ipc_imem->ipc_task);
	kfree(ipc_imem->mmio);

	ipc_imem->phase = IPC_P_OFF;
}

/* After CP has unblocked the PCIe link, save the start address of the doorbell
 * scratchpad and prepare the shared memory region. If the flashing to RAM
 * procedure shall be executed, copy the chip information from the doorbell
 * scratchpad to the application buffer and wake up the flash app.
 */
static int ipc_imem_config(struct iosm_imem *ipc_imem)
{
	enum ipc_phase phase;

	/* Initialize the completions for the blocking UL/DL transfers. */
	init_completion(&ipc_imem->ul_pend_sem);

	init_completion(&ipc_imem->dl_pend_sem);

	/* clear internal flags */
	ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
	ipc_imem->enter_runtime = 0;

	phase = ipc_imem_phase_update(ipc_imem);

	/* CP shall be either in the power off or power on phase. */
	switch (phase) {
	case IPC_P_ROM:
		ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
		/* poll execution stage (for delayed start, e.g. NAND) */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
		return 0;

	case IPC_P_PSI:
	case IPC_P_EBL:
	case IPC_P_RUN:
		/* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;

		/* Verify the expected initial state. */
		if (ipc_imem->ipc_requested_state ==
		    ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
			ipc_imem_ipc_init_check(ipc_imem);

			return 0;
		}
		dev_err(ipc_imem->dev,
			"ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
			ipc_mmio_get_ipc_state(ipc_imem->mmio));
		break;
	case IPC_P_CRASH:
	case IPC_P_CD_READY:
		dev_dbg(ipc_imem->dev,
			"Modem is in phase %d, reset Modem to collect CD",
			phase);
		return 0;
	default:
		dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
		break;
	}

	complete(&ipc_imem->dl_pend_sem);
	complete(&ipc_imem->ul_pend_sem);
	ipc_imem->phase = IPC_P_OFF;
	return -EIO;
}

/* Pass the dev ptr to the shared memory driver and request the entry points */
struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
				void __iomem *mmio, struct device *dev)
{
	struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
	enum ipc_mem_exec_stage stage;

	if (!ipc_imem)
		return NULL;

	/* Save the device address. */
	ipc_imem->pcie = pcie;
	ipc_imem->dev = dev;

	ipc_imem->pci_device_id = device_id;

	ipc_imem->ev_cdev_write_pending = false;
	ipc_imem->cp_version = 0;
	ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;

	/* Reset the max number of configured channels */
	ipc_imem->nr_of_channels = 0;

	/* allocate IPC MMIO */
	ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
	if (!ipc_imem->mmio) {
		dev_err(ipc_imem->dev, "failed to initialize mmio region");
		goto mmio_init_fail;
	}

	ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
				     GFP_KERNEL);

	/* Create the tasklet for event handling. */
	if (!ipc_imem->ipc_task)
		goto ipc_task_fail;

	if (ipc_task_init(ipc_imem->ipc_task))
		goto ipc_task_init_fail;

	ipc_imem->ipc_task->dev = ipc_imem->dev;

	INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);

	ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);

	if (!ipc_imem->ipc_protocol)
		goto protocol_init_fail;

	/* The phase is set to power off. */
	ipc_imem->phase = IPC_P_OFF;

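	/* Set up the driver-owned hrtimers; they are armed later from
	 * tasklet context.
	 */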
	hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;

	hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;

	hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;

	hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;

	if (ipc_imem_config(ipc_imem)) {
		dev_err(ipc_imem->dev, "failed to initialize the imem");
		goto imem_config_fail;
	}

	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
		/* Alloc and Register devlink */
		ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
		if (!ipc_imem->ipc_devlink) {
			dev_err(ipc_imem->dev, "Devlink register failed");
			goto imem_config_fail;
		}

		if (ipc_flash_link_establish(ipc_imem))
			goto devlink_channel_fail;
	}
	return ipc_imem;
devlink_channel_fail:
	ipc_devlink_deinit(ipc_imem->ipc_devlink);
imem_config_fail:
	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);
protocol_init_fail:
	cancel_work_sync(&ipc_imem->run_state_worker);
	ipc_task_deinit(ipc_imem->ipc_task);
ipc_task_init_fail:
	kfree(ipc_imem->ipc_task);
ipc_task_fail:
	kfree(ipc_imem->mmio);
mmio_init_fail:
	kfree(ipc_imem);
	return NULL;
}

void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
{
	/* Debounce IPC_EV_IRQ. */
	if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
		ipc_imem->ev_irq_pending[irq] = true;
		ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
					 NULL, 0, false);
	}
}

void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
	ipc_imem->td_update_timer_suspended = suspend;
}

/* Verify the CP execution state, copy the chip info,
 * change the execution phase to ROM
 */
static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
						 int arg, void *msg,
						 size_t msgsize)
{
	enum ipc_mem_exec_stage stage;
	struct sk_buff *skb;
	int rc = -EINVAL;
	size_t size;

	/* Test the CP execution state. */
	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
		dev_err(ipc_imem->dev,
			"Execution_stage: expected BOOT, received = %X", stage);
		goto trigger_chip_info_fail;
	}
	/* Allocate a new sk buf for the chip info. */
	size = ipc_imem->mmio->chip_info_size;
	if (size > IOSM_CHIP_INFO_SIZE_MAX)
		goto trigger_chip_info_fail;

	skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
	if (!skb) {
		dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
		rc = -ENOMEM;
		goto trigger_chip_info_fail;
	}
	/* Copy the chip info characters into the ipc_skb. */
	ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
	/* First change to the ROM boot phase. */
	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
	ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
	ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
	rc = 0;
trigger_chip_info_fail:
	return rc;
}

int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
{
	return ipc_task_queue_send_task(ipc_imem,
					ipc_imem_devlink_trigger_chip_info_cb,
					0, NULL, 0, true);
}