// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_trace.h"
#include "iosm_ipc_debugfs.h"

/* Check if the given channel is the WWAN IP channel. */
static bool ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
{
	if (chnl)
		return chnl->ctype == IPC_CTYPE_WWAN &&
		       chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
	return false;
}

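/* Send a device sleep message to CP and cache the requested sleep state. */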
static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
{
	union ipc_msg_prep_args prep_args = {
		.sleep.target = 1,
		.sleep.state = state,
	};

	ipc_imem->device_sleep = state;

	return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					IPC_MSG_PREP_SLEEP, &prep_args, NULL);
}

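/* Prepare a new DL buffer for the pipe unless its queue limit is reached. */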
static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
				  struct ipc_pipe *pipe)
{
	/* limit max. nr of entries */
	if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
		return false;

	return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
}

/* This timer handler retries the DL buffer allocation for pipes that have no
 * free buffers and rings the doorbell if new TDs are available.
 */
static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
				      void *msg, size_t size)
{
	bool new_buffers_available = false;
	bool retry_allocation = false;
	int i;

	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;

		if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
			continue;

		while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
			new_buffers_available = true;

		if (pipe->nr_of_queued_entries == 0)
			retry_allocation = true;
	}

	if (new_buffers_available)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
	return 0;
}

static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, td_alloc_timer);
	/* Post an async tasklet event to retry the DL buffer allocation. */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
				 0, false);
	return HRTIMER_NORESTART;
}

/* Fast update timer tasklet handler to trigger HP update */
static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					    void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_FAST_TD_UPD_TMR);

	return 0;
}

static enum hrtimer_restart
ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, fast_update_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

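/* Tasklet handler to finish the current aggregated data block (ADB). */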
static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	ipc_mux_ul_adb_finish(ipc_imem->mux);
	return 0;
}

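/* hrtimer callback: defer the ADB finish to tasklet context. */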
static enum hrtimer_restart
ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, adb_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

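/* Read the MUX capability from MMIO and derive the MUX configuration. */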
static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
					  struct ipc_mux_config *cfg)
{
	ipc_mmio_update_cp_capability(ipc_imem->mmio);

	if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
		dev_err(ipc_imem->dev, "Failed to get Mux capability.");
		return -EINVAL;
	}

	cfg->protocol = ipc_imem->mmio->mux_protocol;

	cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
			       MUX_UL_ON_CREDITS :
			       MUX_UL;

	/* The instance ID is the same as the channel ID because it is reused
	 * by the channel allocation function.
	 */
	cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;

	return 0;
}

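/* Send the feature-set message to CP, from tasklet or process context. */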
void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
				   unsigned int reset_enable, bool atomic_ctx)
{
	union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
						      reset_enable };

	if (atomic_ctx)
		ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					 IPC_MSG_PREP_FEATURE_SET, &prep_args,
					 NULL);
	else
		ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				      IPC_MSG_PREP_FEATURE_SET, &prep_args);
}

/**
 * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not started.
 * @ipc_imem:                       Pointer to imem data-struct
 */
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
	/* Use the TD update timer only in the runtime phase */
	if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
		/* trigger the doorbell irq on CP directly. */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_TD_UPD_TMR_START);
		return;
	}

	if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		hrtimer_start(&ipc_imem->tdupdate_timer,
			      ipc_imem->hrtimer_period,
			      HRTIMER_MODE_REL);
	}
}

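/* Cancel an hrtimer if it is currently armed. */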
void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
{
	if (hrtimer_active(hr_timer))
		hrtimer_cancel(hr_timer);
}

/**
 * ipc_imem_adb_timer_start -	Starts the ADB timer if it is not already
 *				running.
 * @ipc_imem:			Pointer to imem data-struct
 */
void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
{
	if (!hrtimer_active(&ipc_imem->adb_timer)) {
		ipc_imem->hrtimer_period =
			ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
		hrtimer_start(&ipc_imem->adb_timer,
			      ipc_imem->hrtimer_period,
			      HRTIMER_MODE_REL);
	}
}

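/* Send the accumulated UL packets of all active channels as transfer
 * descriptors; returns true if an HP update for IP data is still pending.
 */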
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	bool hpda_ctrl_pending = false;
	struct sk_buff_head *ul_list;
	bool hpda_pending = false;
	struct ipc_pipe *pipe;
	int i;

	/* Analyze the uplink pipe of all active channels. */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];

		if (channel->state != IMEM_CHANNEL_ACTIVE)
			continue;

		pipe = &channel->ul_pipe;

		/* Get the reference to the skbuf accumulator list. */
		ul_list = &channel->ul_list;

		/* Fill the transfer descriptor with the uplink buffer info. */
		if (!ipc_imem_check_wwan_ips(channel)) {
			hpda_ctrl_pending |=
				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);
		} else {
			hpda_pending |=
				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);
		}
	}

	/* A forced HP update is needed for the non-data channels. */
	if (hpda_ctrl_pending) {
		hpda_pending = false;
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_UL_WRITE_TD);
	}

	return hpda_pending;
}

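/* Drive the CP through the IPC_INIT state and request IPC_RUNNING, polling
 * the MMIO IPC state until the boot timeout expires.
 */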
void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_BOOT_TIMEOUT;

	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

	/* Trigger the CP interrupt to enter the init state. */
	ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
			  IPC_MEM_DEVICE_IPC_INIT);
	/* Wait for the CP update. */
	do {
		if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		    ipc_imem->ipc_requested_state) {
			/* Prepare the MMIO space */
			ipc_mmio_config(ipc_imem->mmio);

			/* Trigger the CP irq to enter the running state. */
			ipc_imem->ipc_requested_state =
				IPC_MEM_DEVICE_IPC_RUNNING;
			ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
					  IPC_MEM_DEVICE_IPC_RUNNING);

			return;
		}
		msleep(20);
	} while (--timeout);

	/* timeout */
	dev_err(ipc_imem->dev, "%s: ipc_status(%d) != IPC_MEM_DEVICE_IPC_INIT",
		ipc_imem_phase_get_string(ipc_imem->phase),
		ipc_mmio_get_ipc_state(ipc_imem->mmio));

	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
}

/* Analyze the packet type and distribute it. */
static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
				    struct ipc_pipe *pipe, struct sk_buff *skb)
{
	u16 port_id;

	if (!skb)
		return;

	/* An AT/control or IP packet is expected. */
	switch (pipe->channel->ctype) {
	case IPC_CTYPE_CTRL:
		port_id = pipe->channel->channel_id;
		ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
				    IPC_CB(skb)->mapping,
				    IPC_CB(skb)->direction);
		if (port_id == IPC_MEM_CTRL_CHL_ID_7)
			ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
						       skb);
		else if (ipc_is_trace_channel(ipc_imem, port_id))
			ipc_trace_port_rx(ipc_imem, skb);
		else
			wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
				     skb);
		break;

	case IPC_CTYPE_WWAN:
		if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
			ipc_mux_dl_decode(ipc_imem->mux, skb);
		break;
	default:
		dev_err(ipc_imem->dev, "Invalid channel type");
		break;
	}
}

/* Process the downlink data and pass them to the char or net layer. */
static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	s32 cnt = 0, processed_td_cnt = 0;
	struct ipc_mem_channel *channel;
	u32 head = 0, tail = 0;
	bool processed = false;
	struct sk_buff *skb;

	channel = pipe->channel;

	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);
	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	processed_td_cnt = cnt;

	/* Seek for pipes with pending DL data. */
	while (cnt--) {
		skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);

		/* Analyze the packet type and distribute it. */
		ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
	}

	/* Try to allocate new empty DL SKBs from head..tail - 1. */
	while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
		processed = true;

	if (processed && !ipc_imem_check_wwan_ips(channel)) {
		/* Force HP update for non IP channels */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);
		processed = false;

		/* If the fast update timer is already running, stop it. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}

	/* Any control channel process will get immediate HP update.
	 * Start the fast update timer only for an IP channel if all the TDs
	 * were used in the last processing run.
	 */
	if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		hrtimer_start(&ipc_imem->fast_update_timer,
			      ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
	}

	if (ipc_imem->app_notify_dl_pend)
		complete(&ipc_imem->dl_pend_sem);
}

/* process open uplink pipe */
static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	struct ipc_mem_channel *channel;
	u32 tail = 0, head = 0;
	struct sk_buff *skb;
	s32 cnt = 0;

	channel = pipe->channel;

	/* Get the pipe's head and tail indices. */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);

	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	/* Free UL buffers. */
	while (cnt--) {
		skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);

		if (!skb)
			continue;

		/* If the user app was suspended in uplink direction - blocking
		 * write, resume it.
		 */
		if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
			complete(&channel->ul_sem);

		/* Free the skbuf element. */
		if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
			if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
				ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
			else
				dev_err(ipc_imem->dev,
					"OP Type is UL_MUX, unknown if_id %d",
					channel->if_id);
		} else {
			ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
		}
	}

	/* Check the MUX state and restart UL TX for the IP channel if needed. */
	if (ipc_imem_check_wwan_ips(pipe->channel))
		ipc_mux_check_n_restart_tx(ipc_imem->mux);

	if (ipc_imem->app_notify_ul_pend)
		complete(&ipc_imem->ul_pend_sem);
}

/* Handle the ROM doorbell irq: latch the ROM exit code and wake the waiting
 * flash app.
 */
static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
	ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
	complete(&channel->ul_sem);
}

/* Execute the UL bundle timer actions, generating the doorbell irq. */
static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					  void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_TD_UPD_TMR);
	return 0;
}

/* Consider link power management in the runtime phase. */
static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
{
	/* The link will go down; test for pending UL packets. */
	if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
	    hrtimer_active(&ipc_imem->tdupdate_timer)) {
		/* Generate the doorbell irq. */
		ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
		/* Stop the TD update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
		/* Stop the fast update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}
}

/* Execute startup timer and wait for delayed start (e.g. NAND) */
static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
					void *msg, size_t size)
{
	/* Update & check the current operation phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
		return -EIO;

	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
	    IPC_MEM_DEVICE_IPC_UNINIT) {
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_INIT);

		/* Reduce the period to 100 ms to check for the mmio init
		 * state.
		 */
		ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	} else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		   IPC_MEM_DEVICE_IPC_INIT) {
		/* Startup complete - disable the timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);

		/* Prepare the MMIO space */
		ipc_mmio_config(ipc_imem->mmio);
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_RUNNING);
	}

	return 0;
}

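/* hrtimer callback: rearm the startup timer and defer the state check to
 * tasklet context.
 */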
static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
{
	enum hrtimer_restart result = HRTIMER_NORESTART;
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, startup_timer);

	if (ktime_to_ns(ipc_imem->hrtimer_period)) {
		hrtimer_forward_now(&ipc_imem->startup_timer,
				    ipc_imem->hrtimer_period);
		result = HRTIMER_RESTART;
	}

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
				 NULL, 0, false);
	return result;
}

/* Get the CP execution stage */
static enum ipc_mem_exec_stage
ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
{
	return (ipc_imem->phase == IPC_P_RUN &&
		ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
		       ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
		       ipc_mmio_get_exec_stage(ipc_imem->mmio);
}

/* Callback to send the modem ready uevent */
static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	enum ipc_mem_exec_stage exec_stage =
		ipc_imem_get_exec_stage_buffered(ipc_imem);

	if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
		ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);

	return 0;
}

/* This function is executed in a task context via an ipc_worker object,
 * as the creation or removal of a device can't be done from tasklet context.
 */
static void ipc_imem_run_state_worker(struct work_struct *instance)
{
	struct ipc_chnl_cfg chnl_cfg_port = { 0 };
	struct ipc_mux_config mux_cfg;
	struct iosm_imem *ipc_imem;
	u8 ctrl_chl_idx = 0;

	ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);

	if (ipc_imem->phase != IPC_P_RUN) {
		dev_err(ipc_imem->dev,
			"Modem link down. Exit run state worker.");
		return;
	}

	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
		ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);

	ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
	if (ipc_imem->mux)
		ipc_imem->mux->wwan = ipc_imem->wwan;

	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
			ipc_imem->ipc_port[ctrl_chl_idx] = NULL;

			if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID &&
			    chnl_cfg_port.wwan_port_type == WWAN_PORT_XMMRPC) {
				ctrl_chl_idx++;
				continue;
			}

			if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
			    chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
				ctrl_chl_idx++;
				continue;
			}
			if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
				ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
						      chnl_cfg_port,
						      IRQ_MOD_OFF);
				ipc_imem->ipc_port[ctrl_chl_idx] =
					ipc_port_init(ipc_imem, chnl_cfg_port);
			}
		}
		ctrl_chl_idx++;
	}

	ipc_debugfs_init(ipc_imem);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
				 false);

	/* Complete all memory stores before setting bit */
	smp_mb__before_atomic();

	set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);

	/* Complete all memory stores after setting bit */
	smp_mb__after_atomic();
}

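/* Handle an irq in the current phase: update the phase, process the message
 * ring and all open pipes, and restart the timers as needed.
 */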
static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
{
	enum ipc_mem_device_ipc_state curr_ipc_status;
	enum ipc_phase old_phase, phase;
	bool retry_allocation = false;
	bool ul_pending = false;
	int i;

	if (irq != IMEM_IRQ_DONT_CARE)
		ipc_imem->ev_irq_pending[irq] = false;

	/* Get the internal phase. */
	old_phase = ipc_imem->phase;

	if (old_phase == IPC_P_OFF_REQ) {
		dev_dbg(ipc_imem->dev,
			"[%s]: Ignoring MSI. Deinit sequence in progress!",
			ipc_imem_phase_get_string(old_phase));
		return;
	}

	/* Update the phase controlled by CP. */
	phase = ipc_imem_phase_update(ipc_imem);

	switch (phase) {
	case IPC_P_RUN:
		if (!ipc_imem->enter_runtime) {
			/* Execute the transition from flash/boot to runtime. */
			ipc_imem->enter_runtime = 1;

			/* allow device to sleep, default value is
			 * IPC_HOST_SLEEP_ENTER_SLEEP
			 */
			ipc_imem_msg_send_device_sleep(ipc_imem,
						       ipc_imem->device_sleep);

			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      true);
		}

		curr_ipc_status =
			ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);

		/* check ipc_status change */
		if (ipc_imem->ipc_status != curr_ipc_status) {
			ipc_imem->ipc_status = curr_ipc_status;

			if (ipc_imem->ipc_status ==
			    IPC_MEM_DEVICE_IPC_RUNNING) {
				schedule_work(&ipc_imem->run_state_worker);
			}
		}

		/* Consider power management in the runtime phase. */
		ipc_imem_slp_control_exec(ipc_imem);
		break; /* Continue with skbuf processing. */

		/* Unexpected phases. */
	case IPC_P_OFF:
	case IPC_P_OFF_REQ:
		dev_err(ipc_imem->dev, "confused phase %s",
			ipc_imem_phase_get_string(phase));
		return;

	case IPC_P_PSI:
		if (old_phase != IPC_P_ROM)
			break;

		fallthrough;
		/* On CP the PSI phase is already active. */

	case IPC_P_ROM:
		/* Before CP ROM driver starts the PSI image, it sets
		 * the exit_code field on the doorbell scratchpad and
		 * triggers the irq.
		 */
		ipc_imem_rom_irq_exec(ipc_imem);
		return;

	default:
		break;
	}

	/* process message ring */
	ipc_protocol_msg_process(ipc_imem, irq);

	/* process all open pipes */
	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
		struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;

		if (dl_pipe->is_open &&
		    (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
			ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);

			if (dl_pipe->nr_of_queued_entries == 0)
				retry_allocation = true;
		}

		if (ul_pipe->is_open)
			ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
	}

	/* Try to generate new ADB or ADGH. */
	if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
		ipc_imem_td_update_timer_start(ipc_imem);
		if (ipc_imem->mux->protocol == MUX_AGGREGATION)
			ipc_imem_adb_timer_start(ipc_imem);
	}

	/* Continue the send procedure with accumulated SIO or NETIF packets.
	 * Reset the debounce flags.
	 */
	ul_pending |= ipc_imem_ul_write_td(ipc_imem);

	/* if UL data is pending restart TD update timer */
	if (ul_pending) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}

	/* If CP has executed the transition
	 * from IPC_INIT to IPC_RUNNING in the PSI
	 * phase, wake up the flash app to open the pipes.
	 */
	if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
	    ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
	    ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
						IPC_MEM_DEVICE_IPC_RUNNING) {
		complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
	}

	/* Reset the expected CP state. */
	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

/* Callback by tasklet for handling interrupt events. */
static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
			      size_t size)
{
	ipc_imem_handle_irq(ipc_imem, arg);

	return 0;
}

void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
{
	/* start doorbell irq delay timer if UL is pending */
	if (ipc_imem_ul_write_td(ipc_imem))
		ipc_imem_td_update_timer_start(ipc_imem);
}

/* Check the execution stage and update the AP phase */
static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
						  enum ipc_mem_exec_stage stage)
{
	switch (stage) {
	case IPC_MEM_EXEC_STAGE_BOOT:
		if (ipc_imem->phase != IPC_P_ROM) {
			/* Send this event only once */
			ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
		}

		ipc_imem->phase = IPC_P_ROM;
		break;

	case IPC_MEM_EXEC_STAGE_PSI:
		ipc_imem->phase = IPC_P_PSI;
		break;

	case IPC_MEM_EXEC_STAGE_EBL:
		ipc_imem->phase = IPC_P_EBL;
		break;

	case IPC_MEM_EXEC_STAGE_RUN:
		if (ipc_imem->phase != IPC_P_RUN &&
		    ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
			ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
		}
		ipc_imem->phase = IPC_P_RUN;
		break;

	case IPC_MEM_EXEC_STAGE_CRASH:
		if (ipc_imem->phase != IPC_P_CRASH)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);

		ipc_imem->phase = IPC_P_CRASH;
		break;

	case IPC_MEM_EXEC_STAGE_CD_READY:
		if (ipc_imem->phase != IPC_P_CD_READY)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
		ipc_imem->phase = IPC_P_CD_READY;
		break;

	default:
		/* unknown exec stage:
		 * assume that link is down and send info to listeners
		 */
		ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
		break;
	}

	return ipc_imem->phase;
}

/* Send msg to device to open pipe */
static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
			       struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = {
		.pipe_open.pipe = pipe,
	};

	if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				  IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
		pipe->is_open = true;

	return pipe->is_open;
}

/* Allocates the TDs for the given pipe along with firing HP update DB. */
static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
				     void *msg, size_t size)
{
	struct ipc_pipe *dl_pipe = msg;
	bool processed = false;
	int i;

	for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
		processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);

	/* Trigger the doorbell irq to inform CP that new downlink buffers are
	 * available.
	 */
	if (processed)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);

	return 0;
}

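/* hrtimer callback: defer the TD update doorbell to tasklet context. */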
static enum hrtimer_restart
ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, tdupdate_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

/* Get the CP execution state and map it to the AP phase. */
enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage exec_stage =
				ipc_imem_get_exec_stage_buffered(ipc_imem);
	/* If the CP stage is undef, return the internal precalculated phase. */
	return ipc_imem->phase == IPC_P_OFF_REQ ?
		       ipc_imem->phase :
		       ipc_imem_phase_update_check(ipc_imem, exec_stage);
}

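/* Map an AP phase to its log string. */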
const char *ipc_imem_phase_get_string(enum ipc_phase phase)
{
	switch (phase) {
	case IPC_P_RUN:
		return "A-RUN";

	case IPC_P_OFF:
		return "A-OFF";

	case IPC_P_ROM:
		return "A-ROM";

	case IPC_P_PSI:
		return "A-PSI";

	case IPC_P_EBL:
		return "A-EBL";

	case IPC_P_CRASH:
		return "A-CRASH";

	case IPC_P_CD_READY:
		return "A-CD_READY";

	case IPC_P_OFF_REQ:
		return "A-OFF_REQ";

	default:
		return "A-???";
	}
}

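/* Send the pipe close message to CP and clean up the pipe resources. */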
void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };

	pipe->is_open = false;
	ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
			      &prep_args);

	ipc_imem_pipe_cleanup(ipc_imem, pipe);
}

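/* Close both pipes of a channel and release the channel id. */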
void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
		return;
	}

	channel = &ipc_imem->channels[channel_id];

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel_id, channel->state);
		return;
	}

	/* Free only the channel id in the CP power off mode. */
	if (channel->state == IMEM_CHANNEL_RESERVED)
		/* Release only the channel id. */
		goto channel_free;

	if (ipc_imem->phase == IPC_P_RUN) {
		ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
		ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
	}

	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);

channel_free:
	ipc_imem_channel_free(channel);
}

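/* Open the UL/DL pipe pair of a reserved channel and preallocate its DL
 * buffers in tasklet context; returns the active channel or NULL on failure.
 *
 * Typical caller sequence (a sketch only; the actual callers live in the
 * port/devlink open paths):
 *	id = ipc_imem_channel_alloc(ipc_imem, index, ctype);
 *	channel = ipc_imem_channel_open(ipc_imem, id, db_id);
 *	...
 *	ipc_imem_channel_close(ipc_imem, id);
 */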
struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
					      int channel_id, u32 db_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
		return NULL;
	}

	channel = &ipc_imem->channels[channel_id];

	channel->state = IMEM_CHANNEL_ACTIVE;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
		goto ul_pipe_err;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
		goto dl_pipe_err;

	/* Allocate the downlink buffers in tasklet context. */
	if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
				     &channel->dl_pipe, 0, false)) {
		dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
		goto task_failed;
	}

	/* Active channel. */
	return channel;
task_failed:
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
dl_pipe_err:
	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ul_pipe_err:
	ipc_imem_channel_free(channel);
	return NULL;
}

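/* Host PM hooks: forward suspend/s2idle/resume events to the IPC protocol
 * layer; on resume, also refresh the AP phase from the CP execution stage.
 */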
void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
{
	ipc_protocol_suspend(ipc_imem->ipc_protocol);
}

void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
{
	ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
}

void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage stage;

	if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
		stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		ipc_imem_phase_update_check(ipc_imem, stage);
	}
}

void ipc_imem_channel_free(struct ipc_mem_channel *channel)
{
	/* Reset dynamic channel elements. */
	channel->state = IMEM_CHANNEL_FREE;
}

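/* Reserve the channel that matches the given type/index pair; returns the
 * channel array index or a negative errno.
 */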
int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
			   enum ipc_ctype ctype)
{
	struct ipc_mem_channel *channel;
	int i;

	/* Find channel of given type/index */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];
		if (channel->ctype == ctype && channel->index == index)
			break;
	}

	if (i >= ipc_imem->nr_of_channels) {
		dev_dbg(ipc_imem->dev,
			"no channel definition for index=%d ctype=%d", index,
			ctype);
		return -ECHRNG;
	}

	if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
		dev_dbg(ipc_imem->dev, "channel is in use");
		return -EBUSY;
	}

	if (channel->ctype == IPC_CTYPE_WWAN &&
	    index == IPC_MEM_MUX_IP_CH_IF_ID)
		channel->if_id = index;

	channel->channel_id = index;
	channel->state = IMEM_CHANNEL_RESERVED;

	return i;
}

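/* Register a new channel with the given configuration and initialize its
 * UL accumulator list and completion.
 */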
void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
			   struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
	    chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
		dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
			chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
		return;
	}

	if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "too many channels");
		return;
	}

	channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
	channel->channel_id = ipc_imem->nr_of_channels;
	channel->ctype = ctype;
	channel->index = chnl_cfg.id;
	channel->net_err_count = 0;
	channel->state = IMEM_CHANNEL_FREE;
	ipc_imem->nr_of_channels++;

	ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
				IRQ_MOD_OFF);

	skb_queue_head_init(&channel->ul_list);

	init_completion(&channel->ul_sem);
}

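/* Apply the pipe configuration of a channel that is free or reserved. */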
void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
			     struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (id < 0 || id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", id);
		return;
	}

	channel = &ipc_imem->channels[id];

	if (channel->state != IMEM_CHANNEL_FREE &&
	    channel->state != IMEM_CHANNEL_RESERVED) {
		dev_err(ipc_imem->dev, "invalid channel state %d",
			channel->state);
		return;
	}

	channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
	channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
	channel->ul_pipe.is_open = false;
	channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
	channel->ul_pipe.channel = channel;
	channel->ul_pipe.dir = IPC_MEM_DIR_UL;
	channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->ul_pipe.irq_moderation = irq_moderation;
	channel->ul_pipe.buf_size = 0;

	channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
	channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
	channel->dl_pipe.is_open = false;
	channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
	channel->dl_pipe.channel = channel;
	channel->dl_pipe.dir = IPC_MEM_DIR_DL;
	channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->dl_pipe.irq_moderation = irq_moderation;
	channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
}

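/* Clean up the pipes of all channels and mark the channels free. */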
static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
{
	int i;

	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		struct ipc_mem_channel *channel;

		channel = &ipc_imem->channels[i];

		ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
		ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);

		ipc_imem_channel_free(channel);
	}
}

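/* Mark a pipe closed, drop the queued UL skbs and release the protocol
 * resources of the pipe.
 */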
void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	struct sk_buff *skb;

	/* Force pipe to closed state also when not explicitly closed through
	 * ipc_imem_pipe_close()
	 */
	pipe->is_open = false;

	/* Empty the uplink skb accumulator. */
	while ((skb = skb_dequeue(&pipe->channel->ul_list)))
		ipc_pcie_kfree_skb(ipc_imem->pcie, skb);

	ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
}

/* Send IPC protocol uninit to the modem when Link is active. */
static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
	enum ipc_mem_device_ipc_state ipc_state;

	/* When the PCIe link is up, set IPC_UNINIT on the modem; otherwise
	 * ignore it when a PCIe link-down happens.
	 */
	if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
		/* set modem to UNINIT
		 * (in case we want to reload the AP driver without resetting
		 * the modem)
		 */
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_UNINIT);
		ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);

		/* Wait for maximum 30ms to allow the Modem to uninitialize the
		 * protocol.
		 */
		while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
		       (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
		       (timeout > 0)) {
			usleep_range(1000, 1250);
			timeout--;
			ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
		}
	}
}

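/* Tear down the shared memory layer: stop the timers and the state worker,
 * deinit the subsystems and uninit the device IPC.
 */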
void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
{
	ipc_imem->phase = IPC_P_OFF_REQ;

	/* forward MDM_NOT_READY to listeners */
	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);

	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);

	/* cancel the workqueue */
	cancel_work_sync(&ipc_imem->run_state_worker);

	if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
		ipc_mux_deinit(ipc_imem->mux);
		ipc_debugfs_deinit(ipc_imem);
		ipc_wwan_deinit(ipc_imem->wwan);
		ipc_port_deinit(ipc_imem->ipc_port);
	}

	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	ipc_imem_device_ipc_uninit(ipc_imem);
	ipc_imem_channel_reset(ipc_imem);

	ipc_protocol_deinit(ipc_imem->ipc_protocol);
	ipc_task_deinit(ipc_imem->ipc_task);

	kfree(ipc_imem->ipc_task);
	kfree(ipc_imem->mmio);

	ipc_imem->phase = IPC_P_OFF;
}

/* After CP has unblocked the PCIe link, save the start address of the doorbell
 * scratchpad and prepare the shared memory region. If the flashing to RAM
 * procedure shall be executed, copy the chip information from the doorbell
 * scratchpad to the application buffer and wake up the flash app.
 */
static int ipc_imem_config(struct iosm_imem *ipc_imem)
{
	enum ipc_phase phase;

	/* Initialize the semaphore for the blocking read UL/DL transfer. */
	init_completion(&ipc_imem->ul_pend_sem);

	init_completion(&ipc_imem->dl_pend_sem);

	/* clear internal flags */
	ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
	ipc_imem->enter_runtime = 0;

	phase = ipc_imem_phase_update(ipc_imem);

	/* Either CP shall be in the power off or power on phase. */
	switch (phase) {
	case IPC_P_ROM:
		ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
		/* poll execution stage (for delayed start, e.g. NAND) */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
		return 0;

	case IPC_P_PSI:
	case IPC_P_EBL:
	case IPC_P_RUN:
		/* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;

		/* Verify the expected initial state. */
		if (ipc_imem->ipc_requested_state ==
		    ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
			ipc_imem_ipc_init_check(ipc_imem);

			return 0;
		}
		dev_err(ipc_imem->dev,
			"ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
			ipc_mmio_get_ipc_state(ipc_imem->mmio));
		break;
	case IPC_P_CRASH:
	case IPC_P_CD_READY:
		dev_dbg(ipc_imem->dev,
			"Modem is in phase %d, reset Modem to collect CD",
			phase);
		return 0;
	default:
		dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
		break;
	}

	complete(&ipc_imem->dl_pend_sem);
	complete(&ipc_imem->ul_pend_sem);
	ipc_imem->phase = IPC_P_OFF;
	return -EIO;
}

/* Pass the dev ptr to the shared memory driver and request the entry points */
struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
				void __iomem *mmio, struct device *dev)
{
	struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
	enum ipc_mem_exec_stage stage;

	if (!ipc_imem)
		return NULL;

	/* Save the device address. */
	ipc_imem->pcie = pcie;
	ipc_imem->dev = dev;

	ipc_imem->pci_device_id = device_id;

	ipc_imem->cp_version = 0;
	ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;

	/* Reset the max number of configured channels */
	ipc_imem->nr_of_channels = 0;

	/* allocate IPC MMIO */
	ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
	if (!ipc_imem->mmio) {
		dev_err(ipc_imem->dev, "failed to initialize mmio region");
		goto mmio_init_fail;
	}

	ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
				     GFP_KERNEL);

	/* Create tasklet for event handling */
	if (!ipc_imem->ipc_task)
		goto ipc_task_fail;

	if (ipc_task_init(ipc_imem->ipc_task))
		goto ipc_task_init_fail;

	ipc_imem->ipc_task->dev = ipc_imem->dev;

	INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);

	ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);

	if (!ipc_imem->ipc_protocol)
		goto protocol_init_fail;

	/* The phase is set to power off. */
	ipc_imem->phase = IPC_P_OFF;

	hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;

	hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;

	hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;

	hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;

	hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;

	if (ipc_imem_config(ipc_imem)) {
		dev_err(ipc_imem->dev, "failed to initialize the imem");
		goto imem_config_fail;
	}

	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
		/* Alloc and Register devlink */
		ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
		if (!ipc_imem->ipc_devlink) {
			dev_err(ipc_imem->dev, "Devlink register failed");
			goto imem_config_fail;
		}

		if (ipc_flash_link_establish(ipc_imem))
			goto devlink_channel_fail;

		set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
	}
	return ipc_imem;
devlink_channel_fail:
	ipc_devlink_deinit(ipc_imem->ipc_devlink);
imem_config_fail:
	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);
protocol_init_fail:
	cancel_work_sync(&ipc_imem->run_state_worker);
	ipc_task_deinit(ipc_imem->ipc_task);
ipc_task_init_fail:
	kfree(ipc_imem->ipc_task);
ipc_task_fail:
	kfree(ipc_imem->mmio);
mmio_init_fail:
	kfree(ipc_imem);
	return NULL;
}

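/* Entry point for MSI handling: debounce the irq and defer it to tasklet
 * context.
 */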
void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
{
	/* Debounce IPC_EV_IRQ. */
	if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
		ipc_imem->ev_irq_pending[irq] = true;
		ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
					 NULL, 0, false);
	}
}

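/* Suspend or resume the use of the TD update timer. */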
void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
	ipc_imem->td_update_timer_suspended = suspend;
}

/* Verify the CP execution state, copy the chip info,
 * change the execution phase to ROM
 */
static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
						 int arg, void *msg,
						 size_t msgsize)
{
	enum ipc_mem_exec_stage stage;
	struct sk_buff *skb;
	int rc = -EINVAL;
	size_t size;

	/* Test the CP execution state. */
	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
		dev_err(ipc_imem->dev,
			"Execution_stage: expected BOOT, received = %X", stage);
		goto trigger_chip_info_fail;
	}
	/* Allocate a new sk buf for the chip info. */
	size = ipc_imem->mmio->chip_info_size;
	if (size > IOSM_CHIP_INFO_SIZE_MAX)
		goto trigger_chip_info_fail;

	skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
	if (!skb) {
		dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
		rc = -ENOMEM;
		goto trigger_chip_info_fail;
	}
	/* Copy the chip info characters into the ipc_skb. */
	ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
	/* First change to the ROM boot phase. */
	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
	ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
	ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
	rc = 0;
trigger_chip_info_fail:
	return rc;
}

int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
{
	return ipc_task_queue_send_task(ipc_imem,
					ipc_imem_devlink_trigger_chip_info_cb,
					0, NULL, 0, true);
}