// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_task_queue.h"

/* Open a packet data online channel between the network layer and CP. */
int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
{
	dev_dbg(ipc_imem->dev, "%s if id: %d",
		ipc_imem_phase_get_string(ipc_imem->phase), if_id);

	/* The network interface is only supported in the runtime phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
		dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
			ipc_imem_phase_get_string(ipc_imem->phase));
		return -EIO;
	}

	/* Check the interface id:
	 * if if_id is in the range 1 to 8, open an IP MUX channel session.
	 * MUX session ids start at 0 while network interface ids start at 1,
	 * so map the interface id to the session id with if_id - 1.
	 */
	if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
		return ipc_mux_open_session(ipc_imem->mux, if_id - 1);

	return -EINVAL;
}

/* Release a net link to CP. */
void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
			     int channel_id)
{
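	/* Close the MUX session if the interface id maps to an IP MUX
	 * session.
	 */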
	if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
	    if_id <= IP_MUX_SESSION_END)
		ipc_mux_close_session(ipc_imem->mux, if_id - 1);
}

/* Tasklet call to do uplink transfer. */
static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
				  void *msg, size_t size)
{
	ipc_imem->ev_cdev_write_pending = false;
	ipc_imem_ul_send(ipc_imem);

	return 0;
}

/* Schedule the sio write through the tasklet. */
static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
{
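	/* Avoid queuing a duplicate write event; the pending flag is
	 * cleared again in the tasklet handler.
	 */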
	if (ipc_imem->ev_cdev_write_pending)
		return -1;

	ipc_imem->ev_cdev_write_pending = true;

	return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
					NULL, 0, false);
}

/* Function to transfer UL data */
int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
			       int if_id, int channel_id, struct sk_buff *skb)
{
	int ret = -EINVAL;

	if (!ipc_imem || channel_id < 0)
		goto out;

	/* Is CP running? */
	if (ipc_imem->phase != IPC_P_RUN) {
		dev_dbg(ipc_imem->dev, "phase %s transmit",
			ipc_imem_phase_get_string(ipc_imem->phase));
		ret = -EIO;
		goto out;
	}

	if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
		/* Route the UL packet through the IP MUX layer. */
		ret = ipc_mux_ul_trigger_encode(ipc_imem->mux,
						if_id - 1, skb);
	else
		dev_err(ipc_imem->dev,
			"invalid if_id %d: ", if_id);
out:
	return ret;
}

/* Initialize the WWAN channel. */
void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
				enum ipc_mux_protocol mux_type)
{
	struct ipc_chnl_cfg chnl_cfg = { 0 };

	ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);

	/* If modem version is invalid (0xffffffff), do not initialize WWAN. */
	if (ipc_imem->cp_version == -1) {
		dev_err(ipc_imem->dev, "invalid CP version");
		return;
	}

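	/* Get the channel configuration for the next channel and create the
	 * WWAN channel.
	 */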
	ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
	ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
			      IRQ_MOD_OFF);

	/* WWAN registration. */
	ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
	if (!ipc_imem->wwan)
		dev_err(ipc_imem->dev,
			"failed to register the ipc_wwan interfaces");
}

/* Map SKB to DMA for transfer */
static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
				   struct sk_buff *skb)
{
	struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
	char *buf = skb->data;
	int len = skb->len;
	dma_addr_t mapping;
	int ret;

	ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);

	if (ret)
		goto err;

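	/* The IPC control block must fit into the skb control buffer. */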
	BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));

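	/* Save the mapping details in the skb control block so the buffer
	 * can be unmapped once the transfer has completed.
	 */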
	IPC_CB(skb)->mapping = mapping;
	IPC_CB(skb)->direction = DMA_TO_DEVICE;
	IPC_CB(skb)->len = len;
	IPC_CB(skb)->op_type = (u8)UL_DEFAULT;

err:
	return ret;
}

/* Return true if the channel is ready for use. */
static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
				       struct ipc_mem_channel *channel)
{
	enum ipc_phase phase;

	/* Update the current operation phase. */
	phase = ipc_imem->phase;

	/* Select the operation depending on the execution stage. */
	switch (phase) {
	case IPC_P_RUN:
	case IPC_P_PSI:
	case IPC_P_EBL:
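		/* The channel is usable in the run and boot phases; check
		 * its state below.
		 */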
		break;

	case IPC_P_ROM:
		/* Prepare the PSI image for the CP ROM driver and
		 * suspend the flash app.
		 */
		if (channel->state != IMEM_CHANNEL_RESERVED) {
			dev_err(ipc_imem->dev,
				"ch[%d]:invalid channel state %d,expected %d",
				channel->channel_id, channel->state,
				IMEM_CHANNEL_RESERVED);
			goto channel_unavailable;
		}
		goto channel_available;

	default:
		/* Ignore uplink actions in all other phases. */
		dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
			channel->channel_id, phase);
		goto channel_unavailable;
	}
	/* Check the full availability of the channel. */
	if (channel->state != IMEM_CHANNEL_ACTIVE) {
		dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
			channel->channel_id, channel->state);
		goto channel_unavailable;
	}

channel_available:
	return true;

channel_unavailable:
	return false;
}

/* Release a sio link to CP. */
void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev)
{
	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
	struct ipc_mem_channel *channel = ipc_cdev->channel;
	enum ipc_phase curr_phase;
	int status = 0;
	u32 tail = 0;

	curr_phase = ipc_imem->phase;

	/* If the current phase is IPC_P_OFF or the SIO ID is negative,
	 * the channel is already freed. Nothing to do.
	 */
	if (curr_phase == IPC_P_OFF) {
		dev_err(ipc_imem->dev,
			"nothing to do. Current Phase: %s",
			ipc_imem_phase_get_string(curr_phase));
		return;
	}

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel->channel_id, channel->state);
		return;
	}

	/* If there are any pending UL TDs, wait for timeout/completion
	 * before closing the pipe.
	 */
	if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
		ipc_imem->app_notify_ul_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * UL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->ul_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
				channel->ul_pipe.pipe_nr,
				channel->ul_pipe.old_head,
				channel->ul_pipe.old_tail);
		}

		ipc_imem->app_notify_ul_pend = 0;
	}

	/* If there are any pending DL TDs, wait for timeout/completion
	 * before closing the pipe.
	 */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
					 &channel->dl_pipe, NULL, &tail);

	if (tail != channel->dl_pipe.old_tail) {
		ipc_imem->app_notify_dl_pend = 1;

		/* Suspend the user app and wait a certain time for processing
		 * DL Data.
		 */
		status = wait_for_completion_interruptible_timeout
			 (&ipc_imem->dl_pend_sem,
			  msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
		if (status == 0) {
			dev_dbg(ipc_imem->dev,
				"Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
				channel->dl_pipe.pipe_nr,
				channel->dl_pipe.old_head,
				channel->dl_pipe.old_tail);
		}

		ipc_imem->app_notify_dl_pend = 0;
	}

	/* Due to the wait for completion in messages, there is a small window
	 * between closing the pipe and updating the channel state to closed.
	 * In this small window there could be an HP update from the host
	 * driver. Hence update the channel state to CLOSING to avoid an
	 * unnecessary interrupt towards CP.
	 */
	channel->state = IMEM_CHANNEL_CLOSING;

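	/* Release the UL and DL pipes towards CP and free the channel. */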
	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);

	ipc_imem_channel_free(channel);
}

/* Open a PORT link to CP and return the channel */
struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
					       int chl_id, int hp_id)
{
	struct ipc_mem_channel *channel;
	int ch_id;

	/* The PORT interface is only supported in the runtime phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
		dev_err(ipc_imem->dev, "PORT open refused, phase %s",
			ipc_imem_phase_get_string(ipc_imem->phase));
		return NULL;
	}

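	/* Reserve a control channel for the requested port id. */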
	ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);

	if (ch_id < 0) {
		dev_err(ipc_imem->dev, "reservation of a PORT chnl id failed");
		return NULL;
	}

	channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);

	if (!channel) {
		dev_err(ipc_imem->dev, "PORT channel id open failed");
		return NULL;
	}

	return channel;
}

/* Transfer an skb to the modem. */
int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
{
	struct ipc_mem_channel *channel = ipc_cdev->channel;
	struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
	int ret = -EIO;

	if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
	    ipc_imem->phase == IPC_P_OFF_REQ)
		goto out;

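	/* Map the skb payload for DMA towards the device. */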
	ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);

	if (ret)
		goto out;

	/* Add skb to the uplink skbuf accumulator. */
	skb_queue_tail(&channel->ul_list, skb);

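	/* Trigger the uplink transfer from the tasklet context. */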
	ret = ipc_imem_call_cdev_write(ipc_imem);

	if (ret) {
		skb_dequeue_tail(&channel->ul_list);
		dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
			ipc_cdev->channel->channel_id);
	}
out:
	return ret;
}