// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/nospec.h>

#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_mux_codec.h"
#include "iosm_ipc_task_queue.h"

/* Test the link power state and send a MUX command in blocking mode. */
static int ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
			       size_t size)
{
	struct iosm_mux *ipc_mux = ipc_imem->mux;
	const struct mux_acb *acb = msg;

	skb_queue_tail(&ipc_mux->channel->ul_list, acb->skb);
	ipc_imem_ul_send(ipc_mux->imem);

	return 0;
}

static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
{
	struct completion *completion = &ipc_mux->channel->ul_sem;
	int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
					   0, &ipc_mux->acb,
					   sizeof(ipc_mux->acb), false);
	if (ret) {
		dev_err(ipc_mux->dev, "unable to send mux command");
		return ret;
	}

	/* If blocking, suspend the app and wait for the IRQ in the flash or
	 * crash phase. Return -ETIMEDOUT on timeout to indicate failure.
	 */
	if (blocking) {
		u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;

		reinit_completion(completion);

		if (wait_for_completion_interruptible_timeout
		   (completion, msecs_to_jiffies(wait_time_milliseconds)) ==
		   0) {
			dev_err(ipc_mux->dev, "ch[%d] timeout",
				ipc_mux->channel_id);
			ipc_uevent_send(ipc_mux->imem->dev, UEVENT_MDM_TIMEOUT);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
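
/* Illustrative only, not part of the driver: a typical blocking command
 * send, assuming the ACB skb has already been prepared by the helpers
 * below:
 *
 *	ipc_mux_acb_init(ipc_mux);
 *	ipc_mux_acb_add_cmd(ipc_mux, cmd, param, param_size);
 *	ret = ipc_mux_acb_send(ipc_mux, true);
 *
 * With blocking == true the caller sleeps on channel->ul_sem until the
 * response path calls complete(), or gets -ETIMEDOUT after
 * IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT milliseconds.
 */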

/* Initialize the command header. */
static void ipc_mux_acb_init(struct iosm_mux *ipc_mux)
{
	struct mux_acb *acb = &ipc_mux->acb;
	struct mux_acbh *header;

	header = (struct mux_acbh *)(acb->skb)->data;
	header->block_length = cpu_to_le32(sizeof(struct mux_acbh));
	header->first_cmd_index = header->block_length;
	header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH);
	header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++);
}
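
/* Sketch of the ACB header after initialization (field meanings only;
 * the on-wire layout is defined by struct mux_acbh):
 *
 *	signature	IOSM_AGGR_MUX_SIG_ACBH
 *	block_length	sizeof(struct mux_acbh)
 *	first_cmd_index	equals block_length, i.e. the first command
 *			follows the header immediately
 *	sequence_nr	incrementing uplink ACB sequence number
 */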

/* Add a command to the ACB. */
static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd,
					    void *param, u32 param_size)
{
	struct mux_acbh *header;
	struct mux_cmdh *cmdh;
	struct mux_acb *acb;

	acb = &ipc_mux->acb;
	header = (struct mux_acbh *)(acb->skb)->data;
	cmdh = (struct mux_cmdh *)
		((acb->skb)->data + le32_to_cpu(header->block_length));

	cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
	cmdh->command_type = cpu_to_le32(cmd);
	cmdh->if_id = acb->if_id;

	acb->cmd = cmd;
	cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) +
				    param_size);
	cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
	if (param)
		memcpy(&cmdh->param, param, param_size);

	skb_put(acb->skb, le32_to_cpu(header->block_length) +
					le16_to_cpu(cmdh->cmd_len));

	return cmdh;
}
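
/* Resulting buffer layout after one command has been added (sketch):
 *
 *	[ mux_acbh ][ mux_cmdh | param... ]
 *	|<- block_length ->|<- cmd_len -->|
 *
 * where cmd_len = offsetof(struct mux_cmdh, param) + param_size, and
 * skb_put() above advances the skb tail over both regions.
 */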

/* Prepare a MUX command. */
static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
						  u32 cmd, struct mux_acb *acb,
						  void *param, u32 param_size)
{
	struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)acb->skb->data;

	cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
	cmdh->command_type = cpu_to_le32(cmd);
	cmdh->if_id = acb->if_id;

	acb->cmd = cmd;

	cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_lite_cmdh, param) +
				    param_size);
	cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);

	if (param)
		memcpy(&cmdh->param, param, param_size);

	skb_put(acb->skb, le16_to_cpu(cmdh->cmd_len));

	return cmdh;
}

static int ipc_mux_acb_alloc(struct iosm_mux *ipc_mux)
{
	struct mux_acb *acb = &ipc_mux->acb;
	struct sk_buff *skb;
	dma_addr_t mapping;

	/* Allocate skb memory for the uplink buffer. */
	skb = ipc_pcie_alloc_skb(ipc_mux->pcie, MUX_MAX_UL_ACB_BUF_SIZE,
				 GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
	if (!skb)
		return -ENOMEM;

	/* Save the skb address. */
	acb->skb = skb;

	memset(skb->data, 0, MUX_MAX_UL_ACB_BUF_SIZE);

	return 0;
}

int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
			     u32 transaction_id, union mux_cmd_param *param,
			     size_t res_size, bool blocking, bool respond)
{
	struct mux_acb *acb = &ipc_mux->acb;
	union mux_type_cmdh cmdh;
	int ret = 0;

	acb->if_id = if_id;
	ret = ipc_mux_acb_alloc(ipc_mux);
	if (ret)
		return ret;

	if (ipc_mux->protocol == MUX_LITE) {
		cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb,
						     param, res_size);

		if (respond)
			cmdh.ack_lite->transaction_id =
					cpu_to_le32(transaction_id);
	} else {
		/* Initialize the ACB header. */
		ipc_mux_acb_init(ipc_mux);
		cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param,
						    res_size);

		if (respond)
			cmdh.ack_aggr->transaction_id =
					cpu_to_le32(transaction_id);
	}
	ret = ipc_mux_acb_send(ipc_mux, blocking);

	return ret;
}
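
/* Illustrative caller, not part of the driver: acknowledge a received
 * flow-control command on interface 1 (hypothetical values), echoing
 * the peer's transaction id:
 *
 *	ipc_mux_dl_acb_send_cmds(ipc_mux, IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK,
 *				 1, trans_id, NULL, 0, false, true);
 *
 * respond == true reuses the given transaction_id instead of drawing a
 * fresh one from tx_transaction_id.
 */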

void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
{
	/* Inform the network interface to start/stop flow ctrl */
	ipc_wwan_tx_flowctrl(session->wwan, idx, on);
}

static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
					      union mux_cmd_param param,
					      __le32 command_type, u8 if_id,
					      __le32 transaction_id)
{
	struct mux_acb *acb = &ipc_mux->acb;

	switch (le32_to_cpu(command_type)) {
	case MUX_CMD_OPEN_SESSION_RESP:
	case MUX_CMD_CLOSE_SESSION_RESP:
		/* Resume the control application. */
		acb->got_param = param;
		break;

	case MUX_LITE_CMD_FLOW_CTL_ACK:
		/* This command type is not expected as a response in the
		 * Aggregation version of the protocol, so return non-zero.
		 */
		if (ipc_mux->protocol != MUX_LITE)
			return -EINVAL;

		dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received",
			if_id, le32_to_cpu(transaction_id));
		break;

	case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK:
		/* This command type is not expected as a response in the
		 * Lite version of the protocol, so return non-zero.
		 */
		if (ipc_mux->protocol == MUX_LITE)
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	acb->wanted_response = MUX_CMD_INVALID;
	acb->got_response = le32_to_cpu(command_type);
	complete(&ipc_mux->channel->ul_sem);

	return 0;
}

static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux,
					  union mux_cmd_param *param,
					  __le32 command_type, u8 if_id,
					  __le16 cmd_len, int size)
{
	struct mux_session *session;
	struct hrtimer *adb_timer;

	dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
		if_id, le32_to_cpu(command_type));

	switch (le32_to_cpu(command_type)) {
	case MUX_LITE_CMD_FLOW_CTL:
	case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE:

		if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
			dev_err(ipc_mux->dev, "if_id [%d] not valid",
				if_id);
			return -EINVAL; /* No session interface id. */
		}

		session = &ipc_mux->session[if_id];
		adb_timer = &ipc_mux->imem->adb_timer;

		if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
			/* Backward Compatibility */
			if (cmd_len == cpu_to_le16(size))
				session->flow_ctl_mask =
					le32_to_cpu(param->flow_ctl.mask);
			else
				session->flow_ctl_mask = ~0;
			/* If CP asks for flow control enable, then set our
			 * internal flow control Tx flag to limit uplink
			 * session queueing.
			 */
			session->net_tx_stop = true;

			/* Finish the ADB here; otherwise data already
			 * queued for other sessions would only reach CP
			 * once the ADB fills up.
			 */
			if (ipc_mux->protocol == MUX_AGGREGATION) {
				ipc_mux_ul_adb_finish(ipc_mux);
				ipc_imem_hrtimer_stop(adb_timer);
			}
			/* Update the stats */
			session->flow_ctl_en_cnt++;
		} else if (param->flow_ctl.mask == 0) {
			/* Just reset the flow control mask and let
			 * mux_flow_ctrl_low_thre_b take control of our
			 * internal Tx flag and enable kernel flow control.
			 */
			dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
				if_id, le32_to_cpu(param->flow_ctl.mask));
			/* Backward Compatibility */
			if (cmd_len == cpu_to_le16(size))
				session->flow_ctl_mask =
					le32_to_cpu(param->flow_ctl.mask);
			else
				session->flow_ctl_mask = 0;
			/* Update the stats */
			session->flow_ctl_dis_cnt++;
		} else {
			break;
		}

		ipc_mux->acc_adb_size = 0;
		ipc_mux->acc_payload_size = 0;

		dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id,
			le32_to_cpu(param->flow_ctl.mask));
		break;

	case MUX_LITE_CMD_LINK_STATUS_REPORT:
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
/* Decode and send the appropriate response to a command block. */
static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
	struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
	__le32 trans_id = cmdh->transaction_id;
	int size;

	if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
					       cmdh->command_type, cmdh->if_id,
					       cmdh->transaction_id)) {
		/* Failure to decode a command response indicates that
		 * cmd_type may be a command rather than a response, so try
		 * decoding it as a command.
		 */
		size = offsetof(struct mux_lite_cmdh, param) +
				sizeof(cmdh->param.flow_ctl);
		if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
						    cmdh->command_type,
						    cmdh->if_id,
						    cmdh->cmd_len, size)) {
			/* Decoded command may need a response. Give the
			 * response according to the command type.
			 */
			union mux_cmd_param *mux_cmd = NULL;
			size_t size = 0;
			u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;

			if (cmdh->command_type ==
			    cpu_to_le32(MUX_LITE_CMD_LINK_STATUS_REPORT)) {
				mux_cmd = &cmdh->param;
				mux_cmd->link_status_resp.response =
					cpu_to_le32(MUX_CMD_RESP_SUCCESS);
				/* response field is u32 */
				size = sizeof(u32);
			} else if (cmdh->command_type ==
				   cpu_to_le32(MUX_LITE_CMD_FLOW_CTL)) {
				cmd = MUX_LITE_CMD_FLOW_CTL_ACK;
			} else {
				return;
			}

			if (ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
						     le32_to_cpu(trans_id),
						     mux_cmd, size, false,
						     true))
				dev_err(ipc_mux->dev,
					"if_id %d: cmd send failed",
					cmdh->if_id);
		}
	}
}

/* Pass the DL packet to the netif layer. */
static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
			       struct iosm_wwan *wwan, u32 offset,
			       u8 service_class, struct sk_buff *skb,
			       u32 pkt_len)
{
	struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);

	if (!dest_skb)
		return -ENOMEM;

	skb_pull(dest_skb, offset);
	skb_trim(dest_skb, pkt_len);
	/* Pass the packet to the netif layer. */
	dest_skb->priority = service_class;

	return ipc_wwan_receive(wwan, dest_skb, false, if_id);
}

/* Decode Flow Credit Table in the block */
static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
				   unsigned char *block)
{
	struct ipc_mem_lite_gen_tbl *fct = (struct ipc_mem_lite_gen_tbl *)block;
	struct iosm_wwan *wwan;
	int ul_credits;
	int if_id;

	if (fct->vfl_length != sizeof(fct->vfl.nr_of_bytes)) {
		dev_err(ipc_mux->dev, "unexpected FCT length: %d",
			fct->vfl_length);
		return;
	}

	if_id = fct->if_id;
	if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
		dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
		return;
	}

	/* Is the session active ? */
	if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
	wwan = ipc_mux->session[if_id].wwan;
	if (!wwan) {
		dev_err(ipc_mux->dev, "session Net ID is NULL");
		return;
	}

	ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);

	dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
		if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);

	/* Update the Flow Credit information from ADB */
	ipc_mux->session[if_id].ul_flow_credits += ul_credits;

	/* Check whether the TX can be started */
	if (ipc_mux->session[if_id].ul_flow_credits > 0) {
		ipc_mux->session[if_id].net_tx_stop = false;
		ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
					  ipc_mux->session[if_id].if_id, false);
	}
}
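
/* Worked example with illustrative numbers: if session 2 currently has
 * ul_flow_credits == -500 (over-committed) and CP grants
 * vfl.nr_of_bytes == 2048, the session ends up with 1548 credits.
 * Since that is > 0, net_tx_stop is cleared and netif flow control is
 * released for the session.
 */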

/* Decode non-aggregated datagram */
static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
				   struct sk_buff *skb)
{
	u32 pad_len, packet_offset, adgh_len;
	struct iosm_wwan *wwan;
	struct mux_adgh *adgh;
	u8 *block = skb->data;
	int rc = 0;
	u8 if_id;

	adgh = (struct mux_adgh *)block;

	if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) {
		dev_err(ipc_mux->dev, "invalid ADGH signature received");
		return;
	}

	if_id = adgh->if_id;
	if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
		dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
		return;
	}

	/* Is the session active ? */
	if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
	wwan = ipc_mux->session[if_id].wwan;
	if (!wwan) {
		dev_err(ipc_mux->dev, "session Net ID is NULL");
		return;
	}

	/* Store the pad len for the corresponding session.
	 * Pad bytes as negotiated in the open session less the header size
	 * (see session management chapter for details).
	 * If the resulting padding is zero or less, the additional head
	 * padding is omitted. For example, if HEAD_PAD_LEN = 16 or less,
	 * this field is omitted; if HEAD_PAD_LEN = 20, this field will have
	 * 4 bytes set to zero.
	 */
	pad_len =
		ipc_mux->session[if_id].dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
	packet_offset = sizeof(*adgh) + pad_len;

	if_id += ipc_mux->wwan_q_offset;
	adgh_len = le16_to_cpu(adgh->length);

	/* Pass the packet to the netif layer */
	rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
				 adgh->service_class, skb,
				 adgh_len - packet_offset);
	if (rc) {
		dev_err(ipc_mux->dev, "mux adgh decoding error");
		return;
	}
	ipc_mux->session[if_id].flush = 1;
}
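
/* ADGH frame layout handled above (sketch):
 *
 *	[ mux_adgh ][ head padding ][ IP packet ]
 *	            |<-- pad_len -->|
 *	|<------ packet_offset ---->|
 *	|<------------- adgh->length ---------->|
 *
 * so the netif layer receives adgh_len - packet_offset payload bytes.
 */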

static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux,
				     struct mux_cmdh *cmdh, int size)
{
	u32 link_st  = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP;
	u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE;
	u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE;
	u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK;
	union mux_cmd_param *cmd_p = NULL;
	u32 cmd = link_st;
	u32 trans_id;

	if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
					    cmdh->command_type, cmdh->if_id,
					    cmdh->cmd_len, size)) {
		size = 0;
		if (cmdh->command_type == cpu_to_le32(link_st)) {
			cmd_p = &cmdh->param;
			cmd_p->link_status_resp.response =
				cpu_to_le32(MUX_CMD_RESP_SUCCESS);
		} else if (cmdh->command_type == cpu_to_le32(fctl_ena) ||
			   cmdh->command_type == cpu_to_le32(fctl_dis)) {
			cmd = fctl_ack;
		} else {
			return;
		}
		trans_id = le32_to_cpu(cmdh->transaction_id);
		ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
					 trans_id, cmd_p, size, false, true);
	}
}

/* Decode an aggregated command block. */
static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
	struct mux_acbh *acbh;
	struct mux_cmdh *cmdh;
	u32 next_cmd_index;
	u8 *block;
	int size;

	acbh = (struct mux_acbh *)(skb->data);
	block = (u8 *)(skb->data);

	next_cmd_index = le32_to_cpu(acbh->first_cmd_index);
	next_cmd_index = array_index_nospec(next_cmd_index,
					    sizeof(struct mux_cmdh));

	while (next_cmd_index != 0) {
		cmdh = (struct mux_cmdh *)&block[next_cmd_index];
		next_cmd_index = le32_to_cpu(cmdh->next_cmd_index);
		if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
						       cmdh->command_type,
						       cmdh->if_id,
						       cmdh->transaction_id)) {
			size = offsetof(struct mux_cmdh, param) +
				sizeof(cmdh->param.flow_ctl);
			ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size);
		}
	}
}
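
/* The ACB is a singly linked chain of commands inside one block
 * (sketch):
 *
 *	ACBH.first_cmd_index -> CMDH.next_cmd_index -> CMDH ... -> 0
 *
 * Each index is a byte offset from the start of the block; a zero
 * index terminates the walk above.
 */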

/* process datagram */
static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
			     struct mux_adth_dg *dg, struct sk_buff *skb,
			     int if_id, int nr_of_dg)
{
	u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
	u32 packet_offset, i, rc, dg_len;

	for (i = 0; i < nr_of_dg; i++, dg++) {
		if (le32_to_cpu(dg->datagram_index)
				< sizeof(struct mux_adbh))
			goto dg_error;

		/* Is the packet inside of the ADB */
		if (le32_to_cpu(dg->datagram_index) >=
					le32_to_cpu(adbh->block_length)) {
			goto dg_error;
		} else {
			packet_offset =
				le32_to_cpu(dg->datagram_index) +
				dl_head_pad_len;
			dg_len = le16_to_cpu(dg->datagram_length);
			/* Pass the packet to the netif layer. */
			rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
						 packet_offset,
						 dg->service_class, skb,
						 dg_len - dl_head_pad_len);
			if (rc)
				goto dg_error;
		}
	}
	return 0;
dg_error:
	return -1;
}

/* Decode an aggregated data block. */
static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
			      struct sk_buff *skb)
{
	struct mux_adth_dg *dg;
	struct iosm_wwan *wwan;
	struct mux_adbh *adbh;
	struct mux_adth *adth;
	int nr_of_dg, if_id;
	u32 adth_index;
	u8 *block;

	block = skb->data;
	adbh = (struct mux_adbh *)block;

	/* Process the aggregated datagram tables. */
	adth_index = le32_to_cpu(adbh->first_table_index);

	/* Has CP sent an empty ADB ? */
	if (adth_index < 1) {
		dev_err(ipc_mux->dev, "unexpected empty ADB");
		goto adb_decode_err;
	}

	/* Loop through mixed session tables. */
	while (adth_index) {
		/* Get the reference to the table header. */
		adth = (struct mux_adth *)(block + adth_index);

		/* Get the interface id and map it to the netif id. */
		if_id = adth->if_id;
		if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
			goto adb_decode_err;

		if_id = array_index_nospec(if_id,
					   IPC_MEM_MUX_IP_SESSION_ENTRIES);

		/* Is the session active ? */
		wwan = ipc_mux->session[if_id].wwan;
		if (!wwan)
			goto adb_decode_err;

		/* Consistency checks for aggregated datagram table. */
		if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
			goto adb_decode_err;

		if (le16_to_cpu(adth->table_length) < (sizeof(struct mux_adth) -
				sizeof(struct mux_adth_dg)))
			goto adb_decode_err;

		/* Calculate the number of datagrams. */
		nr_of_dg = (le16_to_cpu(adth->table_length) -
					sizeof(struct mux_adth) +
					sizeof(struct mux_adth_dg)) /
					sizeof(struct mux_adth_dg);

		/* Is the datagram table empty ? */
		if (nr_of_dg < 1) {
			dev_err(ipc_mux->dev,
				"adthidx=%u,nr_of_dg=%d,next_tblidx=%u",
				adth_index, nr_of_dg,
				le32_to_cpu(adth->next_table_index));

			/* Move to the next aggregated datagram table. */
			adth_index = le32_to_cpu(adth->next_table_index);
			continue;
		}

		/* New aggregated datagram table. */
		dg = &adth->dg;
		if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
				      nr_of_dg) < 0)
			goto adb_decode_err;

		/* mark session for final flush */
		ipc_mux->session[if_id].flush = 1;

		/* Move to the next aggregated datagram table. */
		adth_index = le32_to_cpu(adth->next_table_index);
	}

adb_decode_err:
	return;
}
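
/* ADB layout walked above (sketch):
 *
 *	[ mux_adbh | datagrams... | ADTH | datagrams... | ADTH | ... ]
 *	  first_table_index --------^  next_table_index --^ ... -> 0
 *
 * Each ADTH carries nr_of_dg descriptors whose datagram_index and
 * datagram_length locate the IP packets elsewhere in the same block.
 */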

/**
 * ipc_mux_dl_decode - Route the DL packet through the IP MUX layer
 *                     depending on the header.
 * @ipc_mux:           Pointer to MUX data-struct
 * @skb:               Pointer to ipc_skb.
 */
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
	u32 signature;

	if (!skb->data)
		return;

	/* Decode the MUX header type. */
	signature = le32_to_cpup((__le32 *)skb->data);

	switch (signature) {
	case IOSM_AGGR_MUX_SIG_ADBH:	/* Aggregated Data Block Header */
		mux_dl_adb_decode(ipc_mux, skb);
		break;
	case IOSM_AGGR_MUX_SIG_ADGH:
		ipc_mux_dl_adgh_decode(ipc_mux, skb);
		break;
	case MUX_SIG_FCTH:
		ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
		break;
	case IOSM_AGGR_MUX_SIG_ACBH:	/* Aggregated Command Block Header */
		ipc_mux_dl_acb_decode(ipc_mux, skb);
		break;
	case MUX_SIG_CMDH:
		ipc_mux_dl_cmd_decode(ipc_mux, skb);
		break;

	default:
		dev_err(ipc_mux->dev, "invalid ABH signature");
	}

	ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
}

static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
				struct mux_adb *ul_adb, u32 type)
{
	/* Take the first element of the free list. */
	struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
	u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
	u32 *next_tb_id;
	int qlt_size;
	u32 if_id;

	if (!skb)
		return -EBUSY; /* Wait for a free ADB skb. */

	/* Mark it as UL ADB to select the right free operation. */
	IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;

	switch (type) {
	case IOSM_AGGR_MUX_SIG_ADBH:
		/* Save the ADB memory settings. */
		ul_adb->dest_skb = skb;
		ul_adb->buf = skb->data;
		ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;

		/* reset statistic counter */
		ul_adb->if_cnt = 0;
		ul_adb->payload_size = 0;
		ul_adb->dg_cnt_total = 0;

		/* Initialize the ADBH. */
		ul_adb->adbh = (struct mux_adbh *)ul_adb->buf;
		memset(ul_adb->adbh, 0, sizeof(struct mux_adbh));
		ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH);
		ul_adb->adbh->block_length =
					cpu_to_le32(sizeof(struct mux_adbh));
		next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index;
		ul_adb->next_table_index = next_tb_id;

		/* Clear the local copy of DGs for new ADB */
		memset(ul_adb->dg, 0, sizeof(ul_adb->dg));

		/* Clear the DG count and QLT updated status for new ADB */
		for (if_id = 0; if_id < no_if; if_id++) {
			ul_adb->dg_count[if_id] = 0;
			ul_adb->qlt_updated[if_id] = 0;
		}
		break;

	case IOSM_AGGR_MUX_SIG_ADGH:
		/* Save the ADB memory settings. */
		ul_adb->dest_skb = skb;
		ul_adb->buf = skb->data;
		ul_adb->size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
		/* reset statistic counter */
		ul_adb->if_cnt = 0;
		ul_adb->payload_size = 0;
		ul_adb->dg_cnt_total = 0;

		ul_adb->adgh = (struct mux_adgh *)skb->data;
		memset(ul_adb->adgh, 0, sizeof(struct mux_adgh));
		break;

	case MUX_SIG_QLTH:
		qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
			   (MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl));

		if (qlt_size > IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE) {
			dev_err(ipc_mux->dev,
				"can't support. QLT size:%d SKB size: %d",
				qlt_size, IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE);
			return -ERANGE;
		}

		ul_adb->qlth_skb = skb;
		memset((ul_adb->qlth_skb)->data, 0, qlt_size);
		skb_put(skb, qlt_size);
		break;
	}

	return 0;
}

static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
{
	struct mux_adb *ul_adb = &ipc_mux->ul_adb;
	u16 adgh_len;
	long long bytes;
	char *str;

	if (!ul_adb->dest_skb) {
		dev_err(ipc_mux->dev, "no dest skb");
		return;
	}

	adgh_len = le16_to_cpu(ul_adb->adgh->length);
	skb_put(ul_adb->dest_skb, adgh_len);
	skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
	ul_adb->dest_skb = NULL;

	if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
		struct mux_session *session;

		session = &ipc_mux->session[ul_adb->adgh->if_id];
		str = "available_credits";
		bytes = (long long)session->ul_flow_credits;

	} else {
		str = "pend_bytes";
		bytes = ipc_mux->ul_data_pend_bytes;
		ipc_mux->ul_data_pend_bytes += adgh_len;
	}

	dev_dbg(ipc_mux->dev, "UL ADGH: size=%u, if_id=%d, payload=%d, %s=%lld",
		adgh_len, ul_adb->adgh->if_id, ul_adb->payload_size,
		str, bytes);
}

static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
				   struct mux_adb *ul_adb, int *out_offset)
{
	int i, qlt_size, offset = *out_offset;
	struct mux_qlth *p_adb_qlt;
	struct mux_adth_dg *dg;
	struct mux_adth *adth;
	u16 adth_dg_size;
	u32 *next_tb_id;

	qlt_size = offsetof(struct mux_qlth, ql) +
			MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);

	for (i = 0; i < ipc_mux->nr_sessions; i++) {
		if (ul_adb->dg_count[i] > 0) {
			adth_dg_size = offsetof(struct mux_adth, dg) +
					ul_adb->dg_count[i] * sizeof(*dg);

			*ul_adb->next_table_index = offset;
			adth = (struct mux_adth *)&ul_adb->buf[offset];
			next_tb_id = (unsigned int *)&adth->next_table_index;
			ul_adb->next_table_index = next_tb_id;
			offset += adth_dg_size;
			adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH);
			adth->if_id = i;
			adth->table_length = cpu_to_le16(adth_dg_size);
			adth_dg_size -= offsetof(struct mux_adth, dg);
			memcpy(&adth->dg, ul_adb->dg[i], adth_dg_size);
			ul_adb->if_cnt++;
		}

		if (ul_adb->qlt_updated[i]) {
			*ul_adb->next_table_index = offset;
			p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset];
			ul_adb->next_table_index =
				(u32 *)&p_adb_qlt->next_table_index;
			memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size);
			offset += qlt_size;
		}
	}
	*out_offset = offset;
}
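
/* Table chaining while encoding (sketch): ul_adb->next_table_index
 * always points at the next_table_index field of the most recently
 * written table (initially at ADBH.first_table_index), so storing the
 * current offset through it links the ADTH and QLT tables of all
 * sessions into one chain:
 *
 *	ADBH.first_table_index -> ADTH[0] -> QLT[0] -> ADTH[1] -> ... -> 0
 */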

/**
 * ipc_mux_ul_adb_finish - Add the TD of the aggregated session packets to TDR.
 * @ipc_mux:               Pointer to MUX data-struct.
 */
void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux)
{
	bool ul_data_pend = false;
	struct mux_adb *ul_adb;
	unsigned long flags;
	int offset;

	ul_adb = &ipc_mux->ul_adb;
	if (!ul_adb->dest_skb)
		return;

	offset = *ul_adb->next_table_index;
	ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset);
	ul_adb->adbh->block_length = cpu_to_le32(offset);

	if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) {
		ul_adb->dest_skb = NULL;
		return;
	}

	*ul_adb->next_table_index = 0;
	ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++);
	skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length));

	spin_lock_irqsave(&ipc_mux->channel->ul_list.lock, flags);
	__skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
	spin_unlock_irqrestore(&ipc_mux->channel->ul_list.lock, flags);

	ul_adb->dest_skb = NULL;
	/* Updates the TDs with ul_list */
	ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem);

	/* Delay the doorbell irq */
	if (ul_data_pend)
		ipc_imem_td_update_timer_start(ipc_mux->imem);

	ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length);
	ipc_mux->acc_payload_size += ul_adb->payload_size;
	ipc_mux->ul_data_pend_bytes += ul_adb->payload_size;
}

/* Allocates an ADB from the free list and initializes it with an ADBH. */
static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
				    struct mux_adb *adb, int *size_needed,
				    u32 type)
{
	bool ret_val = false;
	int status;

	if (!adb->dest_skb) {
		/* Allocate memory for the ADB, including the datagram
		 * table header.
		 */
		status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type);
		if (status)
			/* Is a pending ADB available ? */
			ret_val = true; /* None. */

		/* Reset the needed size only for new ADB memory. */
		*size_needed = 0;
	}

	return ret_val;
}

/* Informs the network stack to stop sending further packets for all opened
 * sessions
 */
static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
{
	struct mux_session *session;
	int idx;

	for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
		session = &ipc_mux->session[idx];

		if (!session->wwan)
			continue;

		session->net_tx_stop = true;
	}
}

/* Sends Queue Level Table of all opened sessions */
static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
{
	struct ipc_mem_lite_gen_tbl *qlt;
	struct mux_session *session;
	bool qlt_updated = false;
	int i;
	int qlt_size;

	if (!ipc_mux->initialized || ipc_mux->state != MUX_S_ACTIVE)
		return qlt_updated;

	qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
		   MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);

	for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
		session = &ipc_mux->session[i];

		if (!session->wwan || session->flow_ctl_mask)
			continue;

		if (ipc_mux_ul_skb_alloc(ipc_mux, &ipc_mux->ul_adb,
					 MUX_SIG_QLTH)) {
			dev_err(ipc_mux->dev,
				"no reserved mem to send QLT of if_id: %d", i);
			break;
		}

		/* Prepare QLT */
		qlt = (struct ipc_mem_lite_gen_tbl *)(ipc_mux->ul_adb.qlth_skb)
			      ->data;
		qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
		qlt->length = cpu_to_le16(qlt_size);
		qlt->if_id = i;
		qlt->vfl_length = MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
		qlt->reserved[0] = 0;
		qlt->reserved[1] = 0;

		qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);

		/* Add QLT to the transfer list. */
		skb_queue_tail(&ipc_mux->channel->ul_list,
			       ipc_mux->ul_adb.qlth_skb);

		qlt_updated = true;
		ipc_mux->ul_adb.qlth_skb = NULL;
	}

	if (qlt_updated)
		/* Updates the TDs with ul_list */
		(void)ipc_imem_ul_write_td(ipc_mux->imem);

	return qlt_updated;
}

/* Checks the available credits for the specified session and returns
 * number of packets for which credits are available.
 */
static int ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux,
					  struct mux_session *session,
					  struct sk_buff_head *ul_list,
					  int max_nr_of_pkts)
{
	int pkts_to_send = 0;
	struct sk_buff *skb;
	int credits = 0;

	if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
		credits = session->ul_flow_credits;
		if (credits <= 0) {
			dev_dbg(ipc_mux->dev,
				"FC::if_id[%d] Insuff.Credits/Qlen:%d/%u",
				session->if_id, session->ul_flow_credits,
				session->ul_list.qlen); /* nr_of_bytes */
			return 0;
		}
	} else {
		credits = IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B -
			  ipc_mux->ul_data_pend_bytes;
		if (credits <= 0) {
			ipc_mux_stop_tx_for_all_sessions(ipc_mux);

			dev_dbg(ipc_mux->dev,
				"if_id[%d] encod. fail Bytes: %llu, thresh: %d",
				session->if_id, ipc_mux->ul_data_pend_bytes,
				IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B);
			return 0;
		}
	}

	/* Check if there are enough credits/bytes available to send the
	 * requested max_nr_of_pkts. Otherwise restrict the nr_of_pkts
	 * depending on available credits.
	 */
	skb_queue_walk(ul_list, skb)
	{
		if (!(credits >= skb->len && pkts_to_send < max_nr_of_pkts))
			break;
		credits -= skb->len;
		pkts_to_send++;
	}

	return pkts_to_send;
}
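
/* Worked example with illustrative numbers: with credits == 3000 and
 * an ul_list holding packets of 1500, 1400 and 1300 bytes, the walk
 * above sends the first two (3000 - 1500 - 1400 = 100 credits left)
 * and stops at the third packet because 100 < 1300, returning
 * pkts_to_send == 2.
 */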

/* Encode the UL IP packet according to Lite spec. */
static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
				  struct mux_session *session,
				  struct sk_buff_head *ul_list,
				  struct mux_adb *adb, int nr_of_pkts)
{
	int offset = sizeof(struct mux_adgh);
	int adb_updated = -EINVAL;
	struct sk_buff *src_skb;
	int aligned_size = 0;
	int nr_of_skb = 0;
	u32 pad_len = 0;

	/* Re-calculate the number of packets depending on the number of
	 * bytes to be processed/available credits.
	 */
	nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
						    nr_of_pkts);

	/* If the nr_of_pkts calculated from available credits is <= 0,
	 * there is nothing to do.
	 */
	if (nr_of_pkts <= 0)
		return 0;

	/* Read the configured UL head_pad_length for the session. */
	if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
		pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;

	/* Process all pending UL packets for this session
	 * depending on the allocated datagram table size.
	 */
	while (nr_of_pkts > 0) {
		/* get destination skb allocated */
		if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
					    IOSM_AGGR_MUX_SIG_ADGH)) {
			dev_err(ipc_mux->dev, "no reserved memory for ADGH");
			return -ENOMEM;
		}

		/* Peek at the head of the list. */
		src_skb = skb_peek(ul_list);
		if (!src_skb) {
			dev_err(ipc_mux->dev,
				"skb peek return NULL with count : %d",
				nr_of_pkts);
			break;
		}

		/* Calculate the memory value. */
		aligned_size = ALIGN((pad_len + src_skb->len), 4);

		ipc_mux->size_needed = sizeof(struct mux_adgh) + aligned_size;

		if (ipc_mux->size_needed > adb->size) {
			dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
				ipc_mux->size_needed, adb->size);
			/* Return 1 if any IP packet is added to the transfer
			 * list.
			 */
			return nr_of_skb ? 1 : 0;
		}

		/* Add the buffer (without head padding) to the next transfer */
		memcpy(adb->buf + offset + pad_len, src_skb->data,
		       src_skb->len);

		adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH);
		adb->adgh->if_id = session_id;
		adb->adgh->length =
			cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
				    src_skb->len);
		adb->adgh->service_class = src_skb->priority;
		adb->adgh->next_count = --nr_of_pkts;
		adb->dg_cnt_total++;
		adb->payload_size += src_skb->len;

		if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS)
			/* Decrement the credit value as we are processing the
			 * datagram from the UL list.
			 */
			session->ul_flow_credits -= src_skb->len;

		/* Remove the processed elements and free it. */
		src_skb = skb_dequeue(ul_list);
		dev_kfree_skb(src_skb);
		nr_of_skb++;

		ipc_mux_ul_adgh_finish(ipc_mux);
	}

	if (nr_of_skb) {
		/* Send QLT info to modem if pending bytes > high watermark
		 * in case of mux lite
		 */
		if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
		    ipc_mux->ul_data_pend_bytes >=
			    IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
			adb_updated = ipc_mux_lite_send_qlt(ipc_mux);
		else
			adb_updated = 1;

		/* Updates the TDs with ul_list */
		(void)ipc_imem_ul_write_td(ipc_mux->imem);
	}

	return adb_updated;
}

/**
 * ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB
 * @ipc_mux:            pointer to MUX instance data
 * @p_adb:              pointer to UL aggregated data block
 * @session_id:         session id
 * @qlth_n_ql_size:     Length (in bytes) of the datagram table
 * @ul_list:            pointer to skb buffer head
 */
void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
			      int session_id, int qlth_n_ql_size,
			      struct sk_buff_head *ul_list)
{
	int qlevel = ul_list->qlen;
	struct mux_qlth *p_qlt;

	p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id];

	/* Initialize the QLTH if it has not been done yet. */
	if (p_adb->qlt_updated[session_id] == 0) {
		p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
		p_qlt->if_id = session_id;
		p_qlt->table_length = cpu_to_le16(qlth_n_ql_size);
		p_qlt->reserved = 0;
		p_qlt->reserved2 = 0;
	}

	/* Always update the Queue Level information. */
	p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel);
	p_adb->qlt_updated[session_id] = 1;
}

/* Update the next table index. */
static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
				      int session_id,
				      struct sk_buff_head *ul_list,
				      struct mux_adth_dg *dg,
				      int aligned_size,
				      u32 qlth_n_ql_size,
				      struct mux_adb *adb,
				      struct sk_buff *src_skb)
{
	ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
				 qlth_n_ql_size, ul_list);
	ipc_mux_ul_adb_finish(ipc_mux);
	if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
				    IOSM_AGGR_MUX_SIG_ADBH))
		return -ENOMEM;

	ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);

	ipc_mux->size_needed += offsetof(struct mux_adth, dg);
	ipc_mux->size_needed += qlth_n_ql_size;
	ipc_mux->size_needed += sizeof(*dg) + aligned_size;
	return 0;
}

/* Process encode session UL data. */
static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb,
			    struct mux_adth_dg *dg,
			    struct sk_buff_head *ul_list,
			    struct sk_buff *src_skb, int session_id,
			    int pkt_to_send, u32 qlth_n_ql_size,
			    int *out_offset, int head_pad_len)
{
	int aligned_size;
	int offset = *out_offset;
	unsigned long flags;
	int nr_of_skb = 0;

	while (pkt_to_send > 0) {
		/* Peek at the head of the list. */
		src_skb = skb_peek(ul_list);
		if (!src_skb) {
			dev_err(ipc_mux->dev,
				"skb peek return NULL with count : %d",
				pkt_to_send);
			return -1;
		}
		aligned_size = ALIGN((head_pad_len + src_skb->len), 4);
		ipc_mux->size_needed += sizeof(*dg) + aligned_size;

		if (ipc_mux->size_needed > adb->size ||
		    ((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >=
		      IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) {
			*adb->next_table_index = offset;
			if (mux_ul_dg_update_tbl_index(ipc_mux, session_id,
						       ul_list, dg,
						       aligned_size,
						       qlth_n_ql_size, adb,
						       src_skb) < 0)
				return -ENOMEM;
			nr_of_skb = 0;
			offset = le32_to_cpu(adb->adbh->block_length);
			/* Load pointer to next available datagram entry */
			dg = adb->dg[session_id] + adb->dg_count[session_id];
		}
		/* Add buffer without head padding to next pending transfer. */
		memcpy(adb->buf + offset + head_pad_len,
		       src_skb->data, src_skb->len);
		/* Setup datagram entry. */
		dg->datagram_index = cpu_to_le32(offset);
		dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len);
		dg->service_class = src_skb->priority;
		dg->reserved = 0;
		adb->dg_cnt_total++;
		adb->payload_size += le16_to_cpu(dg->datagram_length);
		dg++;
		adb->dg_count[session_id]++;
		offset += aligned_size;
		/* Remove the processed elements and free it. */
		spin_lock_irqsave(&ul_list->lock, flags);
		src_skb = __skb_dequeue(ul_list);
		spin_unlock_irqrestore(&ul_list->lock, flags);

		dev_kfree_skb(src_skb);
		nr_of_skb++;
		pkt_to_send--;
	}
	*out_offset = offset;
	return nr_of_skb;
}

/* Process encode session UL data to ADB. */
static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id,
			     struct mux_session *session,
			     struct sk_buff_head *ul_list, struct mux_adb *adb,
			     int pkt_to_send)
{
	int adb_updated = -EINVAL;
	int head_pad_len, offset;
	struct sk_buff *src_skb = NULL;
	struct mux_adth_dg *dg;
	u32 qlth_n_ql_size;

	/* If any of the opened sessions has flow control set to ON, then
	 * limit the UL data to mux_flow_ctrl_high_thresh_b bytes.
	 */
	if (ipc_mux->ul_data_pend_bytes >=
		IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
		ipc_mux_stop_tx_for_all_sessions(ipc_mux);
		return adb_updated;
	}

	qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
			 MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
	head_pad_len = session->ul_head_pad_len;

	if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
		head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;

	if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
				    IOSM_AGGR_MUX_SIG_ADBH))
		return -ENOMEM;

	offset = le32_to_cpu(adb->adbh->block_length);

	if (ipc_mux->size_needed == 0)
		ipc_mux->size_needed = offset;

	/* Calculate the size needed for the ADTH, QLTH and QL. */
	if (adb->dg_count[session_id] == 0) {
		ipc_mux->size_needed += offsetof(struct mux_adth, dg);
		ipc_mux->size_needed += qlth_n_ql_size;
	}

	dg = adb->dg[session_id] + adb->dg_count[session_id];

	if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb,
			     session_id, pkt_to_send, qlth_n_ql_size, &offset,
			     head_pad_len) > 0) {
		adb_updated = 1;
		*adb->next_table_index = offset;
		ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
					 qlth_n_ql_size, ul_list);
		adb->adbh->block_length = cpu_to_le32(offset);
	}

	return adb_updated;
}

bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
{
	struct sk_buff_head *ul_list;
	struct mux_session *session;
	int updated = 0;
	int session_id;
	int dg_n;
	int i;

	if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
	    ipc_mux->adb_prep_ongoing)
		return false;

	ipc_mux->adb_prep_ongoing = true;

	for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
		session_id = ipc_mux->rr_next_session;
		session = &ipc_mux->session[session_id];

		/* Go to the next session; handle rr_next_session overflow. */
		ipc_mux->rr_next_session++;
		if (ipc_mux->rr_next_session >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
			ipc_mux->rr_next_session = 0;

		if (!session->wwan || session->flow_ctl_mask ||
		    session->net_tx_stop)
			continue;

		ul_list = &session->ul_list;

		/* Is something pending in UL and flow ctrl off */
		dg_n = skb_queue_len(ul_list);
		if (dg_n > MUX_MAX_UL_DG_ENTRIES)
			dg_n = MUX_MAX_UL_DG_ENTRIES;

		if (dg_n == 0)
			/* Nothing to do for ipc_mux session
			 * -> try next session id.
			 */
			continue;
		if (ipc_mux->protocol == MUX_LITE)
			updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
							 session, ul_list,
							 &ipc_mux->ul_adb,
							 dg_n);
		else
			updated = mux_ul_adb_encode(ipc_mux, session_id,
						    session, ul_list,
						    &ipc_mux->ul_adb,
						    dg_n);
	}

	ipc_mux->adb_prep_ongoing = false;
	return updated == 1;
}

/* Calculates the Payload from any given ADB. */
static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
					struct mux_adbh *p_adbh)
{
	struct mux_adth_dg *dg;
	struct mux_adth *adth;
	u32 payload_size = 0;
	u32 next_table_idx;
	int nr_of_dg, i;

	/* Process the aggregated datagram tables. */
	next_table_idx = le32_to_cpu(p_adbh->first_table_index);

	if (next_table_idx < sizeof(struct mux_adbh)) {
		dev_err(ipc_mux->dev, "unexpected empty ADB");
		return payload_size;
	}

	while (next_table_idx != 0) {
		/* Get the reference to the table header. */
		adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx);

		if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
			nr_of_dg = (le16_to_cpu(adth->table_length) -
					sizeof(struct mux_adth) +
					sizeof(struct mux_adth_dg)) /
					sizeof(struct mux_adth_dg);

			if (nr_of_dg <= 0)
				return payload_size;

			dg = &adth->dg;

			for (i = 0; i < nr_of_dg; i++, dg++) {
				if (le32_to_cpu(dg->datagram_index) <
					sizeof(struct mux_adbh)) {
					return payload_size;
				}
				payload_size +=
					le16_to_cpu(dg->datagram_length);
			}
		}
		next_table_idx = le32_to_cpu(adth->next_table_index);
	}

	return payload_size;
}
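
/* The nr_of_dg arithmetic above relies on struct mux_adth embedding
 * one mux_adth_dg, so a table describing N datagrams has
 * table_length == sizeof(mux_adth) + (N - 1) * sizeof(mux_adth_dg).
 * E.g. with sizeof(mux_adth) == 16 and sizeof(mux_adth_dg) == 8
 * (illustrative sizes only), table_length == 40 yields nr_of_dg == 4.
 */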

void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
	union mux_type_header hr;
	u16 adgh_len;
	int payload;

	if (ipc_mux->protocol == MUX_LITE) {
		hr.adgh = (struct mux_adgh *)skb->data;
		adgh_len = le16_to_cpu(hr.adgh->length);
		if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) &&
		    ipc_mux->ul_flow == MUX_UL)
			ipc_mux->ul_data_pend_bytes -= adgh_len;
	} else {
		hr.adbh = (struct mux_adbh *)(skb->data);
		payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh);
		ipc_mux->ul_data_pend_bytes -= payload;
	}

	if (ipc_mux->ul_flow == MUX_UL)
		dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
			ipc_mux->ul_data_pend_bytes);

	/* Reset the skb settings. */
	skb_trim(skb, 0);

	/* Add the consumed ADB to the free list. */
	skb_queue_tail((&ipc_mux->ul_adb.free_list), skb);
}

/* Start the NETIF uplink send transfer in MUX mode. */
static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
					void *msg, size_t size)
{
	struct iosm_mux *ipc_mux = ipc_imem->mux;
	bool ul_data_pend = false;

	/* Add session UL data to an ADB or ADGH. */
	ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
	if (ul_data_pend) {
		if (ipc_mux->protocol == MUX_AGGREGATION)
			ipc_imem_adb_timer_start(ipc_mux->imem);

		/* Delay the doorbell irq */
		ipc_imem_td_update_timer_start(ipc_mux->imem);
	}
	/* reset the debounce flag */
	ipc_mux->ev_mux_net_transmit_pending = false;

	return 0;
}

int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
			      struct sk_buff *skb)
{
	struct mux_session *session = &ipc_mux->session[if_id];
	int ret = -EINVAL;

	if (ipc_mux->channel &&
	    ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
		dev_err(ipc_mux->dev,
			"channel state is not IMEM_CHANNEL_ACTIVE");
		goto out;
	}

	if (!session->wwan) {
		dev_err(ipc_mux->dev, "session net ID is NULL");
		ret = -EFAULT;
		goto out;
	}

	/* Session is under flow control.
	 * Check if the packet can be queued in the session list; if not,
	 * suspend net tx.
	 */
	if (skb_queue_len(&session->ul_list) >=
	    (session->net_tx_stop ?
		     IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
		     (IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
		      IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
		ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
		ret = -EBUSY;
		goto out;
	}

	/* Add skb to the uplink skb accumulator. */
	skb_queue_tail(&session->ul_list, skb);

	/* Inform the IPC kthread to pass uplink IP packets to CP. */
	if (!ipc_mux->ev_mux_net_transmit_pending) {
		ipc_mux->ev_mux_net_transmit_pending = true;
		ret = ipc_task_queue_send_task(ipc_mux->imem,
					       ipc_mux_tq_ul_trigger_encode, 0,
					       NULL, 0, false);
		if (ret)
			goto out;
	}
	dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
		if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
		skb->len, skb->truesize, skb->priority);
	ret = 0;
out:
	return ret;
}
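
/* Flow-control hysteresis used above, with illustrative values
 * assuming IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD == 64 and
 * IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR == 4: a running session
 * may queue up to 64 * 4 = 256 packets before netif TX is stopped;
 * once net_tx_stop is set, new packets are rejected whenever the queue
 * still holds 64 or more, so the session only fills up again after the
 * queue has drained well below the stop level.
 */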