xref: /openbmc/linux/drivers/net/wwan/t7xx/t7xx_port_proxy.c (revision 87dae9e70bf7be2bd8a3c561fe3ddf666eb8a7a4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/wwan.h>

#include "t7xx_hif_cldma.h"
#include "t7xx_modem_ops.h"
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"

#define Q_IDX_CTRL			0
#define Q_IDX_MBIM			2
#define Q_IDX_AT_CMD			5

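/*
 * All-ones across 16 bits. Real RX sequence numbers are masked to
 * FIELD_MAX(CCCI_H_SEQ_FLD), which is narrower than 16 bits, so this value
 * can never collide with a genuine sequence number and safely marks
 * "no RX sequence number recorded yet".
 */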
#define INVALID_SEQ_NUM			GENMASK(15, 0)

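/*
 * Iterate over every port instance owned by the proxy. Illustrative usage
 * (do_port_thing() is a placeholder, not a driver function):
 *
 *	struct t7xx_port *port;
 *	int i;
 *
 *	for_each_proxy_port(i, port, port_prox)
 *		do_port_thing(port);
 *
 * After the final iteration (p) points one past the end of the ports
 * array, which is harmless as it is never dereferenced there.
 */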
#define for_each_proxy_port(i, p, proxy)	\
	for (i = 0, (p) = &(proxy)->ports[i];	\
	     i < (proxy)->port_count;		\
	     i++, (p) = &(proxy)->ports[i])

static const struct t7xx_port_conf t7xx_md_port_conf[] = {
	{
		.tx_ch = PORT_CH_UART2_TX,
		.rx_ch = PORT_CH_UART2_RX,
		.txq_index = Q_IDX_AT_CMD,
		.rxq_index = Q_IDX_AT_CMD,
		.txq_exp_index = 0xff,
		.rxq_exp_index = 0xff,
		.path_id = CLDMA_ID_MD,
		.ops = &wwan_sub_port_ops,
		.name = "AT",
		.port_type = WWAN_PORT_AT,
	}, {
		.tx_ch = PORT_CH_MBIM_TX,
		.rx_ch = PORT_CH_MBIM_RX,
		.txq_index = Q_IDX_MBIM,
		.rxq_index = Q_IDX_MBIM,
		.path_id = CLDMA_ID_MD,
		.ops = &wwan_sub_port_ops,
		.name = "MBIM",
		.port_type = WWAN_PORT_MBIM,
	}, {
		.tx_ch = PORT_CH_CONTROL_TX,
		.rx_ch = PORT_CH_CONTROL_RX,
		.txq_index = Q_IDX_CTRL,
		.rxq_index = Q_IDX_CTRL,
		.path_id = CLDMA_ID_MD,
		.ops = &ctl_port_ops,
		.name = "t7xx_ctrl",
	}, {
		.tx_ch = PORT_CH_AP_CONTROL_TX,
		.rx_ch = PORT_CH_AP_CONTROL_RX,
		.txq_index = Q_IDX_CTRL,
		.rxq_index = Q_IDX_CTRL,
		.path_id = CLDMA_ID_AP,
		.ops = &ctl_port_ops,
		.name = "t7xx_ap_ctrl",
	},
};

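/*
 * Port layout used before the modem handshake completes
 * (PORT_CFG_ID_EARLY): a single raw port named "ttyDUMP" handled by
 * devlink_port_ops, e.g. for early-stage dump data. The 0xffff channel IDs
 * mean it is never matched by CCCI channel; its traffic arrives on a
 * dedicated queue and, being an is_early_port, keeps its CCCI header
 * (see t7xx_port_proxy_recv_skb()).
 */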
static struct t7xx_port_conf t7xx_early_port_conf[] = {
	{
		.tx_ch = 0xffff,
		.rx_ch = 0xffff,
		.txq_index = 1,
		.rxq_index = 1,
		.txq_exp_index = 1,
		.rxq_exp_index = 1,
		.path_id = CLDMA_ID_AP,
		.is_early_port = true,
		.ops = &devlink_port_ops,
		.name = "ttyDUMP",
	},
};

static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
{
	const struct t7xx_port_conf *port_conf;
	struct t7xx_port *port;
	int i;

	for_each_proxy_port(i, port, port_prox) {
		port_conf = port->port_conf;
		if (port_conf->rx_ch == ch || port_conf->tx_ch == ch)
			return port;
	}

	return NULL;
}

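/*
 * Compute the next expected RX sequence number from the CCCI header.
 * The ordering check only applies when the sender set CCCI_H_AST_BIT and a
 * previous sequence number has been recorded; a mismatch is logged
 * (rate limited) but the packet is still accepted.
 */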
static u16 t7xx_port_next_rx_seq_num(struct t7xx_port *port, struct ccci_header *ccci_h)
{
	u32 status = le32_to_cpu(ccci_h->status);
	u16 seq_num, next_seq_num;
	bool assert_bit;

	seq_num = FIELD_GET(CCCI_H_SEQ_FLD, status);
	next_seq_num = (seq_num + 1) & FIELD_MAX(CCCI_H_SEQ_FLD);
	assert_bit = status & CCCI_H_AST_BIT;
	if (!assert_bit || port->seq_nums[MTK_RX] == INVALID_SEQ_NUM)
		return next_seq_num;

	if (seq_num != port->seq_nums[MTK_RX])
		dev_warn_ratelimited(port->dev,
				     "seq num out-of-order %u != %u (header %X, len %X)\n",
				     seq_num, port->seq_nums[MTK_RX],
				     le32_to_cpu(ccci_h->packet_header),
				     le32_to_cpu(ccci_h->packet_len));

	return next_seq_num;
}

void t7xx_port_proxy_reset(struct port_proxy *port_prox)
{
	struct t7xx_port *port;
	int i;

	for_each_proxy_port(i, port, port_prox) {
		port->seq_nums[MTK_RX] = INVALID_SEQ_NUM;
		port->seq_nums[MTK_TX] = 0;
	}
}

static int t7xx_port_get_queue_no(struct t7xx_port *port)
{
	const struct t7xx_port_conf *port_conf = port->port_conf;
	struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;

	return t7xx_fsm_get_md_state(ctl) == MD_STATE_EXCEPTION ?
		port_conf->txq_exp_index : port_conf->txq_index;
}

static void t7xx_port_struct_init(struct t7xx_port *port)
{
	INIT_LIST_HEAD(&port->entry);
	INIT_LIST_HEAD(&port->queue_entry);
	skb_queue_head_init(&port->rx_skb_list);
	init_waitqueue_head(&port->rx_wq);
	port->seq_nums[MTK_RX] = INVALID_SEQ_NUM;
	port->seq_nums[MTK_TX] = 0;
	atomic_set(&port->usage_cnt, 0);
}

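/*
 * skb allocation helpers: headroom is reserved for the headers that the
 * TX path pushes back later. t7xx_port_alloc_skb() leaves room for the
 * struct ccci_header added by t7xx_port_send_ccci_skb();
 * t7xx_ctrl_alloc_skb() additionally reserves the struct ctrl_msg_header
 * added by t7xx_port_send_ctl_skb().
 */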
struct sk_buff *t7xx_port_alloc_skb(int payload)
{
	struct sk_buff *skb = __dev_alloc_skb(payload + sizeof(struct ccci_header), GFP_KERNEL);

	if (skb)
		skb_reserve(skb, sizeof(struct ccci_header));

	return skb;
}

struct sk_buff *t7xx_ctrl_alloc_skb(int payload)
{
	struct sk_buff *skb = t7xx_port_alloc_skb(payload + sizeof(struct ctrl_msg_header));

	if (skb)
		skb_reserve(skb, sizeof(struct ctrl_msg_header));

	return skb;
}

/**
 * t7xx_port_enqueue_skb() - Enqueue the received skb into the port's rx_skb_list.
 * @port: port context.
 * @skb: received skb.
 *
 * Return:
 * * 0		- Success.
 * * -ENOBUFS	- Not enough buffer space. Caller will try again later, skb is not consumed.
 */
int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&port->rx_wq.lock, flags);
	if (port->rx_skb_list.qlen >= port->rx_length_th) {
		spin_unlock_irqrestore(&port->rx_wq.lock, flags);

		return -ENOBUFS;
	}
	__skb_queue_tail(&port->rx_skb_list, skb);
	spin_unlock_irqrestore(&port->rx_wq.lock, flags);

	wake_up_all(&port->rx_wq);
	return 0;
}

int t7xx_get_port_mtu(struct t7xx_port *port)
{
	enum cldma_id path_id = port->port_conf->path_id;
	int tx_qno = t7xx_port_get_queue_no(port);
	struct cldma_ctrl *md_ctrl;

	md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
	return md_ctrl->tx_ring[tx_qno].pkt_size;
}

int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
{
	enum cldma_id path_id = port->port_conf->path_id;
	struct cldma_ctrl *md_ctrl;
	int ret, tx_qno;

	md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
	tx_qno = t7xx_port_get_queue_no(port);
	ret = t7xx_cldma_send_skb(md_ctrl, tx_qno, skb);
	if (ret)
		dev_err(port->dev, "Failed to send skb: %d\n", ret);

	return ret;
}

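/*
 * Prepend the CCCI header and hand the skb to CLDMA. The status word
 * carries the TX channel, the port's TX sequence number and
 * CCCI_H_AST_BIT; packet_len is read after skb_push(), so it covers the
 * header as well as the payload. The TX sequence number is only advanced
 * when the send succeeds.
 */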
static int t7xx_port_send_ccci_skb(struct t7xx_port *port, struct sk_buff *skb,
				   unsigned int pkt_header, unsigned int ex_msg)
{
	const struct t7xx_port_conf *port_conf = port->port_conf;
	struct ccci_header *ccci_h;
	u32 status;
	int ret;

	ccci_h = skb_push(skb, sizeof(*ccci_h));
	status = FIELD_PREP(CCCI_H_CHN_FLD, port_conf->tx_ch) |
		 FIELD_PREP(CCCI_H_SEQ_FLD, port->seq_nums[MTK_TX]) | CCCI_H_AST_BIT;
	ccci_h->status = cpu_to_le32(status);
	ccci_h->packet_header = cpu_to_le32(pkt_header);
	ccci_h->packet_len = cpu_to_le32(skb->len);
	ccci_h->ex_msg = cpu_to_le32(ex_msg);

	ret = t7xx_port_send_raw_skb(port, skb);
	if (ret)
		return ret;

	port->seq_nums[MTK_TX]++;
	return 0;
}

int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
			   unsigned int ex_msg)
{
	struct ctrl_msg_header *ctrl_msg_h;
	unsigned int msg_len = skb->len;
	u32 pkt_header = 0;

	ctrl_msg_h = skb_push(skb, sizeof(*ctrl_msg_h));
	ctrl_msg_h->ctrl_msg_id = cpu_to_le32(msg);
	ctrl_msg_h->ex_msg = cpu_to_le32(ex_msg);
	ctrl_msg_h->data_length = cpu_to_le32(msg_len);

	if (!msg_len)
		pkt_header = CCCI_HEADER_NO_DATA;

	return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg);
}

int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
		       unsigned int ex_msg)
{
	struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
	unsigned int fsm_state;

	fsm_state = t7xx_fsm_get_ctl_state(ctl);
	if (fsm_state != FSM_STATE_PRE_START) {
		const struct t7xx_port_conf *port_conf = port->port_conf;
		enum md_state md_state = t7xx_fsm_get_md_state(ctl);

		switch (md_state) {
		case MD_STATE_EXCEPTION:
			if (port_conf->tx_ch != PORT_CH_MD_LOG_TX)
				return -EBUSY;
			break;

		case MD_STATE_WAITING_FOR_HS1:
		case MD_STATE_WAITING_FOR_HS2:
		case MD_STATE_STOPPED:
		case MD_STATE_WAITING_TO_STOP:
		case MD_STATE_INVALID:
			return -ENODEV;

		default:
			break;
		}
	}

	return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg);
}

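/*
 * Build the RX lookup tables: rx_ch_ports hashes every port by the low
 * bits of its RX channel (PORT_CH_ID_MASK), which is what
 * t7xx_port_proxy_find_port() walks to dispatch by CCCI channel, while
 * queue_ports groups ports by CLDMA path and RX queue index.
 */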
static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox)
{
	struct t7xx_port *port;
	int i, j;

	for (i = 0; i < ARRAY_SIZE(port_prox->rx_ch_ports); i++)
		INIT_LIST_HEAD(&port_prox->rx_ch_ports[i]);

	for (j = 0; j < ARRAY_SIZE(port_prox->queue_ports); j++) {
		for (i = 0; i < ARRAY_SIZE(port_prox->queue_ports[j]); i++)
			INIT_LIST_HEAD(&port_prox->queue_ports[j][i]);
	}

	for_each_proxy_port(i, port, port_prox) {
		const struct t7xx_port_conf *port_conf = port->port_conf;
		enum cldma_id path_id = port_conf->path_id;
		u8 ch_id;

		ch_id = FIELD_GET(PORT_CH_ID_MASK, port_conf->rx_ch);
		list_add_tail(&port->entry, &port_prox->rx_ch_ports[ch_id]);
		list_add_tail(&port->queue_entry,
			      &port_prox->queue_ports[path_id][port_conf->rxq_index]);
	}
}

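/*
 * RX path for a dedicated CLDMA queue: no CCCI channel demux is done and
 * the skb is handed to the first port, which in the early configuration is
 * the only one. -ENOBUFS is propagated so delivery can be retried later;
 * any other error drops the skb here.
 */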
static int t7xx_port_proxy_recv_skb_from_queue(struct t7xx_pci_dev *t7xx_dev,
					       struct cldma_queue *queue, struct sk_buff *skb)
{
	struct port_proxy *port_prox = t7xx_dev->md->port_prox;
	const struct t7xx_port_conf *port_conf;
	struct t7xx_port *port;
	int ret;

	port = port_prox->ports;
	port_conf = port->port_conf;

	ret = port_conf->ops->recv_skb(port, skb);
	if (ret < 0 && ret != -ENOBUFS) {
		dev_err(port->dev, "drop on RX ch %d, %d\n", port_conf->rx_ch, ret);
		dev_kfree_skb_any(skb);
	}

	return ret;
}

static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev,
						   struct cldma_queue *queue, u16 channel)
{
	struct port_proxy *port_prox = t7xx_dev->md->port_prox;
	struct list_head *port_list;
	struct t7xx_port *port;
	u8 ch_id;

	ch_id = FIELD_GET(PORT_CH_ID_MASK, channel);
	port_list = &port_prox->rx_ch_ports[ch_id];
	list_for_each_entry(port, port_list, entry) {
		const struct t7xx_port_conf *port_conf = port->port_conf;

		if (queue->md_ctrl->hif_id == port_conf->path_id &&
		    channel == port_conf->rx_ch)
			return port;
	}

	return NULL;
}

struct t7xx_port *t7xx_port_proxy_get_port_by_name(struct port_proxy *port_prox, char *port_name)
{
	const struct t7xx_port_conf *port_conf;
	struct t7xx_port *port;
	int i;

	for_each_proxy_port(i, port, port_prox) {
		port_conf = port->port_conf;

		if (!strncmp(port_conf->name, port_name, strlen(port_conf->name)))
			return port;
	}

	return NULL;
}

/**
 * t7xx_port_proxy_recv_skb() - Dispatch received skb.
 * @queue: CLDMA queue.
 * @skb: Socket buffer.
 *
 * Return:
 * * 0		- Packet consumed.
 * * -ERROR	- Failed to process skb.
 */
static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
	struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
	struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
	struct t7xx_fsm_ctl *ctl = t7xx_dev->md->fsm_ctl;
	struct device *dev = queue->md_ctrl->dev;
	const struct t7xx_port_conf *port_conf;
	struct t7xx_port *port;
	u16 seq_num, channel;
	int ret;

	if (queue->q_type == CLDMA_DEDICATED_Q)
		return t7xx_port_proxy_recv_skb_from_queue(t7xx_dev, queue, skb);

	channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status));
	if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) {
		dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel);
		goto drop_skb;
	}

	port = t7xx_port_proxy_find_port(t7xx_dev, queue, channel);
	if (!port) {
		dev_err_ratelimited(dev, "Packet drop on channel 0x%x, port not found\n", channel);
		goto drop_skb;
	}

	seq_num = t7xx_port_next_rx_seq_num(port, ccci_h);
	port_conf = port->port_conf;
	if (!port_conf->is_early_port)
		skb_pull(skb, sizeof(*ccci_h));

	ret = port_conf->ops->recv_skb(port, skb);
	/* A non-zero return means the caller should retry later; restore the
	 * header pulled above so the skb can be resubmitted intact.
	 */
	if (ret) {
		skb_push(skb, sizeof(*ccci_h));
		return ret;
	}

	port->seq_nums[MTK_RX] = seq_num;
	return 0;

drop_skb:
	dev_kfree_skb_any(skb);
	return 0;
}

/**
 * t7xx_port_proxy_md_status_notify() - Notify all ports of state.
 * @port_prox: The port_proxy pointer.
 * @state: State.
 *
 * Called by t7xx_fsm. Used to dispatch the modem state to all ports that
 * need to know about MD state transitions.
 */
void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state)
{
	struct t7xx_port *port;
	int i;

	for_each_proxy_port(i, port, port_prox) {
		const struct t7xx_port_conf *port_conf = port->port_conf;

		if (port_conf->ops->md_state_notify)
			port_conf->ops->md_state_notify(port, state);
	}
}

static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
{
	struct port_proxy *port_prox = md->port_prox;
	struct t7xx_port *port;
	int i;

	for_each_proxy_port(i, port, port_prox) {
		const struct t7xx_port_conf *port_conf = port->port_conf;

		t7xx_port_struct_init(port);

		if (port_conf->tx_ch == PORT_CH_CONTROL_TX)
			md->core_md.ctl_port = port;

		if (port_conf->tx_ch == PORT_CH_AP_CONTROL_TX)
			md->core_ap.ctl_port = port;

		port->t7xx_dev = md->t7xx_dev;
		port->dev = &md->t7xx_dev->pdev->dev;
		port->dl = md->t7xx_dev->dl;
		spin_lock_init(&port->port_update_lock);
		port->chan_enable = false;

		if (port_conf->ops->init)
			port_conf->ops->init(port);
	}

	t7xx_proxy_setup_ch_mapping(port_prox);
}

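/*
 * Switch the proxy between port layouts (the early "ttyDUMP" layout and
 * the full modem layout). Ports of the previous configuration are
 * uninitialized and their array freed first. If the new allocation fails,
 * the function returns early with cfg_id unchanged and no usable ports.
 */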
void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id)
{
	struct port_proxy *port_prox = md->port_prox;
	const struct t7xx_port_conf *port_conf;
	struct device *dev = port_prox->dev;
	unsigned int port_count;
	struct t7xx_port *port;
	int i;

	if (port_prox->cfg_id == cfg_id)
		return;

	if (port_prox->cfg_id != PORT_CFG_ID_INVALID) {
		for_each_proxy_port(i, port, port_prox)
			port->port_conf->ops->uninit(port);

		devm_kfree(dev, port_prox->ports);
	}

	if (cfg_id == PORT_CFG_ID_EARLY) {
		port_conf = t7xx_early_port_conf;
		port_count = ARRAY_SIZE(t7xx_early_port_conf);
	} else {
		port_conf = t7xx_md_port_conf;
		port_count = ARRAY_SIZE(t7xx_md_port_conf);
	}

	port_prox->ports = devm_kcalloc(dev, port_count, sizeof(struct t7xx_port), GFP_KERNEL);
	if (!port_prox->ports)
		return;

	for (i = 0; i < port_count; i++)
		port_prox->ports[i].port_conf = &port_conf[i];

	port_prox->cfg_id = cfg_id;
	port_prox->port_count = port_count;
	t7xx_proxy_init_all_ports(md);
}

static int t7xx_proxy_alloc(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct port_proxy *port_prox;

	port_prox = devm_kzalloc(dev, sizeof(*port_prox), GFP_KERNEL);
	if (!port_prox)
		return -ENOMEM;

	md->port_prox = port_prox;
	port_prox->dev = dev;
	t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);

	return 0;
}

/**
 * t7xx_port_proxy_init() - Initialize ports.
 * @md: Modem.
 *
 * Create all port instances.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from a failed sub-initialization.
 */
int t7xx_port_proxy_init(struct t7xx_modem *md)
{
	int ret;

	ret = t7xx_proxy_alloc(md);
	if (ret)
		return ret;

	t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
	t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
	return 0;
}

void t7xx_port_proxy_uninit(struct port_proxy *port_prox)
{
	struct t7xx_port *port;
	int i;

	for_each_proxy_port(i, port, port_prox) {
		const struct t7xx_port_conf *port_conf = port->port_conf;

		if (port_conf->ops->uninit)
			port_conf->ops->uninit(port);
	}
}

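/*
 * Enable or disable a channel, typically on request from the modem (port
 * enumeration control messages). Looks up the port owning the channel and
 * invokes its enable_chl()/disable_chl() callback when one is provided.
 */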
int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
				       bool en_flag)
{
	struct t7xx_port *port = t7xx_proxy_get_port_by_ch(port_prox, ch_id);
	const struct t7xx_port_conf *port_conf;

	if (!port)
		return -EINVAL;

	port_conf = port->port_conf;

	if (en_flag) {
		if (port_conf->ops->enable_chl)
			port_conf->ops->enable_chl(port);
	} else {
		if (port_conf->ops->disable_chl)
			port_conf->ops->disable_chl(port);
	}

	return 0;
}
620