// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"

#define PORT_MSG_VERSION	GENMASK(31, 16)
#define PORT_MSG_PRT_CNT	GENMASK(15, 0)

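/*
 * Port enumeration message layout, as parsed by t7xx_port_enum_msg_handler():
 * the head/tail patterns frame the message, @info carries the protocol
 * version (bits 31:16) and the number of port descriptors (bits 15:0), and
 * @data holds one 32-bit descriptor per port (channel ID plus enable flag).
 */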
struct port_msg {
	__le32	head_pattern;
	__le32	info;
	__le32	tail_pattern;
	__le32	data[];
};

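/*
 * Send a control message with no payload to the modem. The empty SKB only
 * carries the control header; it is freed here if the port layer did not
 * take ownership of it.
 */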
static int port_ctl_send_msg_to_md(struct t7xx_port *port, unsigned int msg, unsigned int ex_msg)
{
	struct sk_buff *skb;
	int ret;

	skb = t7xx_ctrl_alloc_skb(0);
	if (!skb)
		return -ENOMEM;

	ret = t7xx_port_send_ctl_skb(port, skb, msg, ex_msg);
	if (ret)
		dev_kfree_skb_any(skb);

	return ret;
}

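/*
 * Handle modem exception (MD_EX) control messages. These are only valid
 * while the modem is in the exception state; MD_EX is acknowledged back to
 * the modem and every stage is reported to the FSM as an event.
 */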
static int fsm_ee_message_handler(struct t7xx_port *port, struct t7xx_fsm_ctl *ctl,
				  struct sk_buff *skb)
{
	struct ctrl_msg_header *ctrl_msg_h = (struct ctrl_msg_header *)skb->data;
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	enum md_state md_state;
	int ret = -EINVAL;

	md_state = t7xx_fsm_get_md_state(ctl);
	if (md_state != MD_STATE_EXCEPTION) {
		dev_err(dev, "Received invalid MD_EX %x when MD state is %d\n",
			le32_to_cpu(ctrl_msg_h->ex_msg), md_state);
		return -EINVAL;
	}

	switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) {
	case CTL_ID_MD_EX:
		if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ID) {
			dev_err(dev, "Received invalid MD_EX %x\n",
				le32_to_cpu(ctrl_msg_h->ex_msg));
			break;
		}

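		/* Echo CTL_ID_MD_EX with the check ID back to the modem as an acknowledgment. */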
		ret = port_ctl_send_msg_to_md(port, CTL_ID_MD_EX, MD_EX_CHK_ID);
		if (ret) {
			dev_err(dev, "Failed to send exception message to modem\n");
			break;
		}

		ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX, NULL, 0);
		if (ret)
			dev_err(dev, "Failed to append Modem Exception event\n");

		break;

	case CTL_ID_MD_EX_ACK:
		if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ACK_ID) {
			dev_err(dev, "Received invalid MD_EX_ACK %x\n",
				le32_to_cpu(ctrl_msg_h->ex_msg));
			break;
		}

		ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_REC_OK, NULL, 0);
		if (ret)
			dev_err(dev, "Failed to append Modem Exception Received event\n");

		break;

	case CTL_ID_MD_EX_PASS:
		ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_PASS, NULL, 0);
		if (ret)
			dev_err(dev, "Failed to append Modem Exception Passed event\n");

		break;

	case CTL_ID_DRV_VER_ERROR:
		dev_err(dev, "AP/MD driver version mismatch\n");
	}

	return ret;
}

/**
 * t7xx_port_enum_msg_handler() - Parse the port enumeration message to create/remove nodes.
 * @md: Modem context.
 * @msg: Port enumeration message.
 *
 * Used to control the creation and removal of device nodes.
 *
 * Return:
 * * 0		- Success.
 * * -EFAULT	- Message check failure.
 */
int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	unsigned int version, port_count, i;
	struct port_msg *port_msg = msg;

	version = FIELD_GET(PORT_MSG_VERSION, le32_to_cpu(port_msg->info));
	if (version != PORT_ENUM_VER ||
	    le32_to_cpu(port_msg->head_pattern) != PORT_ENUM_HEAD_PATTERN ||
	    le32_to_cpu(port_msg->tail_pattern) != PORT_ENUM_TAIL_PATTERN) {
		dev_err(dev, "Invalid port control message %x:%x:%x\n",
			version, le32_to_cpu(port_msg->head_pattern),
			le32_to_cpu(port_msg->tail_pattern));
		return -EFAULT;
	}

	port_count = FIELD_GET(PORT_MSG_PRT_CNT, le32_to_cpu(port_msg->info));
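	/* Each descriptor carries a channel ID and an enable flag for that port. */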
	for (i = 0; i < port_count; i++) {
		u32 port_info = le32_to_cpu(port_msg->data[i]);
		unsigned int ch_id;
		bool en_flag;

		ch_id = FIELD_GET(PORT_INFO_CH_ID, port_info);
		en_flag = port_info & PORT_INFO_ENFLG;
		if (t7xx_port_proxy_chl_enable_disable(md->port_prox, ch_id, en_flag))
			dev_dbg(dev, "Port:%x not found\n", ch_id);
	}

	return 0;
}

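/*
 * Dispatch a control port SKB by control message ID: handshake 2 data and
 * port enumeration are forwarded to the FSM/port proxy, exception messages
 * go through fsm_ee_message_handler(), and unknown IDs are rejected.
 */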
static int control_msg_handler(struct t7xx_port *port, struct sk_buff *skb)
{
	const struct t7xx_port_conf *port_conf = port->port_conf;
	struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
	struct ctrl_msg_header *ctrl_msg_h;
	int ret = 0;

	ctrl_msg_h = (struct ctrl_msg_header *)skb->data;
	switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) {
	case CTL_ID_HS2_MSG:
		skb_pull(skb, sizeof(*ctrl_msg_h));

		if (port_conf->rx_ch == PORT_CH_CONTROL_RX) {
			ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2, skb->data,
						    le32_to_cpu(ctrl_msg_h->data_length));
			if (ret)
				dev_err(port->dev, "Failed to append Handshake 2 event\n");
		}

		dev_kfree_skb_any(skb);
		break;

	case CTL_ID_MD_EX:
	case CTL_ID_MD_EX_ACK:
	case CTL_ID_MD_EX_PASS:
	case CTL_ID_DRV_VER_ERROR:
		ret = fsm_ee_message_handler(port, ctl, skb);
		dev_kfree_skb_any(skb);
		break;

	case CTL_ID_PORT_ENUM:
		skb_pull(skb, sizeof(*ctrl_msg_h));
		ret = t7xx_port_enum_msg_handler(ctl->md, (struct port_msg *)skb->data);
		if (!ret)
			ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM, 0);
		else
			ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM,
						      PORT_ENUM_VER_MISMATCH);

		/* The reply above uses a fresh SKB, so the request SKB can be freed here. */
		dev_kfree_skb_any(skb);
		break;

	default:
		ret = -EINVAL;
		dev_err(port->dev, "Unknown control message ID to FSM %x\n",
			le32_to_cpu(ctrl_msg_h->ctrl_msg_id));
		dev_kfree_skb_any(skb);
		break;
	}

	if (ret)
		dev_err(port->dev, "%s control message handling error: %d\n", port_conf->name, ret);

	return ret;
}

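/*
 * RX thread for the control port: sleeps on rx_wq until an SKB is queued or
 * the thread is asked to stop. rx_wq.lock also serializes access to
 * rx_skb_list, so dequeuing happens under that lock.
 */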
static int port_ctl_rx_thread(void *arg)
{
	while (!kthread_should_stop()) {
		struct t7xx_port *port = arg;
		struct sk_buff *skb;
		unsigned long flags;

		spin_lock_irqsave(&port->rx_wq.lock, flags);
		if (skb_queue_empty(&port->rx_skb_list) &&
		    wait_event_interruptible_locked_irq(port->rx_wq,
							!skb_queue_empty(&port->rx_skb_list) ||
							kthread_should_stop())) {
			spin_unlock_irqrestore(&port->rx_wq.lock, flags);
			continue;
		}
		if (kthread_should_stop()) {
			spin_unlock_irqrestore(&port->rx_wq.lock, flags);
			break;
		}
		skb = __skb_dequeue(&port->rx_skb_list);
		spin_unlock_irqrestore(&port->rx_wq.lock, flags);

		control_msg_handler(port, skb);
	}

	return 0;
}

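/* Start the control RX thread and set the RX queue length threshold. */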
static int port_ctl_init(struct t7xx_port *port)
{
	const struct t7xx_port_conf *port_conf = port->port_conf;

	port->thread = kthread_run(port_ctl_rx_thread, port, "%s", port_conf->name);
	if (IS_ERR(port->thread)) {
		dev_err(port->dev, "Failed to start port control thread\n");
		return PTR_ERR(port->thread);
	}

	port->rx_length_th = CTRL_QUEUE_MAXLEN;
	return 0;
}

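/*
 * Stop the RX thread, then drop the queue length threshold and free any
 * SKBs still pending on rx_skb_list under the rx_wq lock.
 */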
static void port_ctl_uninit(struct t7xx_port *port)
{
	unsigned long flags;
	struct sk_buff *skb;

	if (port->thread)
		kthread_stop(port->thread);

	spin_lock_irqsave(&port->rx_wq.lock, flags);
	port->rx_length_th = 0;
	while ((skb = __skb_dequeue(&port->rx_skb_list)) != NULL)
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&port->rx_wq.lock, flags);
}

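/* Control port operations: received SKBs are enqueued and consumed by the RX thread. */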
struct port_ops ctl_port_ops = {
	.init = port_ctl_init,
	.recv_skb = t7xx_port_enqueue_skb,
	.uninit = port_ctl_uninit,
};