1 /*
2  *   Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
3  *   Copyright (c) 2017, I2SE GmbH
4  *
5  *   Permission to use, copy, modify, and/or distribute this software
6  *   for any purpose with or without fee is hereby granted, provided
7  *   that the above copyright notice and this permission notice appear
8  *   in all copies.
9  *
10  *   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  *   WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  *   WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
13  *   THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
14  *   CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
15  *   LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
16  *   NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
17  *   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
/*   This module implements the Qualcomm Atheros UART protocol for a
 *   kernel-based UART device; it essentially acts as an Ethernet-to-UART
 *   serial converter.
 */
24 
25 #include <linux/device.h>
26 #include <linux/errno.h>
27 #include <linux/etherdevice.h>
28 #include <linux/if_arp.h>
29 #include <linux/if_ether.h>
30 #include <linux/jiffies.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/netdevice.h>
34 #include <linux/of.h>
35 #include <linux/of_net.h>
36 #include <linux/sched.h>
37 #include <linux/serdev.h>
38 #include <linux/skbuff.h>
39 #include <linux/types.h>
40 
41 #include "qca_7k_common.h"
42 
43 #define QCAUART_DRV_VERSION "0.1.0"
44 #define QCAUART_DRV_NAME "qcauart"
45 #define QCAUART_TX_TIMEOUT (1 * HZ)
46 
/* Per-device driver state, stored as netdev_priv() of the net_device. */
struct qcauart {
	struct net_device *net_dev;
	spinlock_t lock;			/* transmit lock */
	struct work_struct tx_work;		/* Flushes transmit buffer   */

	struct serdev_device *serdev;
	struct qcafrm_handle frm_handle;	/* RX de-framing state machine */
	struct sk_buff *rx_skb;			/* skb the RX FSM is filling */

	unsigned char *tx_head;			/* pointer to next XMIT byte */
	int tx_left;				/* bytes left in XMIT queue  */
	unsigned char *tx_buffer;		/* framed copy of packet in flight */
};
60 
61 static int
62 qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
63 		size_t count)
64 {
65 	struct qcauart *qca = serdev_device_get_drvdata(serdev);
66 	struct net_device *netdev = qca->net_dev;
67 	struct net_device_stats *n_stats = &netdev->stats;
68 	size_t i;
69 
70 	if (!qca->rx_skb) {
71 		qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
72 							netdev->mtu +
73 							VLAN_ETH_HLEN);
74 		if (!qca->rx_skb) {
75 			n_stats->rx_errors++;
76 			n_stats->rx_dropped++;
77 			return 0;
78 		}
79 	}
80 
81 	for (i = 0; i < count; i++) {
82 		s32 retcode;
83 
84 		retcode = qcafrm_fsm_decode(&qca->frm_handle,
85 					    qca->rx_skb->data,
86 					    skb_tailroom(qca->rx_skb),
87 					    data[i]);
88 
89 		switch (retcode) {
90 		case QCAFRM_GATHER:
91 		case QCAFRM_NOHEAD:
92 			break;
93 		case QCAFRM_NOTAIL:
94 			netdev_dbg(netdev, "recv: no RX tail\n");
95 			n_stats->rx_errors++;
96 			n_stats->rx_dropped++;
97 			break;
98 		case QCAFRM_INVLEN:
99 			netdev_dbg(netdev, "recv: invalid RX length\n");
100 			n_stats->rx_errors++;
101 			n_stats->rx_dropped++;
102 			break;
103 		default:
104 			n_stats->rx_packets++;
105 			n_stats->rx_bytes += retcode;
106 			skb_put(qca->rx_skb, retcode);
107 			qca->rx_skb->protocol = eth_type_trans(
108 						qca->rx_skb, qca->rx_skb->dev);
109 			skb_checksum_none_assert(qca->rx_skb);
110 			netif_rx(qca->rx_skb);
111 			qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
112 								netdev->mtu +
113 								VLAN_ETH_HLEN);
114 			if (!qca->rx_skb) {
115 				netdev_dbg(netdev, "recv: out of RX resources\n");
116 				n_stats->rx_errors++;
117 				return i;
118 			}
119 		}
120 	}
121 
122 	return i;
123 }
124 
125 /* Write out any remaining transmit buffer. Scheduled when tty is writable */
126 static void qcauart_transmit(struct work_struct *work)
127 {
128 	struct qcauart *qca = container_of(work, struct qcauart, tx_work);
129 	struct net_device_stats *n_stats = &qca->net_dev->stats;
130 	int written;
131 
132 	spin_lock_bh(&qca->lock);
133 
134 	/* First make sure we're connected. */
135 	if (!netif_running(qca->net_dev)) {
136 		spin_unlock_bh(&qca->lock);
137 		return;
138 	}
139 
140 	if (qca->tx_left <= 0)  {
141 		/* Now serial buffer is almost free & we can start
142 		 * transmission of another packet
143 		 */
144 		n_stats->tx_packets++;
145 		spin_unlock_bh(&qca->lock);
146 		netif_wake_queue(qca->net_dev);
147 		return;
148 	}
149 
150 	written = serdev_device_write_buf(qca->serdev, qca->tx_head,
151 					  qca->tx_left);
152 	if (written > 0) {
153 		qca->tx_left -= written;
154 		qca->tx_head += written;
155 	}
156 	spin_unlock_bh(&qca->lock);
157 }
158 
159 /* Called by the driver when there's room for more data.
160  * Schedule the transmit.
161  */
162 static void qca_tty_wakeup(struct serdev_device *serdev)
163 {
164 	struct qcauart *qca = serdev_device_get_drvdata(serdev);
165 
166 	schedule_work(&qca->tx_work);
167 }
168 
/* Callbacks invoked by the serdev core for RX data and TX readiness. */
static const struct serdev_device_ops qca_serdev_ops = {
	.receive_buf = qca_tty_receive,
	.write_wakeup = qca_tty_wakeup,
};
173 
174 static int qcauart_netdev_open(struct net_device *dev)
175 {
176 	struct qcauart *qca = netdev_priv(dev);
177 
178 	netif_start_queue(qca->net_dev);
179 
180 	return 0;
181 }
182 
183 static int qcauart_netdev_close(struct net_device *dev)
184 {
185 	struct qcauart *qca = netdev_priv(dev);
186 
187 	netif_stop_queue(dev);
188 	flush_work(&qca->tx_work);
189 
190 	spin_lock_bh(&qca->lock);
191 	qca->tx_left = 0;
192 	spin_unlock_bh(&qca->lock);
193 
194 	return 0;
195 }
196 
197 static netdev_tx_t
198 qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
199 {
200 	struct net_device_stats *n_stats = &dev->stats;
201 	struct qcauart *qca = netdev_priv(dev);
202 	u8 pad_len = 0;
203 	int written;
204 	u8 *pos;
205 
206 	spin_lock(&qca->lock);
207 
208 	WARN_ON(qca->tx_left);
209 
210 	if (!netif_running(dev))  {
211 		spin_unlock(&qca->lock);
212 		netdev_warn(qca->net_dev, "xmit: iface is down\n");
213 		goto out;
214 	}
215 
216 	pos = qca->tx_buffer;
217 
218 	if (skb->len < QCAFRM_MIN_LEN)
219 		pad_len = QCAFRM_MIN_LEN - skb->len;
220 
221 	pos += qcafrm_create_header(pos, skb->len + pad_len);
222 
223 	memcpy(pos, skb->data, skb->len);
224 	pos += skb->len;
225 
226 	if (pad_len) {
227 		memset(pos, 0, pad_len);
228 		pos += pad_len;
229 	}
230 
231 	pos += qcafrm_create_footer(pos);
232 
233 	netif_stop_queue(qca->net_dev);
234 
235 	written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
236 					  pos - qca->tx_buffer);
237 	if (written > 0) {
238 		qca->tx_left = (pos - qca->tx_buffer) - written;
239 		qca->tx_head = qca->tx_buffer + written;
240 		n_stats->tx_bytes += written;
241 	}
242 	spin_unlock(&qca->lock);
243 
244 	netif_trans_update(dev);
245 out:
246 	dev_kfree_skb_any(skb);
247 	return NETDEV_TX_OK;
248 }
249 
250 static void qcauart_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
251 {
252 	struct qcauart *qca = netdev_priv(dev);
253 
254 	netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
255 		    jiffies, dev_trans_start(dev));
256 	dev->stats.tx_errors++;
257 	dev->stats.tx_dropped++;
258 }
259 
260 static int qcauart_netdev_init(struct net_device *dev)
261 {
262 	struct qcauart *qca = netdev_priv(dev);
263 	size_t len;
264 
265 	/* Finish setting up the device info. */
266 	dev->mtu = QCAFRM_MAX_MTU;
267 	dev->type = ARPHRD_ETHER;
268 
269 	len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
270 	qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
271 	if (!qca->tx_buffer)
272 		return -ENOMEM;
273 
274 	qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
275 						qca->net_dev->mtu +
276 						VLAN_ETH_HLEN);
277 	if (!qca->rx_skb)
278 		return -ENOBUFS;
279 
280 	return 0;
281 }
282 
283 static void qcauart_netdev_uninit(struct net_device *dev)
284 {
285 	struct qcauart *qca = netdev_priv(dev);
286 
287 	dev_kfree_skb(qca->rx_skb);
288 }
289 
/* net_device callbacks implemented by this driver. */
static const struct net_device_ops qcauart_netdev_ops = {
	.ndo_init = qcauart_netdev_init,
	.ndo_uninit = qcauart_netdev_uninit,
	.ndo_open = qcauart_netdev_open,
	.ndo_stop = qcauart_netdev_close,
	.ndo_start_xmit = qcauart_netdev_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_tx_timeout = qcauart_netdev_tx_timeout,
	.ndo_validate_addr = eth_validate_addr,
};
300 
301 static void qcauart_netdev_setup(struct net_device *dev)
302 {
303 	dev->netdev_ops = &qcauart_netdev_ops;
304 	dev->watchdog_timeo = QCAUART_TX_TIMEOUT;
305 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
306 	dev->tx_queue_len = 100;
307 
308 	/* MTU range: 46 - 1500 */
309 	dev->min_mtu = QCAFRM_MIN_MTU;
310 	dev->max_mtu = QCAFRM_MAX_MTU;
311 }
312 
/* Devicetree match table: the QCA7000 attached via UART. */
static const struct of_device_id qca_uart_of_match[] = {
	{
	 .compatible = "qca,qca7000",
	},
	{}
};
MODULE_DEVICE_TABLE(of, qca_uart_of_match);
320 
321 static int qca_uart_probe(struct serdev_device *serdev)
322 {
323 	struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
324 	struct qcauart *qca;
325 	u32 speed = 115200;
326 	int ret;
327 
328 	if (!qcauart_dev)
329 		return -ENOMEM;
330 
331 	qcauart_netdev_setup(qcauart_dev);
332 	SET_NETDEV_DEV(qcauart_dev, &serdev->dev);
333 
334 	qca = netdev_priv(qcauart_dev);
335 	if (!qca) {
336 		pr_err("qca_uart: Fail to retrieve private structure\n");
337 		ret = -ENOMEM;
338 		goto free;
339 	}
340 	qca->net_dev = qcauart_dev;
341 	qca->serdev = serdev;
342 	qcafrm_fsm_init_uart(&qca->frm_handle);
343 
344 	spin_lock_init(&qca->lock);
345 	INIT_WORK(&qca->tx_work, qcauart_transmit);
346 
347 	of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);
348 
349 	ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev);
350 	if (ret) {
351 		eth_hw_addr_random(qca->net_dev);
352 		dev_info(&serdev->dev, "Using random MAC address: %pM\n",
353 			 qca->net_dev->dev_addr);
354 	}
355 
356 	netif_carrier_on(qca->net_dev);
357 	serdev_device_set_drvdata(serdev, qca);
358 	serdev_device_set_client_ops(serdev, &qca_serdev_ops);
359 
360 	ret = serdev_device_open(serdev);
361 	if (ret) {
362 		dev_err(&serdev->dev, "Unable to open device %s\n",
363 			qcauart_dev->name);
364 		goto free;
365 	}
366 
367 	speed = serdev_device_set_baudrate(serdev, speed);
368 	dev_info(&serdev->dev, "Using baudrate: %u\n", speed);
369 
370 	serdev_device_set_flow_control(serdev, false);
371 
372 	ret = register_netdev(qcauart_dev);
373 	if (ret) {
374 		dev_err(&serdev->dev, "Unable to register net device %s\n",
375 			qcauart_dev->name);
376 		serdev_device_close(serdev);
377 		cancel_work_sync(&qca->tx_work);
378 		goto free;
379 	}
380 
381 	return 0;
382 
383 free:
384 	free_netdev(qcauart_dev);
385 	return ret;
386 }
387 
/* Unbind from the serdev node: tear down in reverse order of probe.
 * The netdev is unregistered first so no new TX can be started, then
 * the serial device is closed before the work item is cancelled.
 */
static void qca_uart_remove(struct serdev_device *serdev)
{
	struct qcauart *qca = serdev_device_get_drvdata(serdev);

	unregister_netdev(qca->net_dev);

	/* Flush any pending characters in the driver. */
	serdev_device_close(serdev);
	cancel_work_sync(&qca->tx_work);

	free_netdev(qca->net_dev);
}
400 
/* serdev bus glue: driver registration and module metadata. */
static struct serdev_device_driver qca_uart_driver = {
	.probe = qca_uart_probe,
	.remove = qca_uart_remove,
	.driver = {
		.name = QCAUART_DRV_NAME,
		.of_match_table = of_match_ptr(qca_uart_of_match),
	},
};

/* Generates module init/exit that (un)register the driver. */
module_serdev_device_driver(qca_uart_driver);

MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QCAUART_DRV_VERSION);
417