xref: /openbmc/linux/drivers/net/wan/hdlc_cisco.c (revision 6f4eaea2)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic HDLC support routines for Linux
 * Cisco HDLC support
 *
 * Copyright (C) 2000 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 */

#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>

#undef DEBUG_HARD_HEADER

#define CISCO_MULTICAST		0x8F	/* Cisco multicast address */
#define CISCO_UNICAST		0x0F	/* Cisco unicast address */
#define CISCO_KEEPALIVE		0x8035	/* Cisco keepalive protocol */
#define CISCO_SYS_INFO		0x2000	/* Cisco interface/system info */
#define CISCO_ADDR_REQ		0	/* Cisco address request */
#define CISCO_ADDR_REPLY	1	/* Cisco address reply */
#define CISCO_KEEPALIVE_REQ	2	/* Cisco keepalive request */


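/* On-wire Cisco HDLC frame header: a one-byte address (unicast or
 * multicast), a control byte that is always zero, and a big-endian
 * protocol field that mirrors Ethernet protocol IDs.
 */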
struct hdlc_header {
	u8 address;
	u8 control;
	__be16 protocol;
} __packed;


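/* Payload of a Cisco keepalive (SLARP) packet: a message type, two
 * parameters (sequence numbers for keepalives, address and mask for
 * address replies), a reliability field and the sender's uptime in
 * milliseconds.
 */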
struct cisco_packet {
	__be32 type;		/* code */
	__be32 par1;
	__be32 par2;
	__be16 rel;		/* reliability */
	__be32 time;
} __packed;
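/* On-wire keepalive payload sizes accepted by cisco_rx(): the packed
 * structure above (18 bytes) and a 20-byte variant (presumably padded)
 * that some peers send.
 */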
#define	CISCO_PACKET_LEN	18
#define	CISCO_BIG_PACKET_LEN	20


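/* Per-device protocol state: user-supplied keepalive settings, the
 * keepalive timer, link status and the TX/RX sequence numbers exchanged
 * with the peer.
 */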
struct cisco_state {
	cisco_proto settings;

	struct timer_list timer;
	struct net_device *dev;
	spinlock_t lock;
	unsigned long last_poll;
	int up;
	u32 txseq; /* TX sequence number, 0 = none */
	u32 rxseq; /* RX sequence number */
};


static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr);


static inline struct cisco_state *state(hdlc_device *hdlc)
{
	return (struct cisco_state *)hdlc->state;
}


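/* header_ops->create: prepend the 4-byte Cisco HDLC header. Keepalives are
 * sent to the multicast address, all other traffic to the unicast address.
 */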
static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
			     u16 type, const void *daddr, const void *saddr,
			     unsigned int len)
{
	struct hdlc_header *data;
#ifdef DEBUG_HARD_HEADER
	netdev_dbg(dev, "%s called\n", __func__);
#endif

	skb_push(skb, sizeof(struct hdlc_header));
	data = (struct hdlc_header *)skb->data;
	if (type == CISCO_KEEPALIVE)
		data->address = CISCO_MULTICAST;
	else
		data->address = CISCO_UNICAST;
	data->control = 0;
	data->protocol = htons(type);

	return sizeof(struct hdlc_header);
}



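/* Build a keepalive/address-reply control packet and queue it directly on
 * the device. The 4 reserved bytes make room for the header that
 * cisco_hard_header() pushes in front of the cisco_packet payload.
 */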
static void cisco_keepalive_send(struct net_device *dev, u32 type,
				 __be32 par1, __be32 par2)
{
	struct sk_buff *skb;
	struct cisco_packet *data;

	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
			    sizeof(struct cisco_packet));
	if (!skb) {
		netdev_warn(dev, "Memory squeeze on %s()\n", __func__);
		return;
	}
	skb_reserve(skb, 4);
	cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
	data = (struct cisco_packet *)(skb->data + 4);

	data->type = htonl(type);
	data->par1 = par1;
	data->par2 = par2;
	data->rel = cpu_to_be16(0xFFFF);
	/* we will need do_div here if 1000 % HZ != 0 */
	data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ));

	skb_put(skb, sizeof(struct cisco_packet));
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb->protocol = htons(ETH_P_HDLC);
	skb_reset_network_header(skb);

	dev_queue_xmit(skb);
}



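/* Classify an incoming frame: strip the Cisco header for IP, IPX and IPv6
 * payloads, and return ETH_P_HDLC for everything else so the generic HDLC
 * layer hands the frame to cisco_rx().
 */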
static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct hdlc_header *data = (struct hdlc_header *)skb->data;

	if (skb->len < sizeof(struct hdlc_header))
		return cpu_to_be16(ETH_P_HDLC);

	if (data->address != CISCO_MULTICAST &&
	    data->address != CISCO_UNICAST)
		return cpu_to_be16(ETH_P_HDLC);

	switch (data->protocol) {
	case cpu_to_be16(ETH_P_IP):
	case cpu_to_be16(ETH_P_IPX):
	case cpu_to_be16(ETH_P_IPV6):
		skb_pull(skb, sizeof(struct hdlc_header));
		return data->protocol;
	default:
		return cpu_to_be16(ETH_P_HDLC);
	}
}


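/* Handle control traffic delivered as ETH_P_HDLC: answer address requests
 * with the interface address/mask, process keepalive requests from the peer
 * (which double as acks of our own keepalives) to track link state, and
 * drop everything else.
 */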
static int cisco_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	struct hdlc_header *data = (struct hdlc_header *)skb->data;
	struct cisco_packet *cisco_data;
	struct in_device *in_dev;
	__be32 addr, mask;
	u32 ack;

	if (skb->len < sizeof(struct hdlc_header))
		goto rx_error;

	if (data->address != CISCO_MULTICAST &&
	    data->address != CISCO_UNICAST)
		goto rx_error;

	switch (ntohs(data->protocol)) {
	case CISCO_SYS_INFO:
		/* Packet is not needed, drop it. */
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;

	case CISCO_KEEPALIVE:
		if ((skb->len != sizeof(struct hdlc_header) +
		     CISCO_PACKET_LEN) &&
		    (skb->len != sizeof(struct hdlc_header) +
		     CISCO_BIG_PACKET_LEN)) {
			netdev_info(dev, "Invalid length of Cisco control packet (%d bytes)\n",
				    skb->len);
			goto rx_error;
		}

		cisco_data = (struct cisco_packet *)(skb->data +
						     sizeof(struct hdlc_header));

		switch (ntohl(cisco_data->type)) {
		case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
			rcu_read_lock();
			in_dev = __in_dev_get_rcu(dev);
			addr = 0;
			mask = ~cpu_to_be32(0); /* is the mask correct? */

			if (in_dev != NULL) {
				const struct in_ifaddr *ifa;

				in_dev_for_each_ifa_rcu(ifa, in_dev) {
					if (strcmp(dev->name,
						   ifa->ifa_label) == 0) {
						addr = ifa->ifa_local;
						mask = ifa->ifa_mask;
						break;
					}
				}

				cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
						     addr, mask);
			}
			rcu_read_unlock();
			dev_kfree_skb_any(skb);
			return NET_RX_SUCCESS;

		case CISCO_ADDR_REPLY:
			netdev_info(dev, "Unexpected Cisco IP address reply\n");
			goto rx_error;

		case CISCO_KEEPALIVE_REQ:
			spin_lock(&st->lock);
			st->rxseq = ntohl(cisco_data->par1);
			ack = ntohl(cisco_data->par2);
			if (ack && (ack == st->txseq ||
				    /* our current REQ may be in transit */
				    ack == st->txseq - 1)) {
				st->last_poll = jiffies;
				if (!st->up) {
					u32 sec, min, hrs, days;

					sec = ntohl(cisco_data->time) / 1000;
					min = sec / 60; sec -= min * 60;
					hrs = min / 60; min -= hrs * 60;
					days = hrs / 24; hrs -= days * 24;
					netdev_info(dev, "Link up (peer uptime %ud%uh%um%us)\n",
						    days, hrs, min, sec);
					netif_dormant_off(dev);
					st->up = 1;
				}
			}
			spin_unlock(&st->lock);

			dev_kfree_skb_any(skb);
			return NET_RX_SUCCESS;
		} /* switch (keepalive type) */
	} /* switch (protocol) */

	netdev_info(dev, "Unsupported protocol %x\n", ntohs(data->protocol));
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;

rx_error:
	dev->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}



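/* Periodic keepalive worker: declare the link down if the peer has not
 * acked within the configured timeout, then send the next keepalive
 * request and re-arm the timer.
 */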
static void cisco_timer(struct timer_list *t)
{
	struct cisco_state *st = from_timer(st, t, timer);
	struct net_device *dev = st->dev;

	spin_lock(&st->lock);
	if (st->up &&
	    time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) {
		st->up = 0;
		netdev_info(dev, "Link down\n");
		netif_dormant_on(dev);
	}

	cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
			     htonl(st->rxseq));
	spin_unlock(&st->lock);

	st->timer.expires = jiffies + st->settings.interval * HZ;
	add_timer(&st->timer);
}



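/* Called by the generic HDLC layer when the device is brought up: reset
 * link and sequence state and start the keepalive timer.
 */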
static void cisco_start(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	unsigned long flags;

	spin_lock_irqsave(&st->lock, flags);
	st->up = st->txseq = st->rxseq = 0;
	spin_unlock_irqrestore(&st->lock, flags);

	st->dev = dev;
	timer_setup(&st->timer, cisco_timer, 0);
	st->timer.expires = jiffies + HZ; /* First poll after 1 s */
	add_timer(&st->timer);
}



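/* Called by the generic HDLC layer when the device goes down: stop the
 * keepalive timer and mark the link dormant.
 */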
static void cisco_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct cisco_state *st = state(hdlc);
	unsigned long flags;

	del_timer_sync(&st->timer);

	spin_lock_irqsave(&st->lock, flags);
	netif_dormant_on(dev);
	st->up = st->txseq = 0;
	spin_unlock_irqrestore(&st->lock, flags);
}


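/* Protocol hooks registered with the generic HDLC core; cisco_ioctl()
 * attaches them to a device when IF_PROTO_CISCO is selected.
 */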
static struct hdlc_proto proto = {
	.start		= cisco_start,
	.stop		= cisco_stop,
	.type_trans	= cisco_type_trans,
	.ioctl		= cisco_ioctl,
	.netif_rx	= cisco_rx,
	.module		= THIS_MODULE,
};

static const struct header_ops cisco_header_ops = {
	.create = cisco_hard_header,
};

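/* SIOCWANDEV handler: IF_GET_PROTO copies the current settings back to
 * userspace, IF_PROTO_CISCO validates the requested keepalive parameters
 * and attaches this protocol to the device.
 */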
static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
	const size_t size = sizeof(cisco_proto);
	cisco_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto)
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_CISCO;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(cisco_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_CISCO:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, cisco_s, size))
			return -EFAULT;

		if (new_settings.interval < 1 ||
		    new_settings.timeout < 2)
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		result = attach_hdlc_protocol(dev, &proto,
					      sizeof(struct cisco_state));
		if (result)
			return result;

		memcpy(&state(hdlc)->settings, &new_settings, size);
		spin_lock_init(&state(hdlc)->lock);
		dev->header_ops = &cisco_header_ops;
		dev->hard_header_len = sizeof(struct hdlc_header);
		dev->type = ARPHRD_CISCO;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		netif_dormant_on(dev);
		return 0;
	}

	return -EINVAL;
}
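/*
 * For illustration only (not part of this driver): a rough sketch of how a
 * userspace tool such as sethdlc selects this protocol through the
 * SIOCWANDEV ioctl handled above. Field and type names follow
 * <linux/hdlc/ioctl.h> and <linux/if.h>; "fd" is assumed to be any socket
 * descriptor and "hdlc0" an existing generic HDLC interface. interval is
 * the keepalive period and timeout the time without a valid ack after
 * which cisco_timer() declares the link down, both in seconds.
 *
 *	cisco_proto cisco = { .interval = 10, .timeout = 25 };
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);
 *	ifr.ifr_settings.type = IF_PROTO_CISCO;
 *	ifr.ifr_settings.size = sizeof(cisco);
 *	ifr.ifr_settings.ifs_ifsu.cisco = &cisco;
 *	if (ioctl(fd, SIOCWANDEV, &ifr))
 *		perror("SIOCWANDEV");
 */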


static int __init mod_init(void)
{
	register_hdlc_protocol(&proto);
	return 0;
}



static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}


module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");