xref: /openbmc/linux/net/x25/x25_link.c (revision 8bd1369b)
/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	This module:
 *		This module is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	History
 *	X.25 001	Jonathan Naylor	  Started coding.
 *	X.25 002	Jonathan Naylor	  New timer architecture.
 *	mar/20/00	Daniela Squassoni Disabling/enabling of facilities
 *					  negotiation.
 *	2000-09-04	Henner Eisen	  dev_hold() / dev_put() for x25_neigh.
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <net/x25.h>

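/*
 *	One x25_neigh is kept for every device the packet layer is attached
 *	to.  The list below holds them all and is protected by
 *	x25_neigh_list_lock; the _bh lock variants are used throughout this
 *	file because readers and writers may run in softirq context.
 */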
LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);

static void x25_t20timer_expiry(struct timer_list *);

static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);

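/*
 *	T20 is the Restart Request retransmission timer: while a transmitted
 *	Restart Request remains unconfirmed, x25_t20timer_expiry() below
 *	resends the request and re-arms the timer.  Its period is taken from
 *	sysctl_x25_restart_request_timeout when the neighbour is created.
 */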
/*
 *	Linux set/reset timer routines
 */
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
	mod_timer(&nb->t20timer, jiffies + nb->t20);
}

static void x25_t20timer_expiry(struct timer_list *t)
{
	struct x25_neigh *nb = from_timer(nb, t, t20timer);

	x25_transmit_restart_request(nb);

	x25_start_t20timer(nb);
}

static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
	del_timer(&nb->t20timer);
}

static inline int x25_t20timer_pending(struct x25_neigh *nb)
{
	return timer_pending(&nb->t20timer);
}

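/*
 *	Per-neighbour link states, as used by the routines below:
 *
 *	X25_LINK_STATE_0 - idle, no link-layer connection
 *	X25_LINK_STATE_1 - link-layer (e.g. LAPB) connection being established
 *	X25_LINK_STATE_2 - link layer up, restart procedure in progress
 *	X25_LINK_STATE_3 - restart complete, packets may be sent
 *
 *	Outgoing frames are queued on nb->queue until state 3 is reached.
 */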
/*
 *	This handles all restart and diagnostic frames.
 */
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		      unsigned short frametype)
{
	struct sk_buff *skbn;
	int confirm;

	switch (frametype) {
	case X25_RESTART_REQUEST:
		confirm = !x25_t20timer_pending(nb);
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		if (confirm)
			x25_transmit_restart_confirmation(nb);
		break;

	case X25_RESTART_CONFIRMATION:
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		break;

	case X25_DIAGNOSTIC:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
			break;

		pr_warn("diagnostic #%d - %02X %02X %02X\n",
			skb->data[3], skb->data[4],
			skb->data[5], skb->data[6]);
		break;

	default:
		pr_warn("received unknown %02X with LCI 000\n",
			frametype);
		break;
	}

	if (nb->state == X25_LINK_STATE_3)
		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
			x25_send_frame(skbn, nb);
}

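/*
 *	The three transmit helpers below build minimal packet-layer frames
 *	by hand.  The common header layout is:
 *
 *	  octet 0: GFI (X25_GFI_STDSEQ or X25_GFI_EXTSEQ) | high 4 bits of LCI
 *	  octet 1: low 8 bits of the LCI (zero for restart packets)
 *	  octet 2: packet type identifier
 *
 *	Restart Request and Clear Request append two further octets, a cause
 *	and a diagnostic code.  X25_MAX_L2_LEN of headroom is reserved so the
 *	link layer can prepend its own header.
 */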
/*
 *	This routine is called when a Restart Request is needed
 */
static void x25_transmit_restart_request(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;
	*dptr++ = X25_RESTART_REQUEST;
	*dptr++ = 0x00;
	*dptr++ = 0;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

/*
 * This routine is called when a Restart Confirmation is needed
 */
static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;
	*dptr++ = X25_RESTART_CONFIRMATION;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

/*
 *	This routine is called when a Clear Request is needed outside of
 *	the context of a connected socket.
 */
void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
				unsigned char cause)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
					 X25_GFI_EXTSEQ :
					 X25_GFI_STDSEQ);
	*dptr++ = (lci >> 0) & 0xFF;
	*dptr++ = X25_CLEAR_REQUEST;
	*dptr++ = cause;
	*dptr++ = 0x00;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

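/*
 *	Queue a frame for a neighbour, bringing the link up if necessary.
 *	In state 0 the frame is queued and link-layer establishment is
 *	started; in states 1 and 2 it is simply queued; in state 3 it is
 *	sent immediately.  Queued frames are flushed by x25_link_control()
 *	once the restart handshake completes.
 */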
void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		skb_queue_tail(&nb->queue, skb);
		nb->state = X25_LINK_STATE_1;
		x25_establish_link(nb);
		break;
	case X25_LINK_STATE_1:
	case X25_LINK_STATE_2:
		skb_queue_tail(&nb->queue, skb);
		break;
	case X25_LINK_STATE_3:
		x25_send_frame(skb, nb);
		break;
	}
}

/*
 *	Called when the link layer has become established.
 */
void x25_link_established(struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		nb->state = X25_LINK_STATE_2;
		break;
	case X25_LINK_STATE_1:
		x25_transmit_restart_request(nb);
		nb->state = X25_LINK_STATE_2;
		x25_start_t20timer(nb);
		break;
	}
}

/*
 *	Called when the link layer has terminated, or an establishment
 *	request has failed.
 */
void x25_link_terminated(struct x25_neigh *nb)
{
	nb->state = X25_LINK_STATE_0;
	/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
	x25_kill_by_neigh(nb);
}

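/*
 *	Neighbour lifetime: x25_link_device_up() below creates the x25_neigh
 *	with a reference count of one (owned by x25_neigh_list) and takes a
 *	reference on the device with dev_hold().  Both references are dropped
 *	again from x25_link_device_down() / x25_link_free() via
 *	__x25_remove_neigh() and dev_put().
 */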
/*
 *	Add a new device.
 */
void x25_link_device_up(struct net_device *dev)
{
	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

	if (!nb)
		return;

	skb_queue_head_init(&nb->queue);
	timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);

	dev_hold(dev);
	nb->dev      = dev;
	nb->state    = X25_LINK_STATE_0;
	nb->extended = 0;
	/*
	 * Enable negotiation of these facilities by default.
	 */
	nb->global_facil_mask = X25_MASK_REVERSE |
				X25_MASK_THROUGHPUT |
				X25_MASK_PACKET_SIZE |
				X25_MASK_WINDOW_SIZE;
	nb->t20      = sysctl_x25_restart_request_timeout;
	refcount_set(&nb->refcnt, 1);

	write_lock_bh(&x25_neigh_list_lock);
	list_add(&nb->node, &x25_neigh_list);
	write_unlock_bh(&x25_neigh_list_lock);
}

/**
 *	__x25_remove_neigh - remove neighbour from x25_neigh_list
 *	@nb: neigh to remove
 *
 *	Remove the neighbour from x25_neigh_list, if it is there.
 *	Caller must hold x25_neigh_list_lock.
 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
	skb_queue_purge(&nb->queue);
	x25_stop_t20timer(nb);

	if (nb->node.next) {
		list_del(&nb->node);
		x25_neigh_put(nb);
	}
}

/*
 *	A device has been removed, remove its links.
 */
void x25_link_device_down(struct net_device *dev)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		nb = list_entry(entry, struct x25_neigh, node);

		if (nb->dev == dev) {
			__x25_remove_neigh(nb);
			dev_put(dev);
		}
	}

	write_unlock_bh(&x25_neigh_list_lock);
}

/*
 *	Given a device, return the associated neighbour (with a reference
 *	held), or NULL if the device is not known to the packet layer.
 */
struct x25_neigh *x25_get_neigh(struct net_device *dev)
{
	struct x25_neigh *nb, *use = NULL;
	struct list_head *entry;

	read_lock_bh(&x25_neigh_list_lock);
	list_for_each(entry, &x25_neigh_list) {
		nb = list_entry(entry, struct x25_neigh, node);

		if (nb->dev == dev) {
			use = nb;
			break;
		}
	}

	if (use)
		x25_neigh_hold(use);
	read_unlock_bh(&x25_neigh_list_lock);
	return use;
}

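/*
 *	SIOCX25GSUBSCRIP copies the neighbour's current 'extended' flag and
 *	global facility mask out to user space; SIOCX25SSUBSCRIP updates them.
 *	A minimal user-space sketch, issued on an AF_X25 socket 'sock' (the
 *	interface name "x25_0" is only an example and must match an attached
 *	X.25 device):
 *
 *		struct x25_subscrip_struct subscr = { .device = "x25_0" };
 *
 *		ioctl(sock, SIOCX25GSUBSCRIP, &subscr);
 *		subscr.extended = 1;
 *		ioctl(sock, SIOCX25SSUBSCRIP, &subscr);
 */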
/*
 *	Handle the ioctls that control the subscription functions.
 */
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
	struct x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;

	if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
		goto out;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
		goto out;

	rc = -EINVAL;
	if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
		goto out;

	if ((nb = x25_get_neigh(dev)) == NULL)
		goto out_dev_put;

	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		read_lock_bh(&x25_neigh_list_lock);
		x25_subscr.extended	     = nb->extended;
		x25_subscr.global_facil_mask = nb->global_facil_mask;
		read_unlock_bh(&x25_neigh_list_lock);
		rc = copy_to_user(arg, &x25_subscr,
				  sizeof(x25_subscr)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
			rc = 0;
			write_lock_bh(&x25_neigh_list_lock);
			nb->extended	     = x25_subscr.extended;
			nb->global_facil_mask = x25_subscr.global_facil_mask;
			write_unlock_bh(&x25_neigh_list_lock);
		}
	}
	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}

/*
 *	Release all memory associated with X.25 neighbour structures.
 */
void __exit x25_link_free(void)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		struct net_device *dev;

		nb = list_entry(entry, struct x25_neigh, node);
		dev = nb->dev;
		__x25_remove_neigh(nb);
		dev_put(dev);
	}
	write_unlock_bh(&x25_neigh_list_lock);
}