xref: /openbmc/linux/net/x25/x25_link.c (revision 762f99f4f3cb41a775b5157dd761217beba65873)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	History
 *	X.25 001	Jonathan Naylor	  Started coding.
 *	X.25 002	Jonathan Naylor	  New timer architecture.
 *	mar/20/00	Daniela Squassoni Disabling/enabling of facilities
 *					  negotiation.
 *	2000-09-04	Henner Eisen	  dev_hold() / dev_put() for x25_neigh.
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <net/x25.h>

LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);
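
/*
 * One x25_neigh entry is kept per X.25-capable network device.  Both the
 * list itself and the per-entry fields changed by the subscription ioctl
 * (extended, global_facil_mask) are protected by x25_neigh_list_lock.
 */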

static void x25_t20timer_expiry(struct timer_list *);

static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);

/*
 *	Linux set/reset timer routines
 */
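/*
 * T20 is the Restart Request retransmission timer: nb->t20 is loaded from
 * sysctl_x25_restart_request_timeout when the neighbour is created, and
 * while the timer runs x25_t20timer_expiry() keeps resending the Restart
 * Request until the restart handshake completes or the link is torn down.
 */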
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
	mod_timer(&nb->t20timer, jiffies + nb->t20);
}

static void x25_t20timer_expiry(struct timer_list *t)
{
	struct x25_neigh *nb = from_timer(nb, t, t20timer);

	x25_transmit_restart_request(nb);

	x25_start_t20timer(nb);
}

static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
	del_timer(&nb->t20timer);
}

/*
 *	This handles all restart and diagnostic frames.
 */
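/*
 * Link state summary (from the transitions below and in x25_transmit_link()):
 *	X25_LINK_STATE_0 - layer 2 down, nothing pending
 *	X25_LINK_STATE_1 - waiting for layer 2 to come up
 *	X25_LINK_STATE_2 - Restart Request sent, waiting for confirmation
 *	X25_LINK_STATE_3 - link established, frames may be sent
 */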
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		      unsigned short frametype)
{
	struct sk_buff *skbn;

	switch (frametype) {
	case X25_RESTART_REQUEST:
		switch (nb->state) {
		case X25_LINK_STATE_0:
			/* This can happen when the x25 module just gets loaded
			 * and doesn't know layer 2 has already connected
			 */
			nb->state = X25_LINK_STATE_3;
			x25_transmit_restart_confirmation(nb);
			break;
		case X25_LINK_STATE_2:
			x25_stop_t20timer(nb);
			nb->state = X25_LINK_STATE_3;
			break;
		case X25_LINK_STATE_3:
			/* clear existing virtual calls */
			x25_kill_by_neigh(nb);

			x25_transmit_restart_confirmation(nb);
			break;
		}
		break;

	case X25_RESTART_CONFIRMATION:
		switch (nb->state) {
		case X25_LINK_STATE_2:
			x25_stop_t20timer(nb);
			nb->state = X25_LINK_STATE_3;
			break;
		case X25_LINK_STATE_3:
			/* clear existing virtual calls */
			x25_kill_by_neigh(nb);

			x25_transmit_restart_request(nb);
			nb->state = X25_LINK_STATE_2;
			x25_start_t20timer(nb);
			break;
		}
		break;

	case X25_DIAGNOSTIC:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
			break;

		pr_warn("diagnostic #%d - %02X %02X %02X\n",
			skb->data[3], skb->data[4],
			skb->data[5], skb->data[6]);
		break;

	default:
		pr_warn("received unknown %02X with LCI 000\n",
			frametype);
		break;
	}

	if (nb->state == X25_LINK_STATE_3)
		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
			x25_send_frame(skbn, nb);
}

/*
 *	This routine is called when a Restart Request is needed
 */
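/*
 * The packet built here is a level-3 Restart Request on LCI 0: the GFI
 * octet (modulo 8 or 128 sequencing as configured), LCI 0, the packet
 * type, then a restarting cause of 0x00 and a diagnostic of 0x00.
 * X25_MAX_L2_LEN bytes of headroom are reserved so that the lower layers
 * can prepend their own headers.
 */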
static void x25_transmit_restart_request(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;
	*dptr++ = X25_RESTART_REQUEST;
	*dptr++ = 0x00;
	*dptr++ = 0;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

/*
 * This routine is called when a Restart Confirmation is needed
 */
static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;
	*dptr++ = X25_RESTART_CONFIRMATION;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

/*
 *	This routine is called when a Clear Request is needed outside of the context
 *	of a connected socket.
 */
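/*
 * Unlike the restart packets above, this one carries a real LCI: the high
 * nibble of the LCI is merged into the GFI octet, the low byte goes into
 * the second octet, followed by the Clear Request packet type, the
 * clearing cause and a zero diagnostic.
 */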
void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
				unsigned char cause)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
					 X25_GFI_EXTSEQ :
					 X25_GFI_STDSEQ);
	*dptr++ = (lci >> 0) & 0xFF;
	*dptr++ = X25_CLEAR_REQUEST;
	*dptr++ = cause;
	*dptr++ = 0x00;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

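/*
 *	Deliver a frame to the link: queue it (and, from state 0, kick off
 *	link establishment) until the restart handshake has completed, or
 *	send it straight away once the link is in state 3.
 */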
void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		skb_queue_tail(&nb->queue, skb);
		nb->state = X25_LINK_STATE_1;
		x25_establish_link(nb);
		break;
	case X25_LINK_STATE_1:
	case X25_LINK_STATE_2:
		skb_queue_tail(&nb->queue, skb);
		break;
	case X25_LINK_STATE_3:
		x25_send_frame(skb, nb);
		break;
	}
}

/*
 *	Called when the link layer has become established.
 */
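/*
 * This is normally driven by the device layer (an X25_IFACE_CONNECT
 * pseudo-frame from the driver, see x25_dev.c); it starts the restart
 * handshake by sending a Restart Request and arming T20.
 */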
void x25_link_established(struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
	case X25_LINK_STATE_1:
		x25_transmit_restart_request(nb);
		nb->state = X25_LINK_STATE_2;
		x25_start_t20timer(nb);
		break;
	}
}

/*
 *	Called when the link layer has terminated, or an establishment
 *	request has failed.
 */

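/*
 * Typically reached via an X25_IFACE_DISCONNECT pseudo-frame from the
 * driver (see x25_dev.c).  Everything queued for the neighbour is dropped
 * and its virtual calls are cleared.
 */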
void x25_link_terminated(struct x25_neigh *nb)
{
	nb->state = X25_LINK_STATE_0;
	skb_queue_purge(&nb->queue);
	x25_stop_t20timer(nb);

	/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
	x25_kill_by_neigh(nb);
}

/*
 *	Add a new device.
 */
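/*
 * Called from the X.25 netdevice notifier on NETDEV_UP (see af_x25.c):
 * allocate an x25_neigh for the device, take a reference on the device
 * and put the neighbour on x25_neigh_list.
 */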
void x25_link_device_up(struct net_device *dev)
{
	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

	if (!nb)
		return;

	skb_queue_head_init(&nb->queue);
	timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);

	dev_hold(dev);
	nb->dev      = dev;
	nb->state    = X25_LINK_STATE_0;
	nb->extended = 0;
	/*
	 * Enables negotiation
	 */
	nb->global_facil_mask = X25_MASK_REVERSE |
				X25_MASK_THROUGHPUT |
				X25_MASK_PACKET_SIZE |
				X25_MASK_WINDOW_SIZE;
	nb->t20      = sysctl_x25_restart_request_timeout;
	refcount_set(&nb->refcnt, 1);

	write_lock_bh(&x25_neigh_list_lock);
	list_add(&nb->node, &x25_neigh_list);
	write_unlock_bh(&x25_neigh_list_lock);
}

/**
 *	__x25_remove_neigh - remove neighbour from x25_neigh_list
 *	@nb: neigh to remove
 *
 *	Remove the neighbour from x25_neigh_list, if it is there.
 *	Caller must hold x25_neigh_list_lock.
 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
	if (nb->node.next) {
		list_del(&nb->node);
		x25_neigh_put(nb);
	}
}

/*
 *	A device has been removed, remove its links.
 */
void x25_link_device_down(struct net_device *dev)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		nb = list_entry(entry, struct x25_neigh, node);

		if (nb->dev == dev) {
			__x25_remove_neigh(nb);
			dev_put(dev);
		}
	}

	write_unlock_bh(&x25_neigh_list_lock);
}

/*
 *	Given a device, return its neighbour (x25_neigh) entry.  The entry is
 *	returned with its reference count held; callers must drop it with
 *	x25_neigh_put() when they are done with it.
 */
struct x25_neigh *x25_get_neigh(struct net_device *dev)
{
	struct x25_neigh *nb, *use = NULL;

	read_lock_bh(&x25_neigh_list_lock);
	list_for_each_entry(nb, &x25_neigh_list, node) {
		if (nb->dev == dev) {
			use = nb;
			break;
		}
	}

	if (use)
		x25_neigh_hold(use);
	read_unlock_bh(&x25_neigh_list_lock);
	return use;
}

/*
 *	Handle the ioctls that control the subscription functions.
 */
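/*
 * A minimal user-space sketch (hypothetical device name, assuming fd is an
 * AF_X25 socket):
 *
 *	struct x25_subscrip_struct subscr = { .device = "x25tap0" };
 *
 *	ioctl(fd, SIOCX25GSUBSCRIP, &subscr);	// read current settings
 *	subscr.extended = 1;			// ask for modulo-128 sequencing
 *	ioctl(fd, SIOCX25SSUBSCRIP, &subscr);	// write them back
 */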
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
	struct x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;

	if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
		goto out;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
		goto out;

	rc = -EINVAL;
	if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
		goto out;

	if ((nb = x25_get_neigh(dev)) == NULL)
		goto out_dev_put;

	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		read_lock_bh(&x25_neigh_list_lock);
		x25_subscr.extended	     = nb->extended;
		x25_subscr.global_facil_mask = nb->global_facil_mask;
		read_unlock_bh(&x25_neigh_list_lock);
		rc = copy_to_user(arg, &x25_subscr,
				  sizeof(x25_subscr)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		/* extended may only be 0 (modulo 8) or 1 (modulo 128) */
		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
			rc = 0;
			write_lock_bh(&x25_neigh_list_lock);
			nb->extended	     = x25_subscr.extended;
			nb->global_facil_mask = x25_subscr.global_facil_mask;
			write_unlock_bh(&x25_neigh_list_lock);
		}
	}
	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}


/*
 *	Release all memory associated with X.25 neighbour structures.
 */
void __exit x25_link_free(void)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		struct net_device *dev;

		nb = list_entry(entry, struct x25_neigh, node);
		dev = nb->dev;
		__x25_remove_neigh(nb);
		dev_put(dev);
	}
	write_unlock_bh(&x25_neigh_list_lock);
}