// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic HDLC support routines for Linux
 * X.25 support
 *
 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/lapb.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/x25device.h>

struct x25_state {
        x25_hdlc_proto settings;
        bool up;
        spinlock_t up_lock; /* Protects "up" */
        struct sk_buff_head rx_queue;
        struct tasklet_struct rx_tasklet;
};

static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);

static struct x25_state *state(hdlc_device *hdlc)
{
        return hdlc->state;
}

static void x25_rx_queue_kick(struct tasklet_struct *t)
{
        struct x25_state *x25st = from_tasklet(x25st, t, rx_tasklet);
        struct sk_buff *skb = skb_dequeue(&x25st->rx_queue);

        while (skb) {
                netif_receive_skb_core(skb);
                skb = skb_dequeue(&x25st->rx_queue);
        }
}

/* These functions are callbacks called by LAPB layer */

static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
{
        struct x25_state *x25st = state(dev_to_hdlc(dev));
        struct sk_buff *skb;
        unsigned char *ptr;

        skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
        if (!skb)
                return;

        ptr = skb_put(skb, 1);
        *ptr = code;

        skb->protocol = x25_type_trans(skb, dev);

        skb_queue_tail(&x25st->rx_queue, skb);
        tasklet_schedule(&x25st->rx_tasklet);
}

static void x25_connected(struct net_device *dev, int reason)
{
        x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT);
}

static void x25_disconnected(struct net_device *dev, int reason)
{
        x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT);
}

static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
{
        struct x25_state *x25st = state(dev_to_hdlc(dev));
        unsigned char *ptr;

        if (skb_cow(skb, 1)) {
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        skb_push(skb, 1);

        ptr = skb->data;
        *ptr = X25_IFACE_DATA;

        skb->protocol = x25_type_trans(skb, dev);

        skb_queue_tail(&x25st->rx_queue, skb);
        tasklet_schedule(&x25st->rx_tasklet);
        return NET_RX_SUCCESS;
}

static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);

        skb_reset_network_header(skb);
        skb->protocol = hdlc_type_trans(skb, dev);

        if (dev_nit_active(dev))
                dev_queue_xmit_nit(skb, dev);

        hdlc->xmit(skb, dev); /* Ignore return value :-( */
}
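
/* Pseudo-header convention: every frame exchanged with the layer above
 * carries a one-byte control code in front of the payload.  The code is
 * X25_IFACE_DATA for data and X25_IFACE_CONNECT or X25_IFACE_DISCONNECT
 * for link status.  x25_xmit() below interprets it on transmit; the LAPB
 * callbacks above prepend it on receive.
 */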

static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct x25_state *x25st = state(hdlc);
        int result;

        /* There should be a pseudo header of 1 byte added by upper layers.
         * Check to make sure it is there before reading it.
         */
        if (skb->len < 1) {
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        spin_lock_bh(&x25st->up_lock);
        if (!x25st->up) {
                spin_unlock_bh(&x25st->up_lock);
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        switch (skb->data[0]) {
        case X25_IFACE_DATA:    /* Data to be transmitted */
                skb_pull(skb, 1);
                result = lapb_data_request(dev, skb);
                if (result != LAPB_OK)
                        dev_kfree_skb(skb);
                spin_unlock_bh(&x25st->up_lock);
                return NETDEV_TX_OK;

        case X25_IFACE_CONNECT:
                result = lapb_connect_request(dev);
                if (result != LAPB_OK) {
                        if (result == LAPB_CONNECTED)
                                /* Send connect confirm. msg to level 3 */
                                x25_connected(dev, 0);
                        else
                                netdev_err(dev, "LAPB connect request failed, error code = %i\n",
                                           result);
                }
                break;

        case X25_IFACE_DISCONNECT:
                result = lapb_disconnect_request(dev);
                if (result != LAPB_OK) {
                        if (result == LAPB_NOTCONNECTED)
                                /* Send disconnect confirm. msg to level 3 */
                                x25_disconnected(dev, 0);
                        else
                                netdev_err(dev, "LAPB disconnect request failed, error code = %i\n",
                                           result);
                }
                break;

        default:                /* to be defined */
                break;
        }

        spin_unlock_bh(&x25st->up_lock);
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static int x25_open(struct net_device *dev)
{
        static const struct lapb_register_struct cb = {
                .connect_confirmation = x25_connected,
                .connect_indication = x25_connected,
                .disconnect_confirmation = x25_disconnected,
                .disconnect_indication = x25_disconnected,
                .data_indication = x25_data_indication,
                .data_transmit = x25_data_transmit,
        };
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct x25_state *x25st = state(hdlc);
        struct lapb_parms_struct params;
        int result;

        result = lapb_register(dev, &cb);
        if (result != LAPB_OK)
                return -ENOMEM;

        result = lapb_getparms(dev, &params);
        if (result != LAPB_OK)
                return -EINVAL;

        if (state(hdlc)->settings.dce)
                params.mode = params.mode | LAPB_DCE;

        if (state(hdlc)->settings.modulo == 128)
                params.mode = params.mode | LAPB_EXTENDED;

        params.window = state(hdlc)->settings.window;
        params.t1 = state(hdlc)->settings.t1;
        params.t2 = state(hdlc)->settings.t2;
        params.n2 = state(hdlc)->settings.n2;

        result = lapb_setparms(dev, &params);
        if (result != LAPB_OK)
                return -EINVAL;

        spin_lock_bh(&x25st->up_lock);
        x25st->up = true;
        spin_unlock_bh(&x25st->up_lock);

        return 0;
}

static void x25_close(struct net_device *dev)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct x25_state *x25st = state(hdlc);

        spin_lock_bh(&x25st->up_lock);
        x25st->up = false;
        spin_unlock_bh(&x25st->up_lock);

        lapb_unregister(dev);
        tasklet_kill(&x25st->rx_tasklet);
}

static int x25_rx(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct x25_state *x25st = state(hdlc);

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb) {
                dev->stats.rx_dropped++;
                return NET_RX_DROP;
        }

        spin_lock_bh(&x25st->up_lock);
        if (!x25st->up) {
                spin_unlock_bh(&x25st->up_lock);
                kfree_skb(skb);
                dev->stats.rx_dropped++;
                return NET_RX_DROP;
        }

        if (lapb_data_received(dev, skb) == LAPB_OK) {
                spin_unlock_bh(&x25st->up_lock);
                return NET_RX_SUCCESS;
        }

        spin_unlock_bh(&x25st->up_lock);
        dev->stats.rx_errors++;
        dev_kfree_skb_any(skb);
        return NET_RX_DROP;
}
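
/* Callback table registered with the generic HDLC framework: hdlc.c
 * invokes .open/.close when the interface is brought up or down, hands
 * each received HDLC frame to .netif_rx, and routes outgoing frames and
 * SIOCWANDEV requests to .xmit and .ioctl respectively.
 */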

static struct hdlc_proto proto = {
        .open = x25_open,
        .close = x25_close,
        .ioctl = x25_ioctl,
        .netif_rx = x25_rx,
        .xmit = x25_xmit,
        .module = THIS_MODULE,
};

static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
{
        x25_hdlc_proto __user *x25_s = ifr->ifr_settings.ifs_ifsu.x25;
        const size_t size = sizeof(x25_hdlc_proto);
        hdlc_device *hdlc = dev_to_hdlc(dev);
        x25_hdlc_proto new_settings;
        int result;

        switch (ifr->ifr_settings.type) {
        case IF_GET_PROTO:
                if (dev_to_hdlc(dev)->proto != &proto)
                        return -EINVAL;
                ifr->ifr_settings.type = IF_PROTO_X25;
                if (ifr->ifr_settings.size < size) {
                        ifr->ifr_settings.size = size; /* data size wanted */
                        return -ENOBUFS;
                }
                if (copy_to_user(x25_s, &state(hdlc)->settings, size))
                        return -EFAULT;
                return 0;

        case IF_PROTO_X25:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (dev->flags & IFF_UP)
                        return -EBUSY;

                /* backward compatibility */
                if (ifr->ifr_settings.size == 0) {
                        new_settings.dce = 0;
                        new_settings.modulo = 8;
                        new_settings.window = 7;
                        new_settings.t1 = 3;
                        new_settings.t2 = 1;
                        new_settings.n2 = 10;
                } else {
                        if (copy_from_user(&new_settings, x25_s, size))
                                return -EFAULT;

                        if ((new_settings.dce != 0 &&
                             new_settings.dce != 1) ||
                            (new_settings.modulo != 8 &&
                             new_settings.modulo != 128) ||
                            new_settings.window < 1 ||
                            (new_settings.modulo == 8 &&
                             new_settings.window > 7) ||
                            (new_settings.modulo == 128 &&
                             new_settings.window > 127) ||
                            new_settings.t1 < 1 ||
                            new_settings.t1 > 255 ||
                            new_settings.t2 < 1 ||
                            new_settings.t2 > 255 ||
                            new_settings.n2 < 1 ||
                            new_settings.n2 > 255)
                                return -EINVAL;
                }

                result = hdlc->attach(dev, ENCODING_NRZ,
                                      PARITY_CRC16_PR1_CCITT);
                if (result)
                        return result;

                result = attach_hdlc_protocol(dev, &proto,
                                              sizeof(struct x25_state));
                if (result)
                        return result;

                memcpy(&state(hdlc)->settings, &new_settings, size);
                state(hdlc)->up = false;
                spin_lock_init(&state(hdlc)->up_lock);
                skb_queue_head_init(&state(hdlc)->rx_queue);
                tasklet_setup(&state(hdlc)->rx_tasklet, x25_rx_queue_kick);

                /* There's no header_ops so hard_header_len should be 0. */
                dev->hard_header_len = 0;
                /* When transmitting data:
                 * first we'll remove a pseudo header of 1 byte,
                 * then we'll prepend an LAPB header of at most 3 bytes.
                 */
                dev->needed_headroom = 3 - 1;

                dev->type = ARPHRD_X25;
                call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
                netif_dormant_off(dev);
                return 0;
        }

        return -EINVAL;
}

static int __init mod_init(void)
{
        register_hdlc_protocol(&proto);
        return 0;
}

static void __exit mod_exit(void)
{
        unregister_hdlc_protocol(&proto);
}

module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("X.25 protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");
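
/* Usage sketch (illustration only; this comment block is not compiled):
 * the protocol is normally selected from userspace with the sethdlc
 * utility, and the hypothetical helper below issues the equivalent
 * SIOCWANDEV request that lands in the IF_PROTO_X25 branch of
 * x25_ioctl() above.  It only needs the uapi <linux/if.h> header (which
 * provides x25_hdlc_proto) and <linux/sockios.h>.  The interface must be
 * down and the caller needs CAP_NET_ADMIN.
 *
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/socket.h>
 *      #include <linux/if.h>
 *      #include <linux/sockios.h>
 *
 *      static int set_proto_x25(const char *ifname)
 *      {
 *              struct ifreq ifr;
 *              x25_hdlc_proto x25;
 *              int fd, err;
 *
 *              memset(&ifr, 0, sizeof(ifr));
 *              memset(&x25, 0, sizeof(x25));
 *              strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *
 *              // Same defaults x25_ioctl() applies when size == 0
 *              x25.dce = 0;
 *              x25.modulo = 8;
 *              x25.window = 7;
 *              x25.t1 = 3;
 *              x25.t2 = 1;
 *              x25.n2 = 10;
 *
 *              ifr.ifr_settings.type = IF_PROTO_X25;
 *              ifr.ifr_settings.size = sizeof(x25);
 *              ifr.ifr_settings.ifs_ifsu.x25 = &x25;
 *
 *              fd = socket(AF_INET, SOCK_DGRAM, 0);
 *              if (fd < 0)
 *                      return -1;
 *              err = ioctl(fd, SIOCWANDEV, &ifr);
 *              close(fd);
 *              return err;
 *      }
 */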