/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/if_arp.h>
#include <net/caif/caif_device.h>
#include <net/caif/cfcnfg.h>
#include <linux/err.h>
#include <linux/debugfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sjur Brendeland");
MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
MODULE_ALIAS_LDISC(N_CAIF);

#define SEND_QUEUE_LOW 10
#define SEND_QUEUE_HIGH 100
#define CAIF_SENDING 1		/* Bit 1 = 0x02 */
#define CAIF_FLOW_OFF_SENT 4	/* Bit 4 = 0x10 */
#define MAX_WRITE_CHUNK 4096
#define ON 1
#define OFF 0
#define CAIF_MAX_MTU 4096

static DEFINE_SPINLOCK(ser_lock);
static LIST_HEAD(ser_list);
static LIST_HEAD(ser_release_list);

static bool ser_loop;
module_param(ser_loop, bool, S_IRUGO);
MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");

static bool ser_use_stx = true;
module_param(ser_use_stx, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");

static bool ser_use_fcs = true;
module_param(ser_use_fcs, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");

static int ser_write_chunk = MAX_WRITE_CHUNK;
module_param(ser_write_chunk, int, S_IRUGO);
MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");

static struct dentry *debugfsdir;

static int caif_net_open(struct net_device *dev);
static int caif_net_close(struct net_device *dev);

struct ser_device {
	struct caif_dev_common common;
	struct list_head node;
	struct net_device *dev;
	struct sk_buff_head head;
	struct tty_struct *tty;
	bool tx_started;
	unsigned long state;
	char *tty_name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_tty_dir;
	struct debugfs_blob_wrapper tx_blob;
	struct debugfs_blob_wrapper rx_blob;
	u8 rx_data[128];
	u8 tx_data[128];
	u8 tty_status;
#endif
};

static void caifdev_setup(struct net_device *dev);
static void ldisc_tx_wakeup(struct tty_struct *tty);
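
/*
 * When CONFIG_DEBUG_FS is enabled, each attached tty gets its own directory
 * under the "caif_serial" debugfs root created at module init (typically
 * mounted under /sys/kernel/debug), exposing the last transmitted and
 * received frames (last_tx_msg, last_rx_msg), the ser_state bit mask and
 * the packed tty_status byte for inspection.
 */
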
#ifdef CONFIG_DEBUG_FS
static inline void update_tty_status(struct ser_device *ser)
{
	ser->tty_status =
		ser->tty->stopped << 5 |
		ser->tty->flow_stopped << 3 |
		ser->tty->packet << 2 |
		ser->tty->port->low_latency << 1;
}

static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
	ser->debugfs_tty_dir = debugfs_create_dir(tty->name, debugfsdir);
	if (!IS_ERR(ser->debugfs_tty_dir)) {
		debugfs_create_blob("last_tx_msg", S_IRUSR,
				    ser->debugfs_tty_dir,
				    &ser->tx_blob);

		debugfs_create_blob("last_rx_msg", S_IRUSR,
				    ser->debugfs_tty_dir,
				    &ser->rx_blob);

		debugfs_create_x32("ser_state", S_IRUSR,
				   ser->debugfs_tty_dir,
				   (u32 *)&ser->state);

		debugfs_create_x8("tty_status", S_IRUSR,
				  ser->debugfs_tty_dir,
				  &ser->tty_status);
	}
	ser->tx_blob.data = ser->tx_data;
	ser->tx_blob.size = 0;
	ser->rx_blob.data = ser->rx_data;
	ser->rx_blob.size = 0;
}

static inline void debugfs_deinit(struct ser_device *ser)
{
	debugfs_remove_recursive(ser->debugfs_tty_dir);
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
	if (size > sizeof(ser->rx_data))
		size = sizeof(ser->rx_data);
	memcpy(ser->rx_data, data, size);
	ser->rx_blob.data = ser->rx_data;
	ser->rx_blob.size = size;
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
	if (size > sizeof(ser->tx_data))
		size = sizeof(ser->tx_data);
	memcpy(ser->tx_data, data, size);
	ser->tx_blob.data = ser->tx_data;
	ser->tx_blob.size = size;
}
#else
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
}

static inline void debugfs_deinit(struct ser_device *ser)
{
}

static inline void update_tty_status(struct ser_device *ser)
{
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
}
#endif

static void ldisc_receive(struct tty_struct *tty, const u8 *data,
			  char *flags, int count)
{
	struct sk_buff *skb = NULL;
	struct ser_device *ser;
	int ret;
	u8 *p;

	ser = tty->disc_data;

	/*
	 * NOTE: flags may contain information about break or overrun.
	 * This is not yet handled.
	 */

	/*
	 * Workaround for garbage at start of transmission,
	 * only enable if STX handling is not enabled.
	 */
	if (!ser->common.use_stx && !ser->tx_started) {
		dev_info(&ser->dev->dev,
			 "Bytes received before initial transmission - bytes discarded\n");
		return;
	}

	BUG_ON(ser->dev == NULL);

	/* Get a suitable caif packet and copy in data. */
	skb = netdev_alloc_skb(ser->dev, count + 1);
	if (skb == NULL)
		return;
	p = skb_put(skb, count);
	memcpy(p, data, count);

	skb->protocol = htons(ETH_P_CAIF);
	skb_reset_mac_header(skb);
	skb->dev = ser->dev;
	debugfs_rx(ser, data, count);
	/* Push received packet up the stack. */
	ret = netif_rx_ni(skb);
	if (!ret) {
		ser->dev->stats.rx_packets++;
		ser->dev->stats.rx_bytes += count;
	} else
		++ser->dev->stats.rx_dropped;
	update_tty_status(ser);
}
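
/*
 * Transmit path: caif_xmit() queues outgoing skbs, and both it and the tty
 * write_wakeup callback funnel into handle_tx(). The CAIF_SENDING bit acts
 * as a non-blocking guard so that only one context drains the queue at a
 * time. Flow control towards the CAIF stack is signalled once when the
 * queue grows beyond SEND_QUEUE_HIGH (flow off) and re-enabled when it
 * drains below SEND_QUEUE_LOW (flow on).
 */
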
static int handle_tx(struct ser_device *ser)
{
	struct tty_struct *tty;
	struct sk_buff *skb;
	int tty_wr, len, room;

	tty = ser->tty;
	ser->tx_started = true;

	/* Enter critical section */
	if (test_and_set_bit(CAIF_SENDING, &ser->state))
		return 0;

	/* skb_peek is safe because handle_tx is called after skb_queue_tail */
	while ((skb = skb_peek(&ser->head)) != NULL) {

		/* Make sure you don't write too much */
		len = skb->len;
		room = tty_write_room(tty);
		if (!room)
			break;
		if (room > ser_write_chunk)
			room = ser_write_chunk;
		if (len > room)
			len = room;

		/* Write to tty or loopback */
		if (!ser_loop) {
			tty_wr = tty->ops->write(tty, skb->data, len);
			update_tty_status(ser);
		} else {
			tty_wr = len;
			ldisc_receive(tty, skb->data, NULL, len);
		}
		ser->dev->stats.tx_packets++;
		ser->dev->stats.tx_bytes += tty_wr;

		/* Error on TTY ?! */
		if (tty_wr < 0)
			goto error;
		/* Reduce buffer written, and discard if empty */
		skb_pull(skb, tty_wr);
		if (skb->len == 0) {
			struct sk_buff *tmp = skb_dequeue(&ser->head);
			WARN_ON(tmp != skb);
			if (in_interrupt())
				dev_kfree_skb_irq(skb);
			else
				kfree_skb(skb);
		}
	}
	/* Send flow on once the queue has drained below the low water mark */
	if (ser->head.qlen <= SEND_QUEUE_LOW &&
	    test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
	    ser->common.flowctrl != NULL)
		ser->common.flowctrl(ser->dev, ON);
	clear_bit(CAIF_SENDING, &ser->state);
	return 0;
error:
	clear_bit(CAIF_SENDING, &ser->state);
	return tty_wr;
}

static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ser_device *ser;

	BUG_ON(dev == NULL);
	ser = netdev_priv(dev);

	/* Send flow off once, on high water mark */
	if (ser->head.qlen > SEND_QUEUE_HIGH &&
	    !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
	    ser->common.flowctrl != NULL)
		ser->common.flowctrl(ser->dev, OFF);

	skb_queue_tail(&ser->head, skb);
	return handle_tx(ser);
}

static void ldisc_tx_wakeup(struct tty_struct *tty)
{
	struct ser_device *ser;

	ser = tty->disc_data;
	BUG_ON(ser == NULL);
	WARN_ON(ser->tty != tty);
	handle_tx(ser);
}

static void ser_release(struct work_struct *work)
{
	struct list_head list;
	struct ser_device *ser, *tmp;

	spin_lock(&ser_lock);
	list_replace_init(&ser_release_list, &list);
	spin_unlock(&ser_lock);

	if (!list_empty(&list)) {
		rtnl_lock();
		list_for_each_entry_safe(ser, tmp, &list, node) {
			dev_close(ser->dev);
			unregister_netdevice(ser->dev);
			debugfs_deinit(ser);
		}
		rtnl_unlock();
	}
}

static DECLARE_WORK(ser_release_work, ser_release);

static int ldisc_open(struct tty_struct *tty)
{
	struct ser_device *ser;
	struct net_device *dev;
	char name[64];
	int result;

	/* No write no play */
	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;
	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
		return -EPERM;

	/* Release devices scheduled for removal to avoid name collisions */
	ser_release(NULL);

	result = snprintf(name, sizeof(name), "cf%s", tty->name);
	if (result >= IFNAMSIZ)
		return -EINVAL;
	dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
	if (!dev)
		return -ENOMEM;

	ser = netdev_priv(dev);
	ser->tty = tty_kref_get(tty);
	ser->dev = dev;
	debugfs_init(ser, tty);
	tty->receive_room = N_TTY_BUF_SIZE;
	tty->disc_data = ser;
	set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	rtnl_lock();
	result = register_netdevice(dev);
	if (result) {
		rtnl_unlock();
		free_netdev(dev);
		return -ENODEV;
	}

	spin_lock(&ser_lock);
	list_add(&ser->node, &ser_list);
	spin_unlock(&ser_lock);
	rtnl_unlock();
	netif_stop_queue(dev);
	update_tty_status(ser);
	return 0;
}

static void ldisc_close(struct tty_struct *tty)
{
	struct ser_device *ser = tty->disc_data;

	tty_kref_put(ser->tty);

	spin_lock(&ser_lock);
	list_move(&ser->node, &ser_release_list);
	spin_unlock(&ser_lock);
	schedule_work(&ser_release_work);
}
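
/*
 * Line discipline lifecycle: ldisc_open() registers a CAIF network device
 * named "cf<ttyname>" for the tty it is attached to, while ldisc_close()
 * only moves the device onto ser_release_list and schedules ser_release(),
 * so dev_close()/unregister_netdevice() run later from process context.
 */
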
/* The line discipline structure. */
static struct tty_ldisc_ops caif_ldisc = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_caif",
	.open = ldisc_open,
	.close = ldisc_close,
	.receive_buf = ldisc_receive,
	.write_wakeup = ldisc_tx_wakeup
};

static int register_ldisc(void)
{
	int result;

	result = tty_register_ldisc(N_CAIF, &caif_ldisc);
	if (result < 0) {
		pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
		       result);
		return result;
	}
	return result;
}

static const struct net_device_ops netdev_ops = {
	.ndo_open = caif_net_open,
	.ndo_stop = caif_net_close,
	.ndo_start_xmit = caif_xmit
};

static void caifdev_setup(struct net_device *dev)
{
	struct ser_device *serdev = netdev_priv(dev);

	dev->features = 0;
	dev->netdev_ops = &netdev_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CAIF_MAX_MTU;
	dev->tx_queue_len = 0;
	dev->destructor = free_netdev;
	skb_queue_head_init(&serdev->head);
	serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
	serdev->common.use_frag = true;
	serdev->common.use_stx = ser_use_stx;
	serdev->common.use_fcs = ser_use_fcs;
	serdev->dev = dev;
}

static int caif_net_open(struct net_device *dev)
{
	netif_wake_queue(dev);
	return 0;
}

static int caif_net_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static int __init caif_ser_init(void)
{
	int ret;

	ret = register_ldisc();
	debugfsdir = debugfs_create_dir("caif_serial", NULL);
	return ret;
}

static void __exit caif_ser_exit(void)
{
	spin_lock(&ser_lock);
	list_splice(&ser_list, &ser_release_list);
	spin_unlock(&ser_lock);
	ser_release(NULL);
	cancel_work_sync(&ser_release_work);
	tty_unregister_ldisc(N_CAIF);
	debugfs_remove_recursive(debugfsdir);
}

module_init(caif_ser_init);
module_exit(caif_ser_exit);
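
/*
 * Usage sketch (illustrative only, not part of the driver): the line
 * discipline is normally attached from userspace with the TIOCSETD ioctl
 * on an open serial port, after which the matching "cf<ttyname>" network
 * interface can be configured like any other. The device path below is
 * just an example and error handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/tty.h>			for N_CAIF
 *
 *	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
 *	int ldisc = N_CAIF;
 *	ioctl(fd, TIOCSETD, &ldisc);		creates net device "cfttyS0"
 *
 * The file descriptor must stay open; closing it detaches the discipline
 * and the interface is torn down via ser_release(). Bring the link up with
 * e.g. "ip link set cfttyS0 up".
 */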