/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to
 *  Remi Denis-Courmont <remi.denis-courmont@nokia.com>
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
	spinlock_t flow_lock;
	struct sk_buff *xoff_skb;
	void (*xoff_skb_dtor)(struct sk_buff *skb);
	bool xoff;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in list */
	struct mutex lock;
};

struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static int caif_net_id;
static int q_high = 50; /* Percent */

struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return &caifn->caifdevs;
}

static void caifd_put(struct caif_device_entry *e)
{
	this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
	this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}
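/*
 * Device entries are published on the per-namespace caifdevs list under
 * RCU. Readers look an entry up with caif_get() inside rcu_read_lock()
 * and, if the entry is needed after the unlock, pin it with caifd_hold()
 * and release it with caifd_put(). caifd_refcnt_read() sums the per-CPU
 * counters so teardown can check whether an entry is still in use before
 * freeing it.
 */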
/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs;
	struct caif_device_entry *caifd;

	caifdevs = caif_device_list(dev_net(dev));

	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}

static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
		caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;

	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd;
	void (*dtor)(struct sk_buff *skb) = NULL;
	bool send_xoff;

	WARN_ON(skb->dev == NULL);

	rcu_read_lock();
	caifd = caif_get(skb->dev);
	caifd_hold(caifd);
	rcu_read_unlock();

	spin_lock_bh(&caifd->flow_lock);
	send_xoff = caifd->xoff;
	caifd->xoff = 0;
	dtor = caifd->xoff_skb_dtor;

	if (WARN_ON(caifd->xoff_skb != skb))
		skb = NULL;

	caifd->xoff_skb = NULL;
	caifd->xoff_skb_dtor = NULL;

	spin_unlock_bh(&caifd->flow_lock);

	if (dtor && skb)
		dtor(skb);

	if (send_xoff)
		caifd->layer.up->
			ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
				caifd->layer.id);
	caifd_put(caifd);
}
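/*
 * transmit() below implements TX flow control towards the CAIF stack:
 * when the device TX queue grows beyond q_high percent of tx_queue_len
 * (or the queue is stopped), a FLOW_OFF indication is sent upwards and
 * the skb's destructor is temporarily replaced with caif_flow_cb(),
 * which signals FLOW_ON again once the driver has consumed the skb.
 */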
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err, high = 0, qlen = 0;
	struct caif_device_entry *caifd =
		container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;
	struct netdev_queue *txq;

	rcu_read_lock_bh();

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_CAIF);

	/* Check if we need to handle xoff */
	if (likely(caifd->netdev->tx_queue_len == 0))
		goto noxoff;

	if (unlikely(caifd->xoff))
		goto noxoff;

	if (likely(!netif_queue_stopped(caifd->netdev))) {
		/* If we run with a TX queue, check if the queue is too long */
		txq = netdev_get_tx_queue(skb->dev, 0);
		qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));

		if (likely(qlen == 0))
			goto noxoff;

		high = (caifd->netdev->tx_queue_len * q_high) / 100;
		if (likely(qlen < high))
			goto noxoff;
	}

	/* Hold lock while accessing xoff */
	spin_lock_bh(&caifd->flow_lock);
	if (caifd->xoff) {
		spin_unlock_bh(&caifd->flow_lock);
		goto noxoff;
	}

	/*
	 * Handle flow off: we do this by temporarily hijacking this
	 * skb's destructor function and replacing it with our own
	 * flow-on callback. The callback will set flow-on and call
	 * the original destructor.
	 */

	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
		 netif_queue_stopped(caifd->netdev),
		 qlen, high);
	caifd->xoff = 1;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;
	skb->destructor = caif_flow_cb;
	spin_unlock_bh(&caifd->flow_lock);

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
noxoff:
	rcu_read_unlock_bh();

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
	    !netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);

	if (err != 0)
		err = NET_RX_DROP;
	return err;
}

static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};
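/*
 * dev_flowctrl() is installed as the link layer's flowctrl callback in
 * NETDEV_REGISTER below, so a CAIF link driver can propagate its own
 * flow-on/flow-off state up through the CAIF stack.
 */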
static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}

void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		     struct cflayer *link_support, int head_room,
		     struct cflayer **layer, int (**rcv_func)(
			     struct sk_buff *, struct net_device *,
			     struct packet_type *, struct net_device *))
{
	struct caif_device_entry *caifd;
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
		return;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);

	switch (caifdev->link_select) {
	case CAIF_LINK_HIGH_BANDW:
		pref = CFPHYPREF_HIGH_BW;
		break;
	case CAIF_LINK_LOW_LATENCY:
		pref = CFPHYPREF_LOW_LAT;
		break;
	default:
		pref = CFPHYPREF_HIGH_BW;
		break;
	}
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strncpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name) - 1);
	caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
	caifd->layer.transmit = transmit;
	cfcnfg_add_phy_layer(cfg,
			     dev,
			     &caifd->layer,
			     pref,
			     link_support,
			     caifdev->use_fcs,
			     head_room);
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
}
EXPORT_SYMBOL(caif_enroll_dev);
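/*
 * Usage sketch (illustrative only, not taken from this file): a link
 * support driver that cannot rely on the ARPHRD_CAIF notifier path below
 * may enroll its device directly. The variables dev, caifdev,
 * link_support and head_room are assumed to be provided by that driver:
 *
 *	struct cflayer *layer;
 *	int (*rcv)(struct sk_buff *, struct net_device *,
 *		   struct packet_type *, struct net_device *);
 *
 *	caif_enroll_dev(dev, caifdev, link_support, head_room, &layer, &rcv);
 *	// feed received frames to rcv(); use layer as the physical layer
 */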
/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *arg)
{
	struct net_device *dev = arg;
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	struct cfcnfg *cfg;
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));

	caifd = caif_get(dev);
	if (caifd == NULL && dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		if (caifd != NULL)
			break;

		caifdev = netdev_priv(dev);

		link_support = NULL;
		if (caifdev->use_frag) {
			head_room = 1;
			link_support = cfserl_create(dev->ifindex,
						     caifdev->use_stx);
			if (!link_support) {
				pr_warn("Out of memory\n");
				break;
			}
		}
		caif_enroll_dev(dev, caifdev, link_support, head_room,
				&layer, NULL);
		caifdev->flowctrl = dev_flowctrl;
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		caifd->xoff = 0;
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);

		spin_lock_bh(&caifd->flow_lock);

		/*
		 * Replace our xoff-destructor with the original destructor.
		 * We trust that skb->destructor is *always* called before
		 * the skb reference becomes invalid. The hijacked SKB
		 * destructor takes the flow_lock, so manipulating
		 * skb->destructor here should be safe.
		 */
		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

		caifd->xoff = 0;
		caifd->xoff_skb_dtor = NULL;
		caifd->xoff_skb = NULL;

		spin_unlock_bh(&caifd->flow_lock);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd are still held, simply ignore NETDEV_UNREGISTER and
		 * wait for the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF stack associated
		 * with the un-registering net-device is freed.
		 */

		if (caifd_refcnt_read(caifd) != 0 ||
		    cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device inuse\n");
			/* Re-enroll the device if the CAIF stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg)
		return -ENOMEM;

	return 0;
}

static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
		caif_device_list(net);
	struct cfcnfg *cfg = get_cfcnfg(net);

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
		       (caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device inuse\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id   = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/* Initialize Caif devices list */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_subsys(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}

static void __exit caif_device_exit(void)
{
	unregister_pernet_subsys(&caif_net_ops);
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
}

module_init(caif_device_init);
module_exit(caif_device_exit);