// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
	spinlock_t flow_lock;
	struct sk_buff *xoff_skb;
	void (*xoff_skb_dtor)(struct sk_buff *skb);
	bool xoff;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in list */
	struct mutex lock;
};

struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static unsigned int caif_net_id;
static int q_high = 50; /* Percent */

struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return &caifn->caifdevs;
}

static void caifd_put(struct caif_device_entry *e)
{
	this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
	this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}
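
/*
 * Note on the per-CPU reference counting above: caifd_hold() and
 * caifd_put() only touch the local CPU's counter, so the RX/TX hot
 * paths never contend on a shared atomic. The summed value from
 * caifd_refcnt_read() is consulted only on the slow paths (device
 * unregister and namespace exit), where a non-zero sum means packets
 * are still in flight through the CAIF stack and teardown must wait.
 */
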
/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry *caifd;

	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}

static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;

	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

static void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd;
	void (*dtor)(struct sk_buff *skb) = NULL;
	bool send_xoff;

	WARN_ON(skb->dev == NULL);

	rcu_read_lock();
	caifd = caif_get(skb->dev);

	WARN_ON(caifd == NULL);
	if (!caifd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	spin_lock_bh(&caifd->flow_lock);
	send_xoff = caifd->xoff;
	caifd->xoff = 0;
	dtor = caifd->xoff_skb_dtor;

	if (WARN_ON(caifd->xoff_skb != skb))
		skb = NULL;

	caifd->xoff_skb = NULL;
	caifd->xoff_skb_dtor = NULL;

	spin_unlock_bh(&caifd->flow_lock);

	if (dtor && skb)
		dtor(skb);

	if (send_xoff)
		caifd->layer.up->
			ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
				caifd->layer.id);
	caifd_put(caifd);
}
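
/*
 * TX flow control: transmit() below inspects the device's TX queue.
 * If the queue has been stopped, or an installed qdisc has grown past
 * q_high percent of tx_queue_len (for lockless qdiscs, whenever it is
 * non-empty), the skb's destructor is temporarily replaced with
 * caif_flow_cb() above and a FLOW_OFF indication is sent up the stack.
 * When the queued skb is eventually freed, caif_flow_cb() restores and
 * invokes the original destructor and signals FLOW_ON again.
 */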
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err, high = 0, qlen = 0;
	struct caif_device_entry *caifd =
	    container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;
	struct netdev_queue *txq;

	rcu_read_lock_bh();

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_CAIF);

	/* Check if we need to handle xoff */
	if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
		goto noxoff;

	if (unlikely(caifd->xoff))
		goto noxoff;

	if (likely(!netif_queue_stopped(caifd->netdev))) {
		struct Qdisc *sch;

		/* If we run with a TX queue, check if the queue is too long */
		txq = netdev_get_tx_queue(skb->dev, 0);
		sch = rcu_dereference_bh(txq->qdisc);
		if (likely(qdisc_is_empty(sch)))
			goto noxoff;

		/* can check for explicit qdisc len value only !NOLOCK,
		 * always set flow off otherwise
		 */
		high = (caifd->netdev->tx_queue_len * q_high) / 100;
		if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
			goto noxoff;
	}

	/* Hold lock while accessing xoff */
	spin_lock_bh(&caifd->flow_lock);
	if (caifd->xoff) {
		spin_unlock_bh(&caifd->flow_lock);
		goto noxoff;
	}

	/*
	 * Handle flow off: we do this by temporarily hijacking this
	 * skb's destructor function, and replacing it with our own
	 * flow-on callback. The callback will set flow-on and call
	 * the original destructor.
	 */

	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
			netif_queue_stopped(caifd->netdev),
			qlen, high);
	caifd->xoff = 1;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;
	skb->destructor = caif_flow_cb;
	spin_unlock_bh(&caifd->flow_lock);

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
noxoff:
	rcu_read_unlock_bh();

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
			!netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);

	if (err != 0)
		err = NET_RX_DROP;
	return err;
}

static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};

static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}
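
/*
 * caif_enroll_dev() wires a network device into the CAIF stack: it
 * allocates a tracking entry (holding a reference on the netdevice),
 * adds it to the per-namespace device list under caifdevs->lock, and
 * registers the entry's cflayer as a PHY layer with cfcnfg. It is
 * called from the netdevice notifier below for ARPHRD_CAIF devices,
 * and is exported for CAIF link layer drivers that supply their own
 * link_support layer.
 */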
void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		     struct cflayer *link_support, int head_room,
		     struct cflayer **layer,
		     int (**rcv_func)(struct sk_buff *, struct net_device *,
				      struct packet_type *,
				      struct net_device *))
{
	struct caif_device_entry *caifd;
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
		return;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);

	switch (caifdev->link_select) {
	case CAIF_LINK_HIGH_BANDW:
		pref = CFPHYPREF_HIGH_BW;
		break;
	case CAIF_LINK_LOW_LATENCY:
		pref = CFPHYPREF_LOW_LAT;
		break;
	default:
		pref = CFPHYPREF_HIGH_BW;
		break;
	}
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strlcpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name));
	caifd->layer.transmit = transmit;
	cfcnfg_add_phy_layer(cfg,
				dev,
				&caifd->layer,
				pref,
				link_support,
				caifdev->use_fcs,
				head_room);
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
}
EXPORT_SYMBOL(caif_enroll_dev);
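
/*
 * Device lifecycle handled by the netdevice notifier below:
 *
 *   NETDEV_REGISTER   - enroll the device, optionally behind a cfserl
 *                       framing layer when caifdev->use_frag is set
 *   NETDEV_UP         - clear stale xoff state and mark the PHY layer up
 *   NETDEV_DOWN       - mark the PHY layer down, indicate PHYIF_DOWN to
 *                       the stack, and restore any hijacked skb destructor
 *   NETDEV_UNREGISTER - unlink the entry; free it only once the refcount
 *                       has drained and cfcnfg has released the PHY layer
 */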
/* notify CAIF of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	struct cfcnfg *cfg;
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));

	caifd = caif_get(dev);
	if (caifd == NULL && dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		if (caifd != NULL)
			break;

		caifdev = netdev_priv(dev);

		link_support = NULL;
		if (caifdev->use_frag) {
			head_room = 1;
			link_support = cfserl_create(dev->ifindex,
							caifdev->use_stx);
			if (!link_support) {
				pr_warn("Out of memory\n");
				break;
			}
		}
		caif_enroll_dev(dev, caifdev, link_support, head_room,
				&layer, NULL);
		caifdev->flowctrl = dev_flowctrl;
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		caifd->xoff = 0;
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);

		spin_lock_bh(&caifd->flow_lock);

		/*
		 * Replace our xoff-destructor with the original destructor.
		 * We trust that skb->destructor is *always* called before
		 * the skb reference becomes invalid. The hijacked SKB
		 * destructor takes the flow_lock, so manipulating
		 * skb->destructor here should be safe.
		 */
		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

		caifd->xoff = 0;
		caifd->xoff_skb_dtor = NULL;
		caifd->xoff_skb = NULL;

		spin_unlock_bh(&caifd->flow_lock);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd are taken, simply ignore NETDEV_UNREGISTER and wait
		 * for the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF stack associated
		 * with the net-device un-registering is freed.
		 */

		if (caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device inuse\n");
			/* Enroll device again if the CAIF stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

/* Per-namespace CAIF devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg)
		return -ENOMEM;

	return 0;
}

static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(net);
	struct cfcnfg *cfg = get_cfcnfg(net);

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
			(caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device inuse\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id   = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/* Initialize CAIF devices list */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_subsys(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}

static void __exit caif_device_exit(void)
{
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);