1*a8e04698SSainath Grandhi #include <linux/etherdevice.h> 2*a8e04698SSainath Grandhi #include <linux/if_macvlan.h> 3*a8e04698SSainath Grandhi #include <linux/if_vlan.h> 4*a8e04698SSainath Grandhi #include <linux/interrupt.h> 5*a8e04698SSainath Grandhi #include <linux/nsproxy.h> 6*a8e04698SSainath Grandhi #include <linux/compat.h> 7*a8e04698SSainath Grandhi #include <linux/if_tun.h> 8*a8e04698SSainath Grandhi #include <linux/module.h> 9*a8e04698SSainath Grandhi #include <linux/skbuff.h> 10*a8e04698SSainath Grandhi #include <linux/cache.h> 11*a8e04698SSainath Grandhi #include <linux/sched.h> 12*a8e04698SSainath Grandhi #include <linux/types.h> 13*a8e04698SSainath Grandhi #include <linux/slab.h> 14*a8e04698SSainath Grandhi #include <linux/wait.h> 15*a8e04698SSainath Grandhi #include <linux/cdev.h> 16*a8e04698SSainath Grandhi #include <linux/idr.h> 17*a8e04698SSainath Grandhi #include <linux/fs.h> 18*a8e04698SSainath Grandhi #include <linux/uio.h> 19*a8e04698SSainath Grandhi 20*a8e04698SSainath Grandhi #include <net/net_namespace.h> 21*a8e04698SSainath Grandhi #include <net/rtnetlink.h> 22*a8e04698SSainath Grandhi #include <net/sock.h> 23*a8e04698SSainath Grandhi #include <linux/virtio_net.h> 24*a8e04698SSainath Grandhi #include <linux/skb_array.h> 25*a8e04698SSainath Grandhi 26*a8e04698SSainath Grandhi /* 27*a8e04698SSainath Grandhi * A macvtap queue is the central object of this driver, it connects 28*a8e04698SSainath Grandhi * an open character device to a macvlan interface. There can be 29*a8e04698SSainath Grandhi * multiple queues on one interface, which map back to queues 30*a8e04698SSainath Grandhi * implemented in hardware on the underlying device. 31*a8e04698SSainath Grandhi * 32*a8e04698SSainath Grandhi * macvtap_proto is used to allocate queues through the sock allocation 33*a8e04698SSainath Grandhi * mechanism. 
34*a8e04698SSainath Grandhi * 35*a8e04698SSainath Grandhi */ 36*a8e04698SSainath Grandhi struct macvtap_queue { 37*a8e04698SSainath Grandhi struct sock sk; 38*a8e04698SSainath Grandhi struct socket sock; 39*a8e04698SSainath Grandhi struct socket_wq wq; 40*a8e04698SSainath Grandhi int vnet_hdr_sz; 41*a8e04698SSainath Grandhi struct macvlan_dev __rcu *vlan; 42*a8e04698SSainath Grandhi struct file *file; 43*a8e04698SSainath Grandhi unsigned int flags; 44*a8e04698SSainath Grandhi u16 queue_index; 45*a8e04698SSainath Grandhi bool enabled; 46*a8e04698SSainath Grandhi struct list_head next; 47*a8e04698SSainath Grandhi struct skb_array skb_array; 48*a8e04698SSainath Grandhi }; 49*a8e04698SSainath Grandhi 50*a8e04698SSainath Grandhi #define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE) 51*a8e04698SSainath Grandhi 52*a8e04698SSainath Grandhi #define MACVTAP_VNET_LE 0x80000000 53*a8e04698SSainath Grandhi #define MACVTAP_VNET_BE 0x40000000 54*a8e04698SSainath Grandhi 55*a8e04698SSainath Grandhi #ifdef CONFIG_TUN_VNET_CROSS_LE 56*a8e04698SSainath Grandhi static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q) 57*a8e04698SSainath Grandhi { 58*a8e04698SSainath Grandhi return q->flags & MACVTAP_VNET_BE ? 
false : 59*a8e04698SSainath Grandhi virtio_legacy_is_little_endian(); 60*a8e04698SSainath Grandhi } 61*a8e04698SSainath Grandhi 62*a8e04698SSainath Grandhi static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp) 63*a8e04698SSainath Grandhi { 64*a8e04698SSainath Grandhi int s = !!(q->flags & MACVTAP_VNET_BE); 65*a8e04698SSainath Grandhi 66*a8e04698SSainath Grandhi if (put_user(s, sp)) 67*a8e04698SSainath Grandhi return -EFAULT; 68*a8e04698SSainath Grandhi 69*a8e04698SSainath Grandhi return 0; 70*a8e04698SSainath Grandhi } 71*a8e04698SSainath Grandhi 72*a8e04698SSainath Grandhi static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp) 73*a8e04698SSainath Grandhi { 74*a8e04698SSainath Grandhi int s; 75*a8e04698SSainath Grandhi 76*a8e04698SSainath Grandhi if (get_user(s, sp)) 77*a8e04698SSainath Grandhi return -EFAULT; 78*a8e04698SSainath Grandhi 79*a8e04698SSainath Grandhi if (s) 80*a8e04698SSainath Grandhi q->flags |= MACVTAP_VNET_BE; 81*a8e04698SSainath Grandhi else 82*a8e04698SSainath Grandhi q->flags &= ~MACVTAP_VNET_BE; 83*a8e04698SSainath Grandhi 84*a8e04698SSainath Grandhi return 0; 85*a8e04698SSainath Grandhi } 86*a8e04698SSainath Grandhi #else 87*a8e04698SSainath Grandhi static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q) 88*a8e04698SSainath Grandhi { 89*a8e04698SSainath Grandhi return virtio_legacy_is_little_endian(); 90*a8e04698SSainath Grandhi } 91*a8e04698SSainath Grandhi 92*a8e04698SSainath Grandhi static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *argp) 93*a8e04698SSainath Grandhi { 94*a8e04698SSainath Grandhi return -EINVAL; 95*a8e04698SSainath Grandhi } 96*a8e04698SSainath Grandhi 97*a8e04698SSainath Grandhi static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *argp) 98*a8e04698SSainath Grandhi { 99*a8e04698SSainath Grandhi return -EINVAL; 100*a8e04698SSainath Grandhi } 101*a8e04698SSainath Grandhi #endif /* CONFIG_TUN_VNET_CROSS_LE */ 102*a8e04698SSainath Grandhi 
/*
 * A queue is little-endian for virtio header purposes if userspace asked
 * for LE explicitly (MACVTAP_VNET_LE) or the legacy default says so.
 */
static inline bool macvtap_is_little_endian(struct macvtap_queue *q)
{
	return q->flags & MACVTAP_VNET_LE ||
		macvtap_legacy_is_little_endian(q);
}

/* Convert a wire-format __virtio16 from this queue to CPU order. */
static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(macvtap_is_little_endian(q), val);
}

/* Convert a CPU-order u16 to this queue's wire-format __virtio16. */
static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
{
	return __cpu_to_virtio16(macvtap_is_little_endian(q), val);
}

/*
 * macvtap_proto exists so queues can be allocated via sk_alloc();
 * obj_size makes each allocation a full macvtap_queue.
 */
static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof (struct macvtap_queue),
};

/* Minor-number management: minor_idr maps a chardev minor to its
 * macvlan_dev, serialized by minor_lock.
 */
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
DEFINE_IDR(minor_idr);

/* Max bytes copied linearly for a zerocopy send without an explicit hdr_len. */
#define GOODCOPY_LEN 128

static const struct proto_ops macvtap_socket_ops;

#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

/* Caller must be in an RCU read-side critical section (rx handler path). */
static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the files gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */

/*
 * Re-enable a previously disabled (but still attached) queue by
 * appending it to vlan->taps[].  Requires RTNL.
 */
static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	/* device's reference on the queue (dropped in macvtap_put_queue /
	 * macvtap_del_queues) */
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

	return 0;
}

/*
 * Take a queue out of vlan->taps[] (but keep it attached) by moving
 * the last active tap into the freed slot.  Requires RTNL.
 */
static int macvtap_disable_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the files holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * RTNL serializes this against queue setup/teardown so that we don't
 * get to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(macvtap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		/* drop the device's reference taken in macvtap_set_queue() */
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	/* wait for RCU readers of q before dropping the file's reference */
	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		/* fold the recorded rx queue into range without division */
		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

single:
	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}

/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;
}

/*
 * rx handler: steal packets arriving on the lower device and queue
 * them onto a tap queue's skb_array, segmenting/checksumming first
 * when the reader cannot handle the offloads.  Runs under RCU.
 */
rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct macvtap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = macvtap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = macvtap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (__skb_array_full(&q->skb_array))
		goto drop;

	/* reader expects the full frame including the ethernet header */
	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to users wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		/* NULL segs: no segmentation needed, queue as-is */
		if (!segs) {
			if (skb_array_produce(&q->skb_array, skb))
				goto drop;
			goto wake_up;
		}

		consume_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			if (skb_array_produce(&q->skb_array, segs)) {
				/* queue full: drop this and all remaining segments */
				kfree_skb(segs);
				kfree_skb_list(nskb);
				break;
			}
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb))
			goto drop;
		if (skb_array_produce(&q->skb_array, skb))
			goto drop;
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

/* Allocate a chardev minor for the device; returns 0 or a -errno. */
int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		netdev_err(vlan->dev, "Too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}

/* Release the device's chardev minor, if one was allocated. */
void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

/* Look up the net_device for a minor; returns it with a hold, or NULL. */
static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}

/* sk_write_space callback: wake poll()ers once the socket is writeable again. */
static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

/* sk_destruct callback: free any skbs still queued towards the reader. */
static void macvtap_sock_destruct(struct sock *sk)
{
	struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk);

	skb_array_cleanup(&q->skb_array);
}

/* open() on the chardev: allocate a queue and attach it to the device. */
static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct macvtap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	dev = dev_get_by_macvtap_minor(iminor(inode));
	if (!dev)
		goto err;

	err = -ENOMEM;
	/* cast is valid because sk is the first member of macvtap_queue and
	 * macvtap_proto.obj_size covers the whole struct */
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto, 0);
	if (!q)
		goto err;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * so far only KVM virtio_net uses macvtap, enable zero copy between
	 * guest kernel and host kernel when lower device supports zerocopy
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = -ENOMEM;
	if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL))
		goto err_array;

	err = macvtap_set_queue(dev, file, q);
	if (err)
		goto err_queue;

	dev_put(dev);

	rtnl_unlock();
	return err;	/* err == 0 on this path */

err_queue:
	/* NOTE(review): sock_put() below ends in macvtap_sock_destruct(),
	 * which also calls skb_array_cleanup(); confirm that cleaning up
	 * the skb_array twice on this path is safe.
	 */
	skb_array_cleanup(&q->skb_array);
err_array:
	sock_put(&q->sk);
err:
	if (dev)
		dev_put(dev);

	rtnl_unlock();
	return err;
}

/* close() on the chardev: detach and release the queue. */
static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;
	macvtap_put_queue(q);
	return 0;
}

/* poll() on the chardev: readable when skbs are queued, writeable per sock. */
static unsigned int macvtap_poll(struct file *file, poll_table * wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_array_empty(&q->skb_array))
		mask |= POLLIN | POLLRDNORM;

	/* re-check writeability after setting NOSPACE to avoid a lost wakeup */
	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

/*
 * Allocate an skb with @linear bytes in the linear area and the rest
 * as paged data, leaving @prepad bytes of headroom.
 */
static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		/* READ_ONCE: vnet_hdr_sz can change concurrently via ioctl */
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		/* make sure hdr_len covers the checksum fields it points at */
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    macvtap16_to_cpu(q, vnet_hdr.csum_start) +
		    macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     macvtap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_macvtap16(q,
				 macvtap16_to_cpu(q, vnet_hdr.csum_start) +
				 macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		/* only zerocopy if the remainder fits in the skb frags */
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else
		err = skb_copy_datagram_from_iter(skb, 0, from, len);

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
					    macvtap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	/* Move network header to the right position for VLAN tagged packets */
	if ((skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD)) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (m && m->msg_control) {
		/* not doing zerocopy: complete the ubuf callback immediately */
		struct ubuf_info *uarg = m->msg_control;
		uarg->callback(uarg, false);
	}

	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);
720*a8e04698SSainath Grandhi 721*a8e04698SSainath Grandhi err: 722*a8e04698SSainath Grandhi rcu_read_lock(); 723*a8e04698SSainath Grandhi vlan = rcu_dereference(q->vlan); 724*a8e04698SSainath Grandhi if (vlan) 725*a8e04698SSainath Grandhi this_cpu_inc(vlan->pcpu_stats->tx_dropped); 726*a8e04698SSainath Grandhi rcu_read_unlock(); 727*a8e04698SSainath Grandhi 728*a8e04698SSainath Grandhi return err; 729*a8e04698SSainath Grandhi } 730*a8e04698SSainath Grandhi 731*a8e04698SSainath Grandhi static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from) 732*a8e04698SSainath Grandhi { 733*a8e04698SSainath Grandhi struct file *file = iocb->ki_filp; 734*a8e04698SSainath Grandhi struct macvtap_queue *q = file->private_data; 735*a8e04698SSainath Grandhi 736*a8e04698SSainath Grandhi return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK); 737*a8e04698SSainath Grandhi } 738*a8e04698SSainath Grandhi 739*a8e04698SSainath Grandhi /* Put packet to the user space buffer */ 740*a8e04698SSainath Grandhi static ssize_t macvtap_put_user(struct macvtap_queue *q, 741*a8e04698SSainath Grandhi const struct sk_buff *skb, 742*a8e04698SSainath Grandhi struct iov_iter *iter) 743*a8e04698SSainath Grandhi { 744*a8e04698SSainath Grandhi int ret; 745*a8e04698SSainath Grandhi int vnet_hdr_len = 0; 746*a8e04698SSainath Grandhi int vlan_offset = 0; 747*a8e04698SSainath Grandhi int total; 748*a8e04698SSainath Grandhi 749*a8e04698SSainath Grandhi if (q->flags & IFF_VNET_HDR) { 750*a8e04698SSainath Grandhi struct virtio_net_hdr vnet_hdr; 751*a8e04698SSainath Grandhi vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); 752*a8e04698SSainath Grandhi if (iov_iter_count(iter) < vnet_hdr_len) 753*a8e04698SSainath Grandhi return -EINVAL; 754*a8e04698SSainath Grandhi 755*a8e04698SSainath Grandhi if (virtio_net_hdr_from_skb(skb, &vnet_hdr, 756*a8e04698SSainath Grandhi macvtap_is_little_endian(q), true)) 757*a8e04698SSainath Grandhi BUG(); 758*a8e04698SSainath Grandhi 759*a8e04698SSainath Grandhi if 
/* Consume one frame from the queue's skb_array and copy it to user space.
 *
 * @q:       queue to read from
 * @to:      destination iov iterator
 * @noblock: if set, return -EAGAIN instead of sleeping when empty
 *
 * Blocking readers sleep on sk_sleep(&q->sk) until a frame arrives or a
 * signal is pending (-ERESTARTSYS). Returns bytes produced by
 * macvtap_put_user(), 0 for a zero-length read, or a negative errno.
 */
static ssize_t macvtap_do_read(struct macvtap_queue *q,
			       struct iov_iter *to,
			       int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;
	ssize_t ret = 0;

	if (!iov_iter_count(to))
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_array_consume(&q->skb_array);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

	if (skb) {
		ret = macvtap_put_user(q, skb, to);
		/* On error the frame is lost either way; free vs consume only
		 * affects drop accounting. */
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}

/* read() entry point: clamp the result to the requested length and advance
 * the (meaningless for a char device) file position for consistency. */
static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
	/* macvtap_do_read() may report the full frame size even if only
	 * len bytes were copied; never claim more than was asked for. */
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

/* Look up the macvlan device behind @q and take a reference on it.
 * Caller must hold RTNL and release via macvtap_put_vlan().
 * Returns NULL if the queue is detached. */
static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	ASSERT_RTNL();
	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);

	return vlan;
}

/* Drop the device reference taken by macvtap_get_vlan(). */
static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}

/* TUNSETQUEUE handler: attach or detach this file's queue depending on
 * which of IFF_ATTACH_QUEUE / IFF_DETACH_QUEUE is set in @flags.
 * Called under RTNL (see macvtap_ioctl). */
static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = macvtap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = macvtap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = macvtap_disable_queue(q);
	else
		ret = -EINVAL;

	macvtap_put_vlan(vlan);
	return ret;
}
/* TUNSETOFFLOAD handler: translate the user's TUN_F_* offload request into
 * device feature bits on the underlying macvlan device.
 *
 * @arg is a bitmask of TUN_F_CSUM/TSO4/TSO6/TSO_ECN/UFO (validated by the
 * caller). Called under RTNL. Returns 0 or -ENOLINK if the queue is
 * detached.
 */
static int set_offload(struct macvtap_queue *q, unsigned long arg)
{
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);
	if (!vlan)
		return -ENOLINK;

	features = vlan->dev->features;

	/* TSO/UFO bits are only honoured when checksum offload is also
	 * requested, mirroring tun/tap semantics. */
	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}

		if (arg & TUN_F_UFO)
			feature_mask |= NETIF_F_UFO;
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For macvtap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	vlan->tap_features = feature_mask;
	vlan->set_features = features;
	netdev_update_features(vlan->dev);

	return 0;
}
/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	struct sockaddr sa;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		/* Only IFF_TAP|IFF_NO_PI plus MACVTAP_FEATURES bits are
		 * accepted; anything else is rejected wholesale. */
		ret = 0;
		if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~MACVTAP_FEATURES) | u;

		return ret;

	case TUNGETIFF:
		/* RTNL protects the q->vlan lookup and device name. */
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = macvtap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		/* NOTE(review): value is not range-checked here — presumably
		 * relied on elsewhere; confirm against tun's handling. */
		if (get_user(s, sp))
			return -EFAULT;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		/* Header must be at least a bare virtio_net_hdr. */
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(q->flags & MACVTAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= MACVTAP_VNET_LE;
		else
			q->flags &= ~MACVTAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return macvtap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return macvtap_set_vnet_be(q, sp);

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		u = vlan->dev->type;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) ||
		    put_user(u, &ifr->ifr_hwaddr.sa_family))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address(vlan->dev, &sa);
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
/* 32-bit compat ioctl: same commands, pointer argument translated. */
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

/* Character-device operations for /dev/tapN. */
const struct file_operations macvtap_fops = {
	.owner		= THIS_MODULE,
	.open		= macvtap_open,
	.release	= macvtap_release,
	.read_iter	= macvtap_read_iter,
	.write_iter	= macvtap_write_iter,
	.poll		= macvtap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= macvtap_compat_ioctl,
#endif
};

/* proto_ops sendmsg: transmit via macvtap_get_user(), honouring
 * MSG_DONTWAIT and passing the msghdr through for zerocopy support. */
static int macvtap_sendmsg(struct socket *sock, struct msghdr *m,
			   size_t total_len)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
}
macvtap_queue, sock); 1118*a8e04698SSainath Grandhi return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT); 1119*a8e04698SSainath Grandhi } 1120*a8e04698SSainath Grandhi 1121*a8e04698SSainath Grandhi static int macvtap_recvmsg(struct socket *sock, struct msghdr *m, 1122*a8e04698SSainath Grandhi size_t total_len, int flags) 1123*a8e04698SSainath Grandhi { 1124*a8e04698SSainath Grandhi struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock); 1125*a8e04698SSainath Grandhi int ret; 1126*a8e04698SSainath Grandhi if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) 1127*a8e04698SSainath Grandhi return -EINVAL; 1128*a8e04698SSainath Grandhi ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT); 1129*a8e04698SSainath Grandhi if (ret > total_len) { 1130*a8e04698SSainath Grandhi m->msg_flags |= MSG_TRUNC; 1131*a8e04698SSainath Grandhi ret = flags & MSG_TRUNC ? ret : total_len; 1132*a8e04698SSainath Grandhi } 1133*a8e04698SSainath Grandhi return ret; 1134*a8e04698SSainath Grandhi } 1135*a8e04698SSainath Grandhi 1136*a8e04698SSainath Grandhi static int macvtap_peek_len(struct socket *sock) 1137*a8e04698SSainath Grandhi { 1138*a8e04698SSainath Grandhi struct macvtap_queue *q = container_of(sock, struct macvtap_queue, 1139*a8e04698SSainath Grandhi sock); 1140*a8e04698SSainath Grandhi return skb_array_peek_len(&q->skb_array); 1141*a8e04698SSainath Grandhi } 1142*a8e04698SSainath Grandhi 1143*a8e04698SSainath Grandhi /* Ops structure to mimic raw sockets with tun */ 1144*a8e04698SSainath Grandhi static const struct proto_ops macvtap_socket_ops = { 1145*a8e04698SSainath Grandhi .sendmsg = macvtap_sendmsg, 1146*a8e04698SSainath Grandhi .recvmsg = macvtap_recvmsg, 1147*a8e04698SSainath Grandhi .peek_len = macvtap_peek_len, 1148*a8e04698SSainath Grandhi }; 1149*a8e04698SSainath Grandhi 1150*a8e04698SSainath Grandhi /* Get an underlying socket object from tun file. Returns error unless file is 1151*a8e04698SSainath Grandhi * attached to a device. 
The returned object works like a packet socket, it 1152*a8e04698SSainath Grandhi * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for 1153*a8e04698SSainath Grandhi * holding a reference to the file for as long as the socket is in use. */ 1154*a8e04698SSainath Grandhi struct socket *macvtap_get_socket(struct file *file) 1155*a8e04698SSainath Grandhi { 1156*a8e04698SSainath Grandhi struct macvtap_queue *q; 1157*a8e04698SSainath Grandhi if (file->f_op != &macvtap_fops) 1158*a8e04698SSainath Grandhi return ERR_PTR(-EINVAL); 1159*a8e04698SSainath Grandhi q = file->private_data; 1160*a8e04698SSainath Grandhi if (!q) 1161*a8e04698SSainath Grandhi return ERR_PTR(-EBADFD); 1162*a8e04698SSainath Grandhi return &q->sock; 1163*a8e04698SSainath Grandhi } 1164*a8e04698SSainath Grandhi EXPORT_SYMBOL_GPL(macvtap_get_socket); 1165*a8e04698SSainath Grandhi 1166*a8e04698SSainath Grandhi int macvtap_queue_resize(struct macvlan_dev *vlan) 1167*a8e04698SSainath Grandhi { 1168*a8e04698SSainath Grandhi struct net_device *dev = vlan->dev; 1169*a8e04698SSainath Grandhi struct macvtap_queue *q; 1170*a8e04698SSainath Grandhi struct skb_array **arrays; 1171*a8e04698SSainath Grandhi int n = vlan->numqueues; 1172*a8e04698SSainath Grandhi int ret, i = 0; 1173*a8e04698SSainath Grandhi 1174*a8e04698SSainath Grandhi arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL); 1175*a8e04698SSainath Grandhi if (!arrays) 1176*a8e04698SSainath Grandhi return -ENOMEM; 1177*a8e04698SSainath Grandhi 1178*a8e04698SSainath Grandhi list_for_each_entry(q, &vlan->queue_list, next) 1179*a8e04698SSainath Grandhi arrays[i++] = &q->skb_array; 1180*a8e04698SSainath Grandhi 1181*a8e04698SSainath Grandhi ret = skb_array_resize_multiple(arrays, n, 1182*a8e04698SSainath Grandhi dev->tx_queue_len, GFP_KERNEL); 1183*a8e04698SSainath Grandhi 1184*a8e04698SSainath Grandhi kfree(arrays); 1185*a8e04698SSainath Grandhi return ret; 1186*a8e04698SSainath Grandhi } 1187