131762eaaSAaron Young /* sunvnet.c: Sun LDOM Virtual Network Driver. 231762eaaSAaron Young * 331762eaaSAaron Young * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> 4867fa150SShannon Nelson * Copyright (C) 2016-2017 Oracle. All rights reserved. 531762eaaSAaron Young */ 631762eaaSAaron Young 731762eaaSAaron Young #include <linux/module.h> 831762eaaSAaron Young #include <linux/kernel.h> 931762eaaSAaron Young #include <linux/types.h> 1031762eaaSAaron Young #include <linux/slab.h> 1131762eaaSAaron Young #include <linux/delay.h> 1231762eaaSAaron Young #include <linux/init.h> 1331762eaaSAaron Young #include <linux/netdevice.h> 1431762eaaSAaron Young #include <linux/ethtool.h> 1531762eaaSAaron Young #include <linux/etherdevice.h> 1631762eaaSAaron Young #include <linux/mutex.h> 1731762eaaSAaron Young #include <linux/highmem.h> 1831762eaaSAaron Young #include <linux/if_vlan.h> 1931762eaaSAaron Young #define CREATE_TRACE_POINTS 2031762eaaSAaron Young #include <trace/events/sunvnet.h> 2131762eaaSAaron Young 2231762eaaSAaron Young #if IS_ENABLED(CONFIG_IPV6) 2331762eaaSAaron Young #include <linux/icmpv6.h> 2431762eaaSAaron Young #endif 2531762eaaSAaron Young 2631762eaaSAaron Young #include <net/ip.h> 2731762eaaSAaron Young #include <net/icmp.h> 2831762eaaSAaron Young #include <net/route.h> 2931762eaaSAaron Young 3031762eaaSAaron Young #include <asm/vio.h> 3131762eaaSAaron Young #include <asm/ldc.h> 3231762eaaSAaron Young 3331762eaaSAaron Young #include "sunvnet_common.h" 3431762eaaSAaron Young 3531762eaaSAaron Young /* Heuristic for the number of times to exponentially backoff and 3631762eaaSAaron Young * retry sending an LDC trigger when EAGAIN is encountered 3731762eaaSAaron Young */ 3831762eaaSAaron Young #define VNET_MAX_RETRIES 10 3931762eaaSAaron Young 402493b842SShannon Nelson MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); 412493b842SShannon Nelson MODULE_DESCRIPTION("Sun LDOM virtual network support library"); 422493b842SShannon Nelson MODULE_LICENSE("GPL"); 432493b842SShannon Nelson MODULE_VERSION("1.1"); 442493b842SShannon Nelson 4531762eaaSAaron Young static int __vnet_tx_trigger(struct vnet_port *port, u32 start); 4631762eaaSAaron Young 4731762eaaSAaron Young static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) 4831762eaaSAaron Young { 4931762eaaSAaron Young return vio_dring_avail(dr, VNET_TX_RING_SIZE); 5031762eaaSAaron Young } 5131762eaaSAaron Young 5231762eaaSAaron Young static int vnet_handle_unknown(struct vnet_port *port, void *arg) 5331762eaaSAaron Young { 5431762eaaSAaron Young struct vio_msg_tag *pkt = arg; 5531762eaaSAaron Young 5631762eaaSAaron Young pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n", 5731762eaaSAaron Young pkt->type, pkt->stype, pkt->stype_env, pkt->sid); 5831762eaaSAaron Young pr_err("Resetting connection\n"); 5931762eaaSAaron Young 6031762eaaSAaron Young ldc_disconnect(port->vio.lp); 6131762eaaSAaron Young 6231762eaaSAaron Young return -ECONNRESET; 6331762eaaSAaron Young } 6431762eaaSAaron Young 6531762eaaSAaron Young static int vnet_port_alloc_tx_ring(struct vnet_port *port); 6631762eaaSAaron Young 6731762eaaSAaron Young int sunvnet_send_attr_common(struct vio_driver_state *vio) 6831762eaaSAaron Young { 6931762eaaSAaron Young struct vnet_port *port = to_vnet_port(vio); 7067d0719fSAaron Young struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); 7131762eaaSAaron Young struct vio_net_attr_info pkt; 7231762eaaSAaron Young int framelen = ETH_FRAME_LEN; 7331762eaaSAaron Young int i, err; 7431762eaaSAaron Young 7531762eaaSAaron Young err = vnet_port_alloc_tx_ring(to_vnet_port(vio)); 7631762eaaSAaron Young if (err) 7731762eaaSAaron Young return err; 7831762eaaSAaron Young 7931762eaaSAaron Young memset(&pkt, 0, sizeof(pkt)); 8031762eaaSAaron Young pkt.tag.type = VIO_TYPE_CTRL; 8131762eaaSAaron Young 
pkt.tag.stype = VIO_SUBTYPE_INFO; 8231762eaaSAaron Young pkt.tag.stype_env = VIO_ATTR_INFO; 8331762eaaSAaron Young pkt.tag.sid = vio_send_sid(vio); 8431762eaaSAaron Young if (vio_version_before(vio, 1, 2)) 8531762eaaSAaron Young pkt.xfer_mode = VIO_DRING_MODE; 8631762eaaSAaron Young else 8731762eaaSAaron Young pkt.xfer_mode = VIO_NEW_DRING_MODE; 8831762eaaSAaron Young pkt.addr_type = VNET_ADDR_ETHERMAC; 8931762eaaSAaron Young pkt.ack_freq = 0; 9031762eaaSAaron Young for (i = 0; i < 6; i++) 9131762eaaSAaron Young pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); 9231762eaaSAaron Young if (vio_version_after(vio, 1, 3)) { 9331762eaaSAaron Young if (port->rmtu) { 9431762eaaSAaron Young port->rmtu = min(VNET_MAXPACKET, port->rmtu); 9531762eaaSAaron Young pkt.mtu = port->rmtu; 9631762eaaSAaron Young } else { 9731762eaaSAaron Young port->rmtu = VNET_MAXPACKET; 9831762eaaSAaron Young pkt.mtu = port->rmtu; 9931762eaaSAaron Young } 10031762eaaSAaron Young if (vio_version_after_eq(vio, 1, 6)) 10131762eaaSAaron Young pkt.options = VIO_TX_DRING; 10231762eaaSAaron Young } else if (vio_version_before(vio, 1, 3)) { 10331762eaaSAaron Young pkt.mtu = framelen; 10431762eaaSAaron Young } else { /* v1.3 */ 10531762eaaSAaron Young pkt.mtu = framelen + VLAN_HLEN; 10631762eaaSAaron Young } 10731762eaaSAaron Young 10831762eaaSAaron Young pkt.cflags = 0; 10931762eaaSAaron Young if (vio_version_after_eq(vio, 1, 7) && port->tso) { 11031762eaaSAaron Young pkt.cflags |= VNET_LSO_IPV4_CAPAB; 11131762eaaSAaron Young if (!port->tsolen) 11231762eaaSAaron Young port->tsolen = VNET_MAXTSO; 11331762eaaSAaron Young pkt.ipv4_lso_maxlen = port->tsolen; 11431762eaaSAaron Young } 11531762eaaSAaron Young 11631762eaaSAaron Young pkt.plnk_updt = PHYSLINK_UPDATE_NONE; 11731762eaaSAaron Young 11831762eaaSAaron Young viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " 11931762eaaSAaron Young "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " 12031762eaaSAaron Young "cflags[0x%04x] 
lso_max[%u]\n", 12131762eaaSAaron Young pkt.xfer_mode, pkt.addr_type, 12231762eaaSAaron Young (unsigned long long)pkt.addr, 12331762eaaSAaron Young pkt.ack_freq, pkt.plnk_updt, pkt.options, 12431762eaaSAaron Young (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen); 12531762eaaSAaron Young 12631762eaaSAaron Young return vio_ldc_send(vio, &pkt, sizeof(pkt)); 12731762eaaSAaron Young } 12831762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_send_attr_common); 12931762eaaSAaron Young 13031762eaaSAaron Young static int handle_attr_info(struct vio_driver_state *vio, 13131762eaaSAaron Young struct vio_net_attr_info *pkt) 13231762eaaSAaron Young { 13331762eaaSAaron Young struct vnet_port *port = to_vnet_port(vio); 13431762eaaSAaron Young u64 localmtu; 13531762eaaSAaron Young u8 xfer_mode; 13631762eaaSAaron Young 13731762eaaSAaron Young viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " 13831762eaaSAaron Young "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " 13931762eaaSAaron Young " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", 14031762eaaSAaron Young pkt->xfer_mode, pkt->addr_type, 14131762eaaSAaron Young (unsigned long long)pkt->addr, 14231762eaaSAaron Young pkt->ack_freq, pkt->plnk_updt, pkt->options, 14331762eaaSAaron Young (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, 14431762eaaSAaron Young pkt->ipv4_lso_maxlen); 14531762eaaSAaron Young 14631762eaaSAaron Young pkt->tag.sid = vio_send_sid(vio); 14731762eaaSAaron Young 14831762eaaSAaron Young xfer_mode = pkt->xfer_mode; 14931762eaaSAaron Young /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */ 15031762eaaSAaron Young if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE) 15131762eaaSAaron Young xfer_mode = VIO_NEW_DRING_MODE; 15231762eaaSAaron Young 15331762eaaSAaron Young /* MTU negotiation: 15431762eaaSAaron Young * < v1.3 - ETH_FRAME_LEN exactly 15531762eaaSAaron Young * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change 15631762eaaSAaron Young * pkt->mtu 
for ACK 15731762eaaSAaron Young * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly 15831762eaaSAaron Young */ 15931762eaaSAaron Young if (vio_version_before(vio, 1, 3)) { 16031762eaaSAaron Young localmtu = ETH_FRAME_LEN; 16131762eaaSAaron Young } else if (vio_version_after(vio, 1, 3)) { 16231762eaaSAaron Young localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET; 16331762eaaSAaron Young localmtu = min(pkt->mtu, localmtu); 16431762eaaSAaron Young pkt->mtu = localmtu; 16531762eaaSAaron Young } else { /* v1.3 */ 16631762eaaSAaron Young localmtu = ETH_FRAME_LEN + VLAN_HLEN; 16731762eaaSAaron Young } 16831762eaaSAaron Young port->rmtu = localmtu; 16931762eaaSAaron Young 17031762eaaSAaron Young /* LSO negotiation */ 17131762eaaSAaron Young if (vio_version_after_eq(vio, 1, 7)) 17231762eaaSAaron Young port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB); 17331762eaaSAaron Young else 17431762eaaSAaron Young port->tso = false; 17531762eaaSAaron Young if (port->tso) { 17631762eaaSAaron Young if (!port->tsolen) 17731762eaaSAaron Young port->tsolen = VNET_MAXTSO; 17831762eaaSAaron Young port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen); 17931762eaaSAaron Young if (port->tsolen < VNET_MINTSO) { 18031762eaaSAaron Young port->tso = false; 18131762eaaSAaron Young port->tsolen = 0; 18231762eaaSAaron Young pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; 18331762eaaSAaron Young } 18431762eaaSAaron Young pkt->ipv4_lso_maxlen = port->tsolen; 18531762eaaSAaron Young } else { 18631762eaaSAaron Young pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; 18731762eaaSAaron Young pkt->ipv4_lso_maxlen = 0; 188bc221a34SShannon Nelson port->tsolen = 0; 18931762eaaSAaron Young } 19031762eaaSAaron Young 19131762eaaSAaron Young /* for version >= 1.6, ACK packet mode we support */ 19231762eaaSAaron Young if (vio_version_after_eq(vio, 1, 6)) { 19331762eaaSAaron Young pkt->xfer_mode = VIO_NEW_DRING_MODE; 19431762eaaSAaron Young pkt->options = VIO_TX_DRING; 19531762eaaSAaron Young } 19631762eaaSAaron Young 19731762eaaSAaron Young if 
(!(xfer_mode | VIO_NEW_DRING_MODE) || 19831762eaaSAaron Young pkt->addr_type != VNET_ADDR_ETHERMAC || 19931762eaaSAaron Young pkt->mtu != localmtu) { 20031762eaaSAaron Young viodbg(HS, "SEND NET ATTR NACK\n"); 20131762eaaSAaron Young 20231762eaaSAaron Young pkt->tag.stype = VIO_SUBTYPE_NACK; 20331762eaaSAaron Young 20431762eaaSAaron Young (void)vio_ldc_send(vio, pkt, sizeof(*pkt)); 20531762eaaSAaron Young 20631762eaaSAaron Young return -ECONNRESET; 207dc153f85SAaron Young } 208dc153f85SAaron Young 20931762eaaSAaron Young viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] " 21031762eaaSAaron Young "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] " 21131762eaaSAaron Young "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", 21231762eaaSAaron Young pkt->xfer_mode, pkt->addr_type, 21331762eaaSAaron Young (unsigned long long)pkt->addr, 21431762eaaSAaron Young pkt->ack_freq, pkt->plnk_updt, pkt->options, 21531762eaaSAaron Young (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, 21631762eaaSAaron Young pkt->ipv4_lso_maxlen); 21731762eaaSAaron Young 21831762eaaSAaron Young pkt->tag.stype = VIO_SUBTYPE_ACK; 21931762eaaSAaron Young 22031762eaaSAaron Young return vio_ldc_send(vio, pkt, sizeof(*pkt)); 22131762eaaSAaron Young } 22231762eaaSAaron Young 22331762eaaSAaron Young static int handle_attr_ack(struct vio_driver_state *vio, 22431762eaaSAaron Young struct vio_net_attr_info *pkt) 22531762eaaSAaron Young { 22631762eaaSAaron Young viodbg(HS, "GOT NET ATTR ACK\n"); 22731762eaaSAaron Young 22831762eaaSAaron Young return 0; 22931762eaaSAaron Young } 23031762eaaSAaron Young 23131762eaaSAaron Young static int handle_attr_nack(struct vio_driver_state *vio, 23231762eaaSAaron Young struct vio_net_attr_info *pkt) 23331762eaaSAaron Young { 23431762eaaSAaron Young viodbg(HS, "GOT NET ATTR NACK\n"); 23531762eaaSAaron Young 23631762eaaSAaron Young return -ECONNRESET; 23731762eaaSAaron Young } 23831762eaaSAaron Young 23931762eaaSAaron Young int 
sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg) 24031762eaaSAaron Young { 24131762eaaSAaron Young struct vio_net_attr_info *pkt = arg; 24231762eaaSAaron Young 24331762eaaSAaron Young switch (pkt->tag.stype) { 24431762eaaSAaron Young case VIO_SUBTYPE_INFO: 24531762eaaSAaron Young return handle_attr_info(vio, pkt); 24631762eaaSAaron Young 24731762eaaSAaron Young case VIO_SUBTYPE_ACK: 24831762eaaSAaron Young return handle_attr_ack(vio, pkt); 24931762eaaSAaron Young 25031762eaaSAaron Young case VIO_SUBTYPE_NACK: 25131762eaaSAaron Young return handle_attr_nack(vio, pkt); 25231762eaaSAaron Young 25331762eaaSAaron Young default: 25431762eaaSAaron Young return -ECONNRESET; 25531762eaaSAaron Young } 25631762eaaSAaron Young } 25731762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common); 25831762eaaSAaron Young 25931762eaaSAaron Young void sunvnet_handshake_complete_common(struct vio_driver_state *vio) 26031762eaaSAaron Young { 26131762eaaSAaron Young struct vio_dring_state *dr; 26231762eaaSAaron Young 26331762eaaSAaron Young dr = &vio->drings[VIO_DRIVER_RX_RING]; 264dc153f85SAaron Young dr->rcv_nxt = 1; 265dc153f85SAaron Young dr->snd_nxt = 1; 26631762eaaSAaron Young 26731762eaaSAaron Young dr = &vio->drings[VIO_DRIVER_TX_RING]; 268dc153f85SAaron Young dr->rcv_nxt = 1; 269dc153f85SAaron Young dr->snd_nxt = 1; 27031762eaaSAaron Young } 27131762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common); 27231762eaaSAaron Young 27331762eaaSAaron Young /* The hypervisor interface that implements copying to/from imported 27431762eaaSAaron Young * memory from another domain requires that copies are done to 8-byte 27531762eaaSAaron Young * aligned buffers, and that the lengths of such copies are also 8-byte 27631762eaaSAaron Young * multiples. 
27731762eaaSAaron Young * 27831762eaaSAaron Young * So we align skb->data to an 8-byte multiple and pad-out the data 27931762eaaSAaron Young * area so we can round the copy length up to the next multiple of 28031762eaaSAaron Young * 8 for the copy. 28131762eaaSAaron Young * 28231762eaaSAaron Young * The transmitter puts the actual start of the packet 6 bytes into 28331762eaaSAaron Young * the buffer it sends over, so that the IP headers after the ethernet 28431762eaaSAaron Young * header are aligned properly. These 6 bytes are not in the descriptor 28531762eaaSAaron Young * length, they are simply implied. This offset is represented using 28631762eaaSAaron Young * the VNET_PACKET_SKIP macro. 28731762eaaSAaron Young */ 28831762eaaSAaron Young static struct sk_buff *alloc_and_align_skb(struct net_device *dev, 28931762eaaSAaron Young unsigned int len) 29031762eaaSAaron Young { 291dc153f85SAaron Young struct sk_buff *skb; 29231762eaaSAaron Young unsigned long addr, off; 29331762eaaSAaron Young 294dc153f85SAaron Young skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8); 29531762eaaSAaron Young if (unlikely(!skb)) 29631762eaaSAaron Young return NULL; 29731762eaaSAaron Young 29831762eaaSAaron Young addr = (unsigned long)skb->data; 29931762eaaSAaron Young off = ((addr + 7UL) & ~7UL) - addr; 30031762eaaSAaron Young if (off) 30131762eaaSAaron Young skb_reserve(skb, off); 30231762eaaSAaron Young 30331762eaaSAaron Young return skb; 30431762eaaSAaron Young } 30531762eaaSAaron Young 30631762eaaSAaron Young static inline void vnet_fullcsum(struct sk_buff *skb) 30731762eaaSAaron Young { 30831762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 30931762eaaSAaron Young int offset = skb_transport_offset(skb); 31031762eaaSAaron Young 31131762eaaSAaron Young if (skb->protocol != htons(ETH_P_IP)) 31231762eaaSAaron Young return; 31331762eaaSAaron Young if (iph->protocol != IPPROTO_TCP && 31431762eaaSAaron Young iph->protocol != IPPROTO_UDP) 31531762eaaSAaron Young return; 
31631762eaaSAaron Young skb->ip_summed = CHECKSUM_NONE; 31731762eaaSAaron Young skb->csum_level = 1; 31831762eaaSAaron Young skb->csum = 0; 31931762eaaSAaron Young if (iph->protocol == IPPROTO_TCP) { 32031762eaaSAaron Young struct tcphdr *ptcp = tcp_hdr(skb); 32131762eaaSAaron Young 32231762eaaSAaron Young ptcp->check = 0; 32331762eaaSAaron Young skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 32431762eaaSAaron Young ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 32531762eaaSAaron Young skb->len - offset, IPPROTO_TCP, 32631762eaaSAaron Young skb->csum); 32731762eaaSAaron Young } else if (iph->protocol == IPPROTO_UDP) { 32831762eaaSAaron Young struct udphdr *pudp = udp_hdr(skb); 32931762eaaSAaron Young 33031762eaaSAaron Young pudp->check = 0; 33131762eaaSAaron Young skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 33231762eaaSAaron Young pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 33331762eaaSAaron Young skb->len - offset, IPPROTO_UDP, 33431762eaaSAaron Young skb->csum); 33531762eaaSAaron Young } 33631762eaaSAaron Young } 33731762eaaSAaron Young 33831762eaaSAaron Young static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) 33931762eaaSAaron Young { 34067d0719fSAaron Young struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); 34131762eaaSAaron Young unsigned int len = desc->size; 34231762eaaSAaron Young unsigned int copy_len; 34331762eaaSAaron Young struct sk_buff *skb; 34431762eaaSAaron Young int maxlen; 34531762eaaSAaron Young int err; 34631762eaaSAaron Young 34731762eaaSAaron Young err = -EMSGSIZE; 34831762eaaSAaron Young if (port->tso && port->tsolen > port->rmtu) 34931762eaaSAaron Young maxlen = port->tsolen; 35031762eaaSAaron Young else 35131762eaaSAaron Young maxlen = port->rmtu; 35231762eaaSAaron Young if (unlikely(len < ETH_ZLEN || len > maxlen)) { 35331762eaaSAaron Young dev->stats.rx_length_errors++; 35431762eaaSAaron Young goto out_dropped; 35531762eaaSAaron Young } 35631762eaaSAaron Young 
35731762eaaSAaron Young skb = alloc_and_align_skb(dev, len); 35831762eaaSAaron Young err = -ENOMEM; 35931762eaaSAaron Young if (unlikely(!skb)) { 36031762eaaSAaron Young dev->stats.rx_missed_errors++; 36131762eaaSAaron Young goto out_dropped; 36231762eaaSAaron Young } 36331762eaaSAaron Young 36431762eaaSAaron Young copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U; 36531762eaaSAaron Young skb_put(skb, copy_len); 36631762eaaSAaron Young err = ldc_copy(port->vio.lp, LDC_COPY_IN, 36731762eaaSAaron Young skb->data, copy_len, 0, 36831762eaaSAaron Young desc->cookies, desc->ncookies); 36931762eaaSAaron Young if (unlikely(err < 0)) { 37031762eaaSAaron Young dev->stats.rx_frame_errors++; 37131762eaaSAaron Young goto out_free_skb; 37231762eaaSAaron Young } 37331762eaaSAaron Young 37431762eaaSAaron Young skb_pull(skb, VNET_PACKET_SKIP); 37531762eaaSAaron Young skb_trim(skb, len); 37631762eaaSAaron Young skb->protocol = eth_type_trans(skb, dev); 37731762eaaSAaron Young 37831762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 8)) { 37931762eaaSAaron Young struct vio_net_dext *dext = vio_net_ext(desc); 38031762eaaSAaron Young 38131762eaaSAaron Young skb_reset_network_header(skb); 38231762eaaSAaron Young 38331762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { 38431762eaaSAaron Young if (skb->protocol == ETH_P_IP) { 38531762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 38631762eaaSAaron Young 38731762eaaSAaron Young iph->check = 0; 38831762eaaSAaron Young ip_send_check(iph); 38931762eaaSAaron Young } 39031762eaaSAaron Young } 39131762eaaSAaron Young if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && 39231762eaaSAaron Young skb->ip_summed == CHECKSUM_NONE) { 39331762eaaSAaron Young if (skb->protocol == htons(ETH_P_IP)) { 39431762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 39531762eaaSAaron Young int ihl = iph->ihl * 4; 39631762eaaSAaron Young 39731762eaaSAaron Young skb_reset_transport_header(skb); 39831762eaaSAaron Young skb_set_transport_header(skb, ihl); 
39931762eaaSAaron Young vnet_fullcsum(skb); 40031762eaaSAaron Young } 40131762eaaSAaron Young } 40231762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { 40331762eaaSAaron Young skb->ip_summed = CHECKSUM_PARTIAL; 40431762eaaSAaron Young skb->csum_level = 0; 40531762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK) 40631762eaaSAaron Young skb->csum_level = 1; 40731762eaaSAaron Young } 40831762eaaSAaron Young } 40931762eaaSAaron Young 41031762eaaSAaron Young skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL; 41131762eaaSAaron Young 41231762eaaSAaron Young dev->stats.rx_packets++; 41331762eaaSAaron Young dev->stats.rx_bytes += len; 41431762eaaSAaron Young napi_gro_receive(&port->napi, skb); 41531762eaaSAaron Young return 0; 41631762eaaSAaron Young 41731762eaaSAaron Young out_free_skb: 41831762eaaSAaron Young kfree_skb(skb); 41931762eaaSAaron Young 42031762eaaSAaron Young out_dropped: 42131762eaaSAaron Young dev->stats.rx_dropped++; 42231762eaaSAaron Young return err; 42331762eaaSAaron Young } 42431762eaaSAaron Young 42531762eaaSAaron Young static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, 42631762eaaSAaron Young u32 start, u32 end, u8 vio_dring_state) 42731762eaaSAaron Young { 42831762eaaSAaron Young struct vio_dring_data hdr = { 42931762eaaSAaron Young .tag = { 43031762eaaSAaron Young .type = VIO_TYPE_DATA, 43131762eaaSAaron Young .stype = VIO_SUBTYPE_ACK, 43231762eaaSAaron Young .stype_env = VIO_DRING_DATA, 43331762eaaSAaron Young .sid = vio_send_sid(&port->vio), 43431762eaaSAaron Young }, 43531762eaaSAaron Young .dring_ident = dr->ident, 43631762eaaSAaron Young .start_idx = start, 43731762eaaSAaron Young .end_idx = end, 43831762eaaSAaron Young .state = vio_dring_state, 43931762eaaSAaron Young }; 44031762eaaSAaron Young int err, delay; 44131762eaaSAaron Young int retries = 0; 44231762eaaSAaron Young 44331762eaaSAaron Young hdr.seq = dr->snd_nxt; 44431762eaaSAaron Young delay = 1; 
44531762eaaSAaron Young do { 44631762eaaSAaron Young err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); 44731762eaaSAaron Young if (err > 0) { 44831762eaaSAaron Young dr->snd_nxt++; 44931762eaaSAaron Young break; 45031762eaaSAaron Young } 45131762eaaSAaron Young udelay(delay); 45231762eaaSAaron Young if ((delay <<= 1) > 128) 45331762eaaSAaron Young delay = 128; 45431762eaaSAaron Young if (retries++ > VNET_MAX_RETRIES) { 45531762eaaSAaron Young pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n", 45631762eaaSAaron Young port->raddr[0], port->raddr[1], 45731762eaaSAaron Young port->raddr[2], port->raddr[3], 45831762eaaSAaron Young port->raddr[4], port->raddr[5]); 45931762eaaSAaron Young break; 46031762eaaSAaron Young } 46131762eaaSAaron Young } while (err == -EAGAIN); 46231762eaaSAaron Young 46331762eaaSAaron Young if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) { 46431762eaaSAaron Young port->stop_rx_idx = end; 46531762eaaSAaron Young port->stop_rx = true; 46631762eaaSAaron Young } else { 46731762eaaSAaron Young port->stop_rx_idx = 0; 46831762eaaSAaron Young port->stop_rx = false; 46931762eaaSAaron Young } 47031762eaaSAaron Young 47131762eaaSAaron Young return err; 47231762eaaSAaron Young } 47331762eaaSAaron Young 47431762eaaSAaron Young static struct vio_net_desc *get_rx_desc(struct vnet_port *port, 47531762eaaSAaron Young struct vio_dring_state *dr, 47631762eaaSAaron Young u32 index) 47731762eaaSAaron Young { 47831762eaaSAaron Young struct vio_net_desc *desc = port->vio.desc_buf; 47931762eaaSAaron Young int err; 48031762eaaSAaron Young 48131762eaaSAaron Young err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size, 48231762eaaSAaron Young (index * dr->entry_size), 48331762eaaSAaron Young dr->cookies, dr->ncookies); 48431762eaaSAaron Young if (err < 0) 48531762eaaSAaron Young return ERR_PTR(err); 48631762eaaSAaron Young 48731762eaaSAaron Young return desc; 48831762eaaSAaron Young } 48931762eaaSAaron Young 49031762eaaSAaron Young static int put_rx_desc(struct 
vnet_port *port, 49131762eaaSAaron Young struct vio_dring_state *dr, 49231762eaaSAaron Young struct vio_net_desc *desc, 49331762eaaSAaron Young u32 index) 49431762eaaSAaron Young { 49531762eaaSAaron Young int err; 49631762eaaSAaron Young 49731762eaaSAaron Young err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, 49831762eaaSAaron Young (index * dr->entry_size), 49931762eaaSAaron Young dr->cookies, dr->ncookies); 50031762eaaSAaron Young if (err < 0) 50131762eaaSAaron Young return err; 50231762eaaSAaron Young 50331762eaaSAaron Young return 0; 50431762eaaSAaron Young } 50531762eaaSAaron Young 50631762eaaSAaron Young static int vnet_walk_rx_one(struct vnet_port *port, 50731762eaaSAaron Young struct vio_dring_state *dr, 50831762eaaSAaron Young u32 index, int *needs_ack) 50931762eaaSAaron Young { 51031762eaaSAaron Young struct vio_net_desc *desc = get_rx_desc(port, dr, index); 51131762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 51231762eaaSAaron Young int err; 51331762eaaSAaron Young 514dc153f85SAaron Young BUG_ON(!desc); 51531762eaaSAaron Young if (IS_ERR(desc)) 51631762eaaSAaron Young return PTR_ERR(desc); 51731762eaaSAaron Young 51831762eaaSAaron Young if (desc->hdr.state != VIO_DESC_READY) 51931762eaaSAaron Young return 1; 52031762eaaSAaron Young 52131762eaaSAaron Young dma_rmb(); 52231762eaaSAaron Young 52331762eaaSAaron Young viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", 52431762eaaSAaron Young desc->hdr.state, desc->hdr.ack, 52531762eaaSAaron Young desc->size, desc->ncookies, 52631762eaaSAaron Young desc->cookies[0].cookie_addr, 52731762eaaSAaron Young desc->cookies[0].cookie_size); 52831762eaaSAaron Young 52931762eaaSAaron Young err = vnet_rx_one(port, desc); 53031762eaaSAaron Young if (err == -ECONNRESET) 53131762eaaSAaron Young return err; 53231762eaaSAaron Young trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid, 53331762eaaSAaron Young index, desc->hdr.ack); 53431762eaaSAaron Young desc->hdr.state = 
VIO_DESC_DONE; 53531762eaaSAaron Young err = put_rx_desc(port, dr, desc, index); 53631762eaaSAaron Young if (err < 0) 53731762eaaSAaron Young return err; 53831762eaaSAaron Young *needs_ack = desc->hdr.ack; 53931762eaaSAaron Young return 0; 54031762eaaSAaron Young } 54131762eaaSAaron Young 54231762eaaSAaron Young static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, 54331762eaaSAaron Young u32 start, u32 end, int *npkts, int budget) 54431762eaaSAaron Young { 54531762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 54631762eaaSAaron Young int ack_start = -1, ack_end = -1; 54731762eaaSAaron Young bool send_ack = true; 54831762eaaSAaron Young 54931762eaaSAaron Young end = (end == (u32)-1) ? vio_dring_prev(dr, start) 55031762eaaSAaron Young : vio_dring_next(dr, end); 55131762eaaSAaron Young 55231762eaaSAaron Young viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); 55331762eaaSAaron Young 55431762eaaSAaron Young while (start != end) { 55531762eaaSAaron Young int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); 556dc153f85SAaron Young 55731762eaaSAaron Young if (err == -ECONNRESET) 55831762eaaSAaron Young return err; 55931762eaaSAaron Young if (err != 0) 56031762eaaSAaron Young break; 56131762eaaSAaron Young (*npkts)++; 56231762eaaSAaron Young if (ack_start == -1) 56331762eaaSAaron Young ack_start = start; 56431762eaaSAaron Young ack_end = start; 56531762eaaSAaron Young start = vio_dring_next(dr, start); 56631762eaaSAaron Young if (ack && start != end) { 56731762eaaSAaron Young err = vnet_send_ack(port, dr, ack_start, ack_end, 56831762eaaSAaron Young VIO_DRING_ACTIVE); 56931762eaaSAaron Young if (err == -ECONNRESET) 57031762eaaSAaron Young return err; 57131762eaaSAaron Young ack_start = -1; 57231762eaaSAaron Young } 57331762eaaSAaron Young if ((*npkts) >= budget) { 57431762eaaSAaron Young send_ack = false; 57531762eaaSAaron Young break; 57631762eaaSAaron Young } 57731762eaaSAaron Young } 578dc153f85SAaron Young if 
(unlikely(ack_start == -1)) { 579dc153f85SAaron Young ack_end = vio_dring_prev(dr, start); 580dc153f85SAaron Young ack_start = ack_end; 581dc153f85SAaron Young } 58231762eaaSAaron Young if (send_ack) { 58331762eaaSAaron Young port->napi_resume = false; 58431762eaaSAaron Young trace_vnet_tx_send_stopped_ack(port->vio._local_sid, 58531762eaaSAaron Young port->vio._peer_sid, 58631762eaaSAaron Young ack_end, *npkts); 58731762eaaSAaron Young return vnet_send_ack(port, dr, ack_start, ack_end, 58831762eaaSAaron Young VIO_DRING_STOPPED); 58931762eaaSAaron Young } else { 59031762eaaSAaron Young trace_vnet_tx_defer_stopped_ack(port->vio._local_sid, 59131762eaaSAaron Young port->vio._peer_sid, 59231762eaaSAaron Young ack_end, *npkts); 59331762eaaSAaron Young port->napi_resume = true; 59431762eaaSAaron Young port->napi_stop_idx = ack_end; 59531762eaaSAaron Young return 1; 59631762eaaSAaron Young } 59731762eaaSAaron Young } 59831762eaaSAaron Young 59931762eaaSAaron Young static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts, 60031762eaaSAaron Young int budget) 60131762eaaSAaron Young { 60231762eaaSAaron Young struct vio_dring_data *pkt = msgbuf; 60331762eaaSAaron Young struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; 60431762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 60531762eaaSAaron Young 60631762eaaSAaron Young viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n", 60731762eaaSAaron Young pkt->tag.stype_env, pkt->seq, dr->rcv_nxt); 60831762eaaSAaron Young 60931762eaaSAaron Young if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) 61031762eaaSAaron Young return 0; 61131762eaaSAaron Young if (unlikely(pkt->seq != dr->rcv_nxt)) { 61231762eaaSAaron Young pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n", 61331762eaaSAaron Young pkt->seq, dr->rcv_nxt); 61431762eaaSAaron Young return 0; 61531762eaaSAaron Young } 61631762eaaSAaron Young 61731762eaaSAaron Young if (!port->napi_resume) 61831762eaaSAaron Young 
dr->rcv_nxt++; 61931762eaaSAaron Young 62031762eaaSAaron Young /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ 62131762eaaSAaron Young 62231762eaaSAaron Young return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, 62331762eaaSAaron Young npkts, budget); 62431762eaaSAaron Young } 62531762eaaSAaron Young 62631762eaaSAaron Young static int idx_is_pending(struct vio_dring_state *dr, u32 end) 62731762eaaSAaron Young { 62831762eaaSAaron Young u32 idx = dr->cons; 62931762eaaSAaron Young int found = 0; 63031762eaaSAaron Young 63131762eaaSAaron Young while (idx != dr->prod) { 63231762eaaSAaron Young if (idx == end) { 63331762eaaSAaron Young found = 1; 63431762eaaSAaron Young break; 63531762eaaSAaron Young } 63631762eaaSAaron Young idx = vio_dring_next(dr, idx); 63731762eaaSAaron Young } 63831762eaaSAaron Young return found; 63931762eaaSAaron Young } 64031762eaaSAaron Young 64131762eaaSAaron Young static int vnet_ack(struct vnet_port *port, void *msgbuf) 64231762eaaSAaron Young { 64331762eaaSAaron Young struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 64431762eaaSAaron Young struct vio_dring_data *pkt = msgbuf; 64531762eaaSAaron Young struct net_device *dev; 64631762eaaSAaron Young u32 end; 64731762eaaSAaron Young struct vio_net_desc *desc; 64831762eaaSAaron Young struct netdev_queue *txq; 64931762eaaSAaron Young 65031762eaaSAaron Young if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) 65131762eaaSAaron Young return 0; 65231762eaaSAaron Young 65331762eaaSAaron Young end = pkt->end_idx; 65467d0719fSAaron Young dev = VNET_PORT_TO_NET_DEVICE(port); 65531762eaaSAaron Young netif_tx_lock(dev); 65631762eaaSAaron Young if (unlikely(!idx_is_pending(dr, end))) { 65731762eaaSAaron Young netif_tx_unlock(dev); 65831762eaaSAaron Young return 0; 65931762eaaSAaron Young } 66031762eaaSAaron Young 66131762eaaSAaron Young /* sync for race conditions with vnet_start_xmit() and tell xmit it 66231762eaaSAaron Young * is time to send a trigger. 
	 */
	trace_vnet_rx_stopped_ack(port->vio._local_sid,
				  port->vio._peer_sid, end);
	dr->cons = vio_dring_next(dr, end);
	desc = vio_dring_entry(dr, dr->cons);
	if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
		/* vnet_start_xmit() just populated this dring but missed
		 * sending the "start" LDC message to the consumer.
		 * Send a "start" trigger on its behalf.
		 */
		if (__vnet_tx_trigger(port, dr->cons) > 0)
			port->start_cons = false;
		else
			port->start_cons = true;
	} else {
		port->start_cons = true;
	}
	netif_tx_unlock(dev);

	txq = netdev_get_tx_queue(dev, port->q_index);
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}

/* NACK handler for VIO data messages.  Currently a no-op placeholder;
 * the XXX comment below is original and marks the unimplemented reset.
 */
static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

/* Handle a multicast (VNET_MCAST_INFO) control packet.  We only ever
 * expect ACKs here; anything else is logged and otherwise ignored.
 * Always returns 0 so the caller's control-packet loop continues.
 */
static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
		       dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}

/* If the queue is stopped, wake it up so that we'll
 * send out another START message at the next TX.
 */
static void maybe_tx_wakeup(struct vnet_port *port)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
				  port->q_index);
	__netif_tx_lock(txq, smp_processor_id());
	if (likely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);
	__netif_tx_unlock(txq);
}

/* A port is usable once the VIO handshake with the peer has completed. */
bool sunvnet_port_is_up_common(struct vnet_port *vnet)
{
	struct vio_driver_state *vio = &vnet->vio;

	return !!(vio->hs_state & VIO_HS_COMPLETE);
}
EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common);

/* NAPI worker for one vnet_port.  Handles (in priority order) LDC RESET
 * and UP link events, then drains data/control messages from the LDC
 * channel up to @budget received packets.  Returns the number of rx
 * packets processed (0 for pure link events) so sunvnet_poll_common()
 * can decide whether to re-arm the interrupt.
 */
static int vnet_event_napi(struct vnet_port *port, int budget)
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_driver_state *vio = &port->vio;
	int tx_wakeup, err;
	int npkts = 0;

	/* we don't expect any other bits */
	BUG_ON(port->rx_event & ~(LDC_EVENT_DATA_READY |
				  LDC_EVENT_RESET |
				  LDC_EVENT_UP));

	/* RESET takes precedent over any other event */
	if (port->rx_event & LDC_EVENT_RESET) {
		/* a link went down */

		if (port->vsw == 1) {
			netif_tx_stop_all_queues(dev);
			netif_carrier_off(dev);
		}

		vio_link_state_change(vio, LDC_EVENT_RESET);
		vnet_port_reset(port);
		vio_port_up(vio);

		/* If the device is running but its tx queue was
		 * stopped (due to flow control), restart it.
		 * This is necessary since vnet_port_reset()
		 * clears the tx drings and thus we may never get
		 * back a VIO_TYPE_DATA ACK packet - which is
		 * the normal mechanism to restart the tx queue.
		 */
		if (netif_running(dev))
			maybe_tx_wakeup(port);

		port->rx_event = 0;
		return 0;
	}

	if (port->rx_event & LDC_EVENT_UP) {
		/* a link came up */

		/* NOTE(review): this branch uses port->dev while the RESET
		 * branch above uses dev (== VNET_PORT_TO_NET_DEVICE(port));
		 * presumably equivalent for vsw ports — confirm and unify.
		 */
		if (port->vsw == 1) {
			netif_carrier_on(port->dev);
			netif_tx_start_all_queues(port->dev);
		}

		vio_link_state_change(vio, LDC_EVENT_UP);
		port->rx_event = 0;
		return 0;
	}

	err = 0;
	tx_wakeup = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		if (port->napi_resume) {
			/* Resuming a poll that previously exhausted its
			 * budget: synthesize the DRING_DATA message we were
			 * working on instead of reading from the channel.
			 */
			struct vio_dring_data *pkt =
				(struct vio_dring_data *)&msgbuf;
			struct vio_dring_state *dr =
				&port->vio.drings[VIO_DRIVER_RX_RING];

			pkt->tag.type = VIO_TYPE_DATA;
			pkt->tag.stype = VIO_SUBTYPE_INFO;
			pkt->tag.stype_env = VIO_DRING_DATA;
			pkt->seq = dr->rcv_nxt;
			pkt->start_idx = vio_dring_next(dr,
							port->napi_stop_idx);
			pkt->end_idx = -1;
		} else {
			err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
			if (unlikely(err < 0)) {
				if (err == -ECONNRESET)
					vio_conn_reset(vio);
				break;
			}
			if (err == 0)
				break;
			viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
			       msgbuf.tag.type,
			       msgbuf.tag.stype,
			       msgbuf.tag.stype_env,
			       msgbuf.tag.sid);
			err = vio_validate_sid(vio, &msgbuf.tag);
			if (err < 0)
				break;
		}

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				if (!sunvnet_port_is_up_common(port)) {
					/* failures like handshake_failure()
					 * may have cleaned up dring, but
					 * NAPI polling may bring us here.
					 */
					err = -ECONNRESET;
					break;
				}
				err = vnet_rx(port, &msgbuf, &npkts, budget);
				if (npkts >= budget)
					break;
				if (npkts == 0)
					break;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				/* positive return asks us to wake the tx
				 * queue once the loop finishes
				 */
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
			if (err)
				break;
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}
		if (err == -ECONNRESET)
			break;
	}
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		maybe_tx_wakeup(port);
	return npkts;
}

/* Shared NAPI poll callback.  When the budget was not exhausted the
 * poll is completed and the rx interrupt is re-enabled via the
 * hypervisor (vio_set_intr), per the standard NAPI contract.
 */
int sunvnet_poll_common(struct napi_struct *napi, int budget)
{
	struct vnet_port *port = container_of(napi, struct vnet_port, napi);
	struct vio_driver_state *vio = &port->vio;
	int processed = vnet_event_napi(port, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);
		port->rx_event &= ~LDC_EVENT_DATA_READY;
		vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
	}
	return processed;
}
EXPORT_SYMBOL_GPL(sunvnet_poll_common);

/* LDC event callback: record the event bits, mask the rx interrupt and
 * defer all work to NAPI context.
 */
void sunvnet_event_common(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;

	port->rx_event |= event;
	vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
	napi_schedule(&port->napi);
}
EXPORT_SYMBOL_GPL(sunvnet_event_common);

/* Send a DRING_DATA "start" trigger telling the peer that descriptors
 * beginning at @start are READY.  Any pending DRING_STOPPED ack is
 * flushed first.  On -EAGAIN the send is retried with exponential
 * backoff (capped at 128us, VNET_MAX_RETRIES attempts).  Returns the
 * vio_ldc_send() result: >0 on success, <=0 on failure.
 */
static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_INFO,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = start,
		.end_idx = (u32)-1,
	};
	int err, delay;
	int retries = 0;

	if (port->stop_rx) {
		trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
						  port->vio._peer_sid,
						  port->stop_rx_idx, -1);
		err = vnet_send_ack(port,
				    &port->vio.drings[VIO_DRIVER_RX_RING],
				    port->stop_rx_idx, -1,
				    VIO_DRING_STOPPED);
		if (err <= 0)
			return err;
	}

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);
	trace_vnet_tx_trigger(port->vio._local_sid,
			      port->vio._peer_sid, start, err);

	return err;
}

/* Walk the tx dring backwards from dr->prod, collecting skbs whose
 * descriptors the peer has consumed (VIO_DESC_DONE) into a singly
 * linked list for the caller to free outside any locks.  Descriptors
 * still READY are counted in *pending; the walk stops at the first
 * FREE descriptor with no attached skb.  Caller must hold the tx lock.
 */
static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
					  unsigned *pending)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *skb = NULL;
	int i, txi;

	*pending = 0;

	txi = dr->prod;
	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		/* step backwards with wrap-around */
		--txi;
		if (txi < 0)
			txi = VNET_TX_RING_SIZE - 1;

		d = vio_dring_entry(dr, txi);

		if (d->hdr.state == VIO_DESC_READY) {
			(*pending)++;
			continue;
		}
		if (port->tx_bufs[txi].skb) {
			if (d->hdr.state != VIO_DESC_DONE)
				pr_notice("invalid ring buffer state %d\n",
					  d->hdr.state);
			BUG_ON(port->tx_bufs[txi].skb->next);

			/* chain onto the free list via skb->next */
			port->tx_bufs[txi].skb->next = skb;
			skb = port->tx_bufs[txi].skb;
			port->tx_bufs[txi].skb = NULL;

			ldc_unmap(port->vio.lp,
				  port->tx_bufs[txi].cookies,
				  port->tx_bufs[txi].ncookies);
		} else if (d->hdr.state == VIO_DESC_FREE) {
			break;
		}
		d->hdr.state = VIO_DESC_FREE;
	}
	return skb;
}

/* Free a list of skbs chained through skb->next (as built by
 * vnet_clean_tx_ring()), unlinking each before freeing it.
 */
static inline void vnet_free_skbs(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		skb->next = NULL;
		dev_kfree_skb(skb);
		skb = next;
	}
}

/* Periodic tx-ring reclaim timer.  Frees completed tx skbs and
 * re-arms itself while descriptors remain outstanding; otherwise the
 * timer is stopped.  @port0 is the vnet_port (old timer-API calling
 * convention passes it as unsigned long).
 */
void sunvnet_clean_timer_expire_common(unsigned long port0)
{
	struct vnet_port *port = (struct vnet_port *)port0;
	struct sk_buff *freeskbs;
	unsigned pending;

	netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port));
	freeskbs = vnet_clean_tx_ring(port, &pending);
	netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port));

	vnet_free_skbs(freeskbs);

	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else
		del_timer(&port->clean_timer);
}
EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common);

/* Map an skb (linear head plus page frags) for LDC transfer.  The head
 * mapping starts VNET_PACKET_SKIP bytes before skb->data and is padded
 * to at least ETH_ZLEN and rounded up for 8-byte alignment (the caller
 * guarantees the headroom/alignment via vnet_skb_shape()).  Returns
 * the number of cookies used, or a negative errno; on frag-mapping
 * failure all cookies mapped so far are unmapped before returning.
 */
static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
			       struct ldc_trans_cookie *cookies, int ncookies,
			       unsigned int map_perm)
{
	int i, nc, err, blen;

	/* header */
	blen = skb_headlen(skb);
	if (blen < ETH_ZLEN)
		blen = ETH_ZLEN;
	blen += VNET_PACKET_SKIP;
	blen += 8 - (blen & 7);

	err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies,
			     ncookies, map_perm);
	if (err < 0)
		return err;
	nc = err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		if (nc < ncookies) {
			vaddr = kmap_atomic(skb_frag_page(f));
			blen = skb_frag_size(f);
			blen += 8 - (blen & 7);
			err = ldc_map_single(lp, vaddr + f->page_offset,
					     blen, cookies + nc, ncookies - nc,
					     map_perm);
			kunmap_atomic(vaddr);
		} else {
			err = -EMSGSIZE;
		}

		if (err < 0) {
			ldc_unmap(lp, cookies, nc);
			return err;
		}
		nc += err;
	}
	return nc;
}

/* Ensure an skb satisfies the LDC mapping constraints (VNET_PACKET_SKIP
 * headroom, 8-byte alignment of head and every frag, tailroom for
 * padding, and no more frags than cookies).  If the skb already
 * qualifies it is returned unchanged; otherwise the data is copied
 * into a freshly aligned skb (recomputing the transport checksum for
 * CHECKSUM_PARTIAL packets) and the original is freed.  Returns NULL
 * on allocation/copy failure, with the original skb freed.
 */
static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
{
	struct sk_buff *nskb;
	int i, len, pad, docopy;

	len = skb->len;
	pad = 0;
	if (len < ETH_ZLEN) {
		pad += ETH_ZLEN - skb->len;
		len += pad;
	}
	len += VNET_PACKET_SKIP;
	pad += 8 - (len & 7);

	/* make sure we have enough cookies and alignment in every frag */
	docopy = skb_shinfo(skb)->nr_frags >= ncookies;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		docopy |= f->page_offset & 7;
	}
	if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
	    skb_tailroom(skb) < pad ||
	    skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
		int start = 0, offset;
		__wsum csum;

		len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
		nskb = alloc_and_align_skb(skb->dev, len);
		if (!nskb) {
			dev_kfree_skb(skb);
			return NULL;
		}
		skb_reserve(nskb, VNET_PACKET_SKIP);

		nskb->protocol = skb->protocol;
		offset = skb_mac_header(skb) - skb->data;
		skb_set_mac_header(nskb, offset);
		offset = skb_network_header(skb) - skb->data;
		skb_set_network_header(nskb, offset);
		offset = skb_transport_header(skb) - skb->data;
		skb_set_transport_header(nskb, offset);

		offset = 0;
		nskb->csum_offset = skb->csum_offset;
		nskb->ip_summed = skb->ip_summed;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			start = skb_checksum_start_offset(skb);
		if (start) {
			/* finalize the pending partial checksum while
			 * copying: zero the checksum field, fold the
			 * payload, and add the pseudo-header for TCP/UDP
			 */
			struct iphdr *iph = ip_hdr(nskb);
			int offset = start + nskb->csum_offset;

			if (skb_copy_bits(skb, 0, nskb->data, start)) {
				dev_kfree_skb(nskb);
				dev_kfree_skb(skb);
				return NULL;
			}
			*(__sum16 *)(skb->data + offset) = 0;
			csum = skb_copy_and_csum_bits(skb, start,
						      nskb->data + start,
						      skb->len - start, 0);
			if (iph->protocol == IPPROTO_TCP ||
			    iph->protocol == IPPROTO_UDP) {
				csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - start,
							 iph->protocol, csum);
			}
			*(__sum16 *)(nskb->data + offset) = csum;

			nskb->ip_summed = CHECKSUM_NONE;
		} else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
			dev_kfree_skb(nskb);
			dev_kfree_skb(skb);
			return NULL;
		}
		(void)skb_put(nskb, skb->len);
		if (skb_is_gso(skb)) {
			skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
			skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
		}
		nskb->queue_mapping = skb->queue_mapping;
		dev_kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/* GSO path for oversized skbs: software-segment the packet to the
 * peer's TSO size (port->tsolen) and transmit each segment through
 * sunvnet_start_xmit_common().  The tx queue is stopped/restarted when
 * the dring cannot hold all segments.  Returns a NETDEV_TX_* status.
 */
static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
				struct vnet_port *(*vnet_tx_port)
				(struct sk_buff *, struct net_device *))
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *segs;
	int maclen, datalen;
	int status;
	int gso_size, gso_type, gso_segs;
	int hlen = skb_transport_header(skb) - skb_mac_header(skb);
	int proto = IPPROTO_IP;

	if (skb->protocol == htons(ETH_P_IP))
		proto = ip_hdr(skb)->protocol;
	else if (skb->protocol == htons(ETH_P_IPV6))
		proto = ipv6_hdr(skb)->nexthdr;

	if (proto == IPPROTO_TCP) {
		hlen += tcp_hdr(skb)->doff * 4;
	} else if (proto == IPPROTO_UDP) {
		hlen += sizeof(struct udphdr);
	} else {
		/* NOTE(review): skb->protocol is __be16 printed with %d —
		 * printk format mismatch; should be ntohs()+%x. Confirm
		 * against current upstream before changing.
		 */
		pr_err("vnet_handle_offloads GSO with unknown transport "
		       "protocol %d tproto %d\n", skb->protocol, proto);
		hlen = 128; /* XXX */
	}
	datalen = port->tsolen - hlen;

	gso_size = skb_shinfo(skb)->gso_size;
	gso_type = skb_shinfo(skb)->gso_type;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (port->tso && gso_size < datalen)
		gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);

	if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, port->q_index);
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
			return NETDEV_TX_BUSY;
		netif_tx_wake_queue(txq);
	}

	maclen = skb_network_header(skb) - skb_mac_header(skb);
	skb_pull(skb, maclen);

	if (port->tso && gso_size < datalen) {
		if (skb_unclone(skb, GFP_ATOMIC))
			goto out_dropped;

		/* segment to TSO size */
		skb_shinfo(skb)->gso_size = datalen;
		skb_shinfo(skb)->gso_segs = gso_segs;
	}
	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto out_dropped;

	skb_push(skb, maclen);
	skb_reset_mac_header(skb);

	status = 0;
	while (segs) {
		struct sk_buff *curr = segs;

		segs = segs->next;
		curr->next = NULL;
		if (port->tso && curr->len > dev->mtu) {
			skb_shinfo(curr)->gso_size = gso_size;
			skb_shinfo(curr)->gso_type = gso_type;
			skb_shinfo(curr)->gso_segs =
				DIV_ROUND_UP(curr->len - hlen, gso_size);
		} else {
			skb_shinfo(curr)->gso_size = 0;
		}

		skb_push(curr, maclen);
		skb_reset_mac_header(curr);
		memcpy(skb_mac_header(curr), skb_mac_header(skb),
		       maclen);
		curr->csum_start = skb_transport_header(curr) - curr->head;
		if (ip_hdr(curr)->protocol == IPPROTO_TCP)
			curr->csum_offset = offsetof(struct tcphdr, check);
		else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
			curr->csum_offset = offsetof(struct udphdr, check);

		/* once a segment fails, just free the rest */
		if (!(status & NETDEV_TX_MASK))
			status = sunvnet_start_xmit_common(curr, dev,
							   vnet_tx_port);
		if (status & NETDEV_TX_MASK)
			dev_kfree_skb_any(curr);
	}

	if (!(status & NETDEV_TX_MASK))
		dev_kfree_skb_any(skb);
	return status;
out_dropped:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Common ndo_start_xmit implementation shared by sunvnet and
 * ldmvsw.  @vnet_tx_port maps the skb to its destination vnet_port
 * (called under RCU).  Oversized GSO packets are diverted to
 * vnet_handle_offloads(); packets larger than the peer MTU elicit an
 * ICMP FRAG_NEEDED / PKT_TOOBIG and are dropped.  Otherwise the skb is
 * shaped for LDC alignment, mapped into the next tx descriptor, and a
 * "start" trigger is sent if the peer needs one.
 */
int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
			      struct vnet_port *(*vnet_tx_port)
			      (struct sk_buff *, struct net_device *))
{
	struct vnet_port *port = NULL;
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned int len;
	struct sk_buff *freeskbs = NULL;
	int i, err, txi;
	unsigned pending = 0;
	struct netdev_queue *txq;

	rcu_read_lock();
	port = vnet_tx_port(skb, dev);
	if (unlikely(!port))
		goto out_dropped;

	if (skb_is_gso(skb) && skb->len > port->tsolen) {
		err = vnet_handle_offloads(port, skb, vnet_tx_port);
		rcu_read_unlock();
		return err;
	}

	if (!skb_is_gso(skb) && skb->len > port->rmtu) {
		/* too big for the peer: report path MTU and drop */
		unsigned long localmtu = port->rmtu - ETH_HLEN;

		if (vio_version_after_eq(&port->vio, 1, 3))
			localmtu -= VLAN_HLEN;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct flowi4 fl4;
			struct rtable *rt = NULL;

			memset(&fl4, 0, sizeof(fl4));
			fl4.flowi4_oif = dev->ifindex;
			fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
			fl4.daddr = ip_hdr(skb)->daddr;
			fl4.saddr = ip_hdr(skb)->saddr;

			rt = ip_route_output_key(dev_net(dev), &fl4);
			if (!IS_ERR(rt)) {
				skb_dst_set(skb, &rt->dst);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_FRAG_NEEDED,
					  htonl(localmtu));
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
#endif
		goto out_dropped;
	}

	skb = vnet_skb_shape(skb, 2);

	if (unlikely(!skb))
		goto out_dropped;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vnet_fullcsum(skb);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	i = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, i);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
			dev->stats.tx_errors++;
		}
		rcu_read_unlock();
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	txi = dr->prod;

	freeskbs = vnet_clean_tx_ring(port, &pending);

	BUG_ON(port->tx_bufs[txi].skb);

	len = skb->len;
	if (len < ETH_ZLEN)
		len = ETH_ZLEN;

	err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
			   (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
	if (err < 0) {
		netdev_info(dev, "tx buffer map error %d\n", err);
		goto out_dropped;
	}

	port->tx_bufs[txi].skb = skb;
	skb = NULL;
	port->tx_bufs[txi].ncookies = err;

	/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
	 * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
	 * the protocol itself does not require it as long as the peer
	 * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
	 *
	 * An ACK for every packet in the ring is expensive as the
	 * sending of LDC messages is slow and affects performance.
	 */
	d->hdr.ack = VIO_ACK_DISABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[txi].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[txi].cookies[i];
	if (vio_version_after_eq(&port->vio, 1, 7)) {
		struct vio_net_dext *dext = vio_net_ext(d);

		memset(dext, 0, sizeof(*dext));
		if (skb_is_gso(port->tx_bufs[txi].skb)) {
			dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
				->gso_size;
			dext->flags |= VNET_PKT_IPV4_LSO;
		}
		if (vio_version_after_eq(&port->vio, 1, 8) &&
		    !port->switch_port) {
			dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
			dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
		}
	}

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	dma_wmb();

	d->hdr.state = VIO_DESC_READY;

	/* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
	 * to notify the consumer that some descriptors are READY.
	 * After that "start" trigger, no additional triggers are needed until
	 * a DRING_STOPPED is received from the consumer. The dr->cons field
	 * (set up by vnet_ack()) has the value of the next dring index
	 * that has not yet been ack-ed. We send a "start" trigger here
	 * if, and only if, start_cons is true (reset it afterward). Conversely,
	 * vnet_ack() should check if the dring corresponding to cons
	 * is marked READY, but start_cons was false.
	 * If so, vnet_ack() should send out the missed "start" trigger.
	 *
	 * Note that the dma_wmb() above makes sure the cookies et al. are
	 * not globally visible before the VIO_DESC_READY, and that the
	 * stores are ordered correctly by the compiler. The consumer will
	 * not proceed until the VIO_DESC_READY is visible assuring that
	 * the consumer does not observe anything related to descriptors
	 * out of order. The HV trap from the LDC start trigger is the
	 * producer to consumer announcement that work is available to the
	 * consumer
	 */
	if (!port->start_cons) { /* previous trigger suffices */
		trace_vnet_skip_tx_trigger(port->vio._local_sid,
					   port->vio._peer_sid, dr->cons);
		goto ldc_start_done;
	}

	err = __vnet_tx_trigger(port, dr->cons);
	if (unlikely(err < 0)) {
		netdev_info(dev, "TX trigger error %d\n", err);
		d->hdr.state = VIO_DESC_FREE;
		skb = port->tx_bufs[txi].skb;
		port->tx_bufs[txi].skb = NULL;
		dev->stats.tx_carrier_errors++;
		goto out_dropped;
	}

ldc_start_done:
	port->start_cons = false;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		netif_tx_stop_queue(txq);
		smp_rmb();
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_tx_wake_queue(txq);
	}

	(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
	rcu_read_unlock();

	vnet_free_skbs(freeskbs);

	return NETDEV_TX_OK;
144831762eaaSAaron Young 144931762eaaSAaron Young out_dropped: 145031762eaaSAaron Young if (pending) 145131762eaaSAaron Young (void)mod_timer(&port->clean_timer, 145231762eaaSAaron Young jiffies + VNET_CLEAN_TIMEOUT); 145331762eaaSAaron Young else if (port) 145431762eaaSAaron Young del_timer(&port->clean_timer); 145531762eaaSAaron Young rcu_read_unlock(); 145631762eaaSAaron Young if (skb) 145731762eaaSAaron Young dev_kfree_skb(skb); 145831762eaaSAaron Young vnet_free_skbs(freeskbs); 145931762eaaSAaron Young dev->stats.tx_dropped++; 146031762eaaSAaron Young return NETDEV_TX_OK; 146131762eaaSAaron Young } 146231762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common); 146331762eaaSAaron Young 146431762eaaSAaron Young void sunvnet_tx_timeout_common(struct net_device *dev) 146531762eaaSAaron Young { 146631762eaaSAaron Young /* XXX Implement me XXX */ 146731762eaaSAaron Young } 146831762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common); 146931762eaaSAaron Young 147031762eaaSAaron Young int sunvnet_open_common(struct net_device *dev) 147131762eaaSAaron Young { 147231762eaaSAaron Young netif_carrier_on(dev); 147331762eaaSAaron Young netif_tx_start_all_queues(dev); 147431762eaaSAaron Young 147531762eaaSAaron Young return 0; 147631762eaaSAaron Young } 147731762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_open_common); 147831762eaaSAaron Young 147931762eaaSAaron Young int sunvnet_close_common(struct net_device *dev) 148031762eaaSAaron Young { 148131762eaaSAaron Young netif_tx_stop_all_queues(dev); 148231762eaaSAaron Young netif_carrier_off(dev); 148331762eaaSAaron Young 148431762eaaSAaron Young return 0; 148531762eaaSAaron Young } 148631762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_close_common); 148731762eaaSAaron Young 148831762eaaSAaron Young static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) 148931762eaaSAaron Young { 149031762eaaSAaron Young struct vnet_mcast_entry *m; 149131762eaaSAaron Young 149231762eaaSAaron Young for (m = 
vp->mcast_list; m; m = m->next) { 149331762eaaSAaron Young if (ether_addr_equal(m->addr, addr)) 149431762eaaSAaron Young return m; 149531762eaaSAaron Young } 149631762eaaSAaron Young return NULL; 149731762eaaSAaron Young } 149831762eaaSAaron Young 149931762eaaSAaron Young static void __update_mc_list(struct vnet *vp, struct net_device *dev) 150031762eaaSAaron Young { 150131762eaaSAaron Young struct netdev_hw_addr *ha; 150231762eaaSAaron Young 150331762eaaSAaron Young netdev_for_each_mc_addr(ha, dev) { 150431762eaaSAaron Young struct vnet_mcast_entry *m; 150531762eaaSAaron Young 150631762eaaSAaron Young m = __vnet_mc_find(vp, ha->addr); 150731762eaaSAaron Young if (m) { 150831762eaaSAaron Young m->hit = 1; 150931762eaaSAaron Young continue; 151031762eaaSAaron Young } 151131762eaaSAaron Young 151231762eaaSAaron Young if (!m) { 151331762eaaSAaron Young m = kzalloc(sizeof(*m), GFP_ATOMIC); 151431762eaaSAaron Young if (!m) 151531762eaaSAaron Young continue; 151631762eaaSAaron Young memcpy(m->addr, ha->addr, ETH_ALEN); 151731762eaaSAaron Young m->hit = 1; 151831762eaaSAaron Young 151931762eaaSAaron Young m->next = vp->mcast_list; 152031762eaaSAaron Young vp->mcast_list = m; 152131762eaaSAaron Young } 152231762eaaSAaron Young } 152331762eaaSAaron Young } 152431762eaaSAaron Young 152531762eaaSAaron Young static void __send_mc_list(struct vnet *vp, struct vnet_port *port) 152631762eaaSAaron Young { 152731762eaaSAaron Young struct vio_net_mcast_info info; 152831762eaaSAaron Young struct vnet_mcast_entry *m, **pp; 152931762eaaSAaron Young int n_addrs; 153031762eaaSAaron Young 153131762eaaSAaron Young memset(&info, 0, sizeof(info)); 153231762eaaSAaron Young 153331762eaaSAaron Young info.tag.type = VIO_TYPE_CTRL; 153431762eaaSAaron Young info.tag.stype = VIO_SUBTYPE_INFO; 153531762eaaSAaron Young info.tag.stype_env = VNET_MCAST_INFO; 153631762eaaSAaron Young info.tag.sid = vio_send_sid(&port->vio); 153731762eaaSAaron Young info.set = 1; 153831762eaaSAaron Young 
153931762eaaSAaron Young n_addrs = 0; 154031762eaaSAaron Young for (m = vp->mcast_list; m; m = m->next) { 154131762eaaSAaron Young if (m->sent) 154231762eaaSAaron Young continue; 154331762eaaSAaron Young m->sent = 1; 154431762eaaSAaron Young memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], 154531762eaaSAaron Young m->addr, ETH_ALEN); 154631762eaaSAaron Young if (++n_addrs == VNET_NUM_MCAST) { 154731762eaaSAaron Young info.count = n_addrs; 154831762eaaSAaron Young 154931762eaaSAaron Young (void)vio_ldc_send(&port->vio, &info, 155031762eaaSAaron Young sizeof(info)); 155131762eaaSAaron Young n_addrs = 0; 155231762eaaSAaron Young } 155331762eaaSAaron Young } 155431762eaaSAaron Young if (n_addrs) { 155531762eaaSAaron Young info.count = n_addrs; 155631762eaaSAaron Young (void)vio_ldc_send(&port->vio, &info, sizeof(info)); 155731762eaaSAaron Young } 155831762eaaSAaron Young 155931762eaaSAaron Young info.set = 0; 156031762eaaSAaron Young 156131762eaaSAaron Young n_addrs = 0; 156231762eaaSAaron Young pp = &vp->mcast_list; 156331762eaaSAaron Young while ((m = *pp) != NULL) { 156431762eaaSAaron Young if (m->hit) { 156531762eaaSAaron Young m->hit = 0; 156631762eaaSAaron Young pp = &m->next; 156731762eaaSAaron Young continue; 156831762eaaSAaron Young } 156931762eaaSAaron Young 157031762eaaSAaron Young memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], 157131762eaaSAaron Young m->addr, ETH_ALEN); 157231762eaaSAaron Young if (++n_addrs == VNET_NUM_MCAST) { 157331762eaaSAaron Young info.count = n_addrs; 157431762eaaSAaron Young (void)vio_ldc_send(&port->vio, &info, 157531762eaaSAaron Young sizeof(info)); 157631762eaaSAaron Young n_addrs = 0; 157731762eaaSAaron Young } 157831762eaaSAaron Young 157931762eaaSAaron Young *pp = m->next; 158031762eaaSAaron Young kfree(m); 158131762eaaSAaron Young } 158231762eaaSAaron Young if (n_addrs) { 158331762eaaSAaron Young info.count = n_addrs; 158431762eaaSAaron Young (void)vio_ldc_send(&port->vio, &info, sizeof(info)); 158531762eaaSAaron Young } 
158631762eaaSAaron Young } 158731762eaaSAaron Young 158867d0719fSAaron Young void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp) 158931762eaaSAaron Young { 159031762eaaSAaron Young struct vnet_port *port; 159131762eaaSAaron Young 159231762eaaSAaron Young rcu_read_lock(); 159331762eaaSAaron Young list_for_each_entry_rcu(port, &vp->port_list, list) { 159431762eaaSAaron Young if (port->switch_port) { 159531762eaaSAaron Young __update_mc_list(vp, dev); 159631762eaaSAaron Young __send_mc_list(vp, port); 159731762eaaSAaron Young break; 159831762eaaSAaron Young } 159931762eaaSAaron Young } 160031762eaaSAaron Young rcu_read_unlock(); 160131762eaaSAaron Young } 160231762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common); 160331762eaaSAaron Young 160431762eaaSAaron Young int sunvnet_set_mac_addr_common(struct net_device *dev, void *p) 160531762eaaSAaron Young { 160631762eaaSAaron Young return -EINVAL; 160731762eaaSAaron Young } 160831762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common); 160931762eaaSAaron Young 161031762eaaSAaron Young void sunvnet_port_free_tx_bufs_common(struct vnet_port *port) 161131762eaaSAaron Young { 161231762eaaSAaron Young struct vio_dring_state *dr; 161331762eaaSAaron Young int i; 161431762eaaSAaron Young 161531762eaaSAaron Young dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 161631762eaaSAaron Young 1617dc153f85SAaron Young if (!dr->base) 161831762eaaSAaron Young return; 161931762eaaSAaron Young 162031762eaaSAaron Young for (i = 0; i < VNET_TX_RING_SIZE; i++) { 162131762eaaSAaron Young struct vio_net_desc *d; 162231762eaaSAaron Young void *skb = port->tx_bufs[i].skb; 162331762eaaSAaron Young 162431762eaaSAaron Young if (!skb) 162531762eaaSAaron Young continue; 162631762eaaSAaron Young 162731762eaaSAaron Young d = vio_dring_entry(dr, i); 162831762eaaSAaron Young 162931762eaaSAaron Young ldc_unmap(port->vio.lp, 163031762eaaSAaron Young port->tx_bufs[i].cookies, 163131762eaaSAaron Young 
port->tx_bufs[i].ncookies); 163231762eaaSAaron Young dev_kfree_skb(skb); 163331762eaaSAaron Young port->tx_bufs[i].skb = NULL; 163431762eaaSAaron Young d->hdr.state = VIO_DESC_FREE; 163531762eaaSAaron Young } 163631762eaaSAaron Young ldc_free_exp_dring(port->vio.lp, dr->base, 163731762eaaSAaron Young (dr->entry_size * dr->num_entries), 163831762eaaSAaron Young dr->cookies, dr->ncookies); 163931762eaaSAaron Young dr->base = NULL; 164031762eaaSAaron Young dr->entry_size = 0; 164131762eaaSAaron Young dr->num_entries = 0; 164231762eaaSAaron Young dr->pending = 0; 164331762eaaSAaron Young dr->ncookies = 0; 164431762eaaSAaron Young } 164531762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common); 164631762eaaSAaron Young 1647867fa150SShannon Nelson void vnet_port_reset(struct vnet_port *port) 164831762eaaSAaron Young { 164931762eaaSAaron Young del_timer(&port->clean_timer); 165031762eaaSAaron Young sunvnet_port_free_tx_bufs_common(port); 165131762eaaSAaron Young port->rmtu = 0; 1652bc221a34SShannon Nelson port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */ 165331762eaaSAaron Young port->tsolen = 0; 165431762eaaSAaron Young } 1655867fa150SShannon Nelson EXPORT_SYMBOL_GPL(vnet_port_reset); 165631762eaaSAaron Young 165731762eaaSAaron Young static int vnet_port_alloc_tx_ring(struct vnet_port *port) 165831762eaaSAaron Young { 165931762eaaSAaron Young struct vio_dring_state *dr; 166031762eaaSAaron Young unsigned long len, elen; 166131762eaaSAaron Young int i, err, ncookies; 166231762eaaSAaron Young void *dring; 166331762eaaSAaron Young 166431762eaaSAaron Young dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 166531762eaaSAaron Young 166631762eaaSAaron Young elen = sizeof(struct vio_net_desc) + 166731762eaaSAaron Young sizeof(struct ldc_trans_cookie) * 2; 166831762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 7)) 166931762eaaSAaron Young elen += sizeof(struct vio_net_dext); 167031762eaaSAaron Young len = VNET_TX_RING_SIZE * elen; 
167131762eaaSAaron Young 167231762eaaSAaron Young ncookies = VIO_MAX_RING_COOKIES; 167331762eaaSAaron Young dring = ldc_alloc_exp_dring(port->vio.lp, len, 167431762eaaSAaron Young dr->cookies, &ncookies, 167531762eaaSAaron Young (LDC_MAP_SHADOW | 167631762eaaSAaron Young LDC_MAP_DIRECT | 167731762eaaSAaron Young LDC_MAP_RW)); 167831762eaaSAaron Young if (IS_ERR(dring)) { 167931762eaaSAaron Young err = PTR_ERR(dring); 168031762eaaSAaron Young goto err_out; 168131762eaaSAaron Young } 168231762eaaSAaron Young 168331762eaaSAaron Young dr->base = dring; 168431762eaaSAaron Young dr->entry_size = elen; 168531762eaaSAaron Young dr->num_entries = VNET_TX_RING_SIZE; 1686dc153f85SAaron Young dr->prod = 0; 1687dc153f85SAaron Young dr->cons = 0; 168831762eaaSAaron Young port->start_cons = true; /* need an initial trigger */ 168931762eaaSAaron Young dr->pending = VNET_TX_RING_SIZE; 169031762eaaSAaron Young dr->ncookies = ncookies; 169131762eaaSAaron Young 169231762eaaSAaron Young for (i = 0; i < VNET_TX_RING_SIZE; ++i) { 169331762eaaSAaron Young struct vio_net_desc *d; 169431762eaaSAaron Young 169531762eaaSAaron Young d = vio_dring_entry(dr, i); 169631762eaaSAaron Young d->hdr.state = VIO_DESC_FREE; 169731762eaaSAaron Young } 169831762eaaSAaron Young return 0; 169931762eaaSAaron Young 170031762eaaSAaron Young err_out: 170131762eaaSAaron Young sunvnet_port_free_tx_bufs_common(port); 170231762eaaSAaron Young 170331762eaaSAaron Young return err; 170431762eaaSAaron Young } 170531762eaaSAaron Young 170631762eaaSAaron Young #ifdef CONFIG_NET_POLL_CONTROLLER 170767d0719fSAaron Young void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp) 170831762eaaSAaron Young { 170931762eaaSAaron Young struct vnet_port *port; 171031762eaaSAaron Young unsigned long flags; 171131762eaaSAaron Young 171231762eaaSAaron Young spin_lock_irqsave(&vp->lock, flags); 171331762eaaSAaron Young if (!list_empty(&vp->port_list)) { 171431762eaaSAaron Young port = list_entry(vp->port_list.next, 
struct vnet_port, list); 171531762eaaSAaron Young napi_schedule(&port->napi); 171631762eaaSAaron Young } 171731762eaaSAaron Young spin_unlock_irqrestore(&vp->lock, flags); 171831762eaaSAaron Young } 171931762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common); 172031762eaaSAaron Young #endif 172131762eaaSAaron Young 172231762eaaSAaron Young void sunvnet_port_add_txq_common(struct vnet_port *port) 172331762eaaSAaron Young { 172431762eaaSAaron Young struct vnet *vp = port->vp; 172531762eaaSAaron Young int n; 172631762eaaSAaron Young 172731762eaaSAaron Young n = vp->nports++; 172831762eaaSAaron Young n = n & (VNET_MAX_TXQS - 1); 172931762eaaSAaron Young port->q_index = n; 173067d0719fSAaron Young netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), 173167d0719fSAaron Young port->q_index)); 173231762eaaSAaron Young } 173331762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common); 173431762eaaSAaron Young 173531762eaaSAaron Young void sunvnet_port_rm_txq_common(struct vnet_port *port) 173631762eaaSAaron Young { 173731762eaaSAaron Young port->vp->nports--; 173867d0719fSAaron Young netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), 173967d0719fSAaron Young port->q_index)); 174031762eaaSAaron Young } 174131762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common); 1742