131762eaaSAaron Young /* sunvnet.c: Sun LDOM Virtual Network Driver. 231762eaaSAaron Young * 331762eaaSAaron Young * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> 467d0719fSAaron Young * Copyright (C) 2016 Oracle. All rights reserved. 531762eaaSAaron Young */ 631762eaaSAaron Young 731762eaaSAaron Young #include <linux/module.h> 831762eaaSAaron Young #include <linux/kernel.h> 931762eaaSAaron Young #include <linux/types.h> 1031762eaaSAaron Young #include <linux/slab.h> 1131762eaaSAaron Young #include <linux/delay.h> 1231762eaaSAaron Young #include <linux/init.h> 1331762eaaSAaron Young #include <linux/netdevice.h> 1431762eaaSAaron Young #include <linux/ethtool.h> 1531762eaaSAaron Young #include <linux/etherdevice.h> 1631762eaaSAaron Young #include <linux/mutex.h> 1731762eaaSAaron Young #include <linux/highmem.h> 1831762eaaSAaron Young #include <linux/if_vlan.h> 1931762eaaSAaron Young #define CREATE_TRACE_POINTS 2031762eaaSAaron Young #include <trace/events/sunvnet.h> 2131762eaaSAaron Young 2231762eaaSAaron Young #if IS_ENABLED(CONFIG_IPV6) 2331762eaaSAaron Young #include <linux/icmpv6.h> 2431762eaaSAaron Young #endif 2531762eaaSAaron Young 2631762eaaSAaron Young #include <net/ip.h> 2731762eaaSAaron Young #include <net/icmp.h> 2831762eaaSAaron Young #include <net/route.h> 2931762eaaSAaron Young 3031762eaaSAaron Young #include <asm/vio.h> 3131762eaaSAaron Young #include <asm/ldc.h> 3231762eaaSAaron Young 3331762eaaSAaron Young #include "sunvnet_common.h" 3431762eaaSAaron Young 3531762eaaSAaron Young /* Heuristic for the number of times to exponentially backoff and 3631762eaaSAaron Young * retry sending an LDC trigger when EAGAIN is encountered 3731762eaaSAaron Young */ 3831762eaaSAaron Young #define VNET_MAX_RETRIES 10 3931762eaaSAaron Young 4031762eaaSAaron Young static int __vnet_tx_trigger(struct vnet_port *port, u32 start); 4131762eaaSAaron Young static void vnet_port_reset(struct vnet_port *port); 4231762eaaSAaron Young 4331762eaaSAaron Young 
static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) 4431762eaaSAaron Young { 4531762eaaSAaron Young return vio_dring_avail(dr, VNET_TX_RING_SIZE); 4631762eaaSAaron Young } 4731762eaaSAaron Young 4831762eaaSAaron Young static int vnet_handle_unknown(struct vnet_port *port, void *arg) 4931762eaaSAaron Young { 5031762eaaSAaron Young struct vio_msg_tag *pkt = arg; 5131762eaaSAaron Young 5231762eaaSAaron Young pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n", 5331762eaaSAaron Young pkt->type, pkt->stype, pkt->stype_env, pkt->sid); 5431762eaaSAaron Young pr_err("Resetting connection\n"); 5531762eaaSAaron Young 5631762eaaSAaron Young ldc_disconnect(port->vio.lp); 5731762eaaSAaron Young 5831762eaaSAaron Young return -ECONNRESET; 5931762eaaSAaron Young } 6031762eaaSAaron Young 6131762eaaSAaron Young static int vnet_port_alloc_tx_ring(struct vnet_port *port); 6231762eaaSAaron Young 6331762eaaSAaron Young int sunvnet_send_attr_common(struct vio_driver_state *vio) 6431762eaaSAaron Young { 6531762eaaSAaron Young struct vnet_port *port = to_vnet_port(vio); 6667d0719fSAaron Young struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); 6731762eaaSAaron Young struct vio_net_attr_info pkt; 6831762eaaSAaron Young int framelen = ETH_FRAME_LEN; 6931762eaaSAaron Young int i, err; 7031762eaaSAaron Young 7131762eaaSAaron Young err = vnet_port_alloc_tx_ring(to_vnet_port(vio)); 7231762eaaSAaron Young if (err) 7331762eaaSAaron Young return err; 7431762eaaSAaron Young 7531762eaaSAaron Young memset(&pkt, 0, sizeof(pkt)); 7631762eaaSAaron Young pkt.tag.type = VIO_TYPE_CTRL; 7731762eaaSAaron Young pkt.tag.stype = VIO_SUBTYPE_INFO; 7831762eaaSAaron Young pkt.tag.stype_env = VIO_ATTR_INFO; 7931762eaaSAaron Young pkt.tag.sid = vio_send_sid(vio); 8031762eaaSAaron Young if (vio_version_before(vio, 1, 2)) 8131762eaaSAaron Young pkt.xfer_mode = VIO_DRING_MODE; 8231762eaaSAaron Young else 8331762eaaSAaron Young pkt.xfer_mode = VIO_NEW_DRING_MODE; 8431762eaaSAaron Young pkt.addr_type = 
VNET_ADDR_ETHERMAC; 8531762eaaSAaron Young pkt.ack_freq = 0; 8631762eaaSAaron Young for (i = 0; i < 6; i++) 8731762eaaSAaron Young pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); 8831762eaaSAaron Young if (vio_version_after(vio, 1, 3)) { 8931762eaaSAaron Young if (port->rmtu) { 9031762eaaSAaron Young port->rmtu = min(VNET_MAXPACKET, port->rmtu); 9131762eaaSAaron Young pkt.mtu = port->rmtu; 9231762eaaSAaron Young } else { 9331762eaaSAaron Young port->rmtu = VNET_MAXPACKET; 9431762eaaSAaron Young pkt.mtu = port->rmtu; 9531762eaaSAaron Young } 9631762eaaSAaron Young if (vio_version_after_eq(vio, 1, 6)) 9731762eaaSAaron Young pkt.options = VIO_TX_DRING; 9831762eaaSAaron Young } else if (vio_version_before(vio, 1, 3)) { 9931762eaaSAaron Young pkt.mtu = framelen; 10031762eaaSAaron Young } else { /* v1.3 */ 10131762eaaSAaron Young pkt.mtu = framelen + VLAN_HLEN; 10231762eaaSAaron Young } 10331762eaaSAaron Young 10431762eaaSAaron Young pkt.cflags = 0; 10531762eaaSAaron Young if (vio_version_after_eq(vio, 1, 7) && port->tso) { 10631762eaaSAaron Young pkt.cflags |= VNET_LSO_IPV4_CAPAB; 10731762eaaSAaron Young if (!port->tsolen) 10831762eaaSAaron Young port->tsolen = VNET_MAXTSO; 10931762eaaSAaron Young pkt.ipv4_lso_maxlen = port->tsolen; 11031762eaaSAaron Young } 11131762eaaSAaron Young 11231762eaaSAaron Young pkt.plnk_updt = PHYSLINK_UPDATE_NONE; 11331762eaaSAaron Young 11431762eaaSAaron Young viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " 11531762eaaSAaron Young "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " 11631762eaaSAaron Young "cflags[0x%04x] lso_max[%u]\n", 11731762eaaSAaron Young pkt.xfer_mode, pkt.addr_type, 11831762eaaSAaron Young (unsigned long long)pkt.addr, 11931762eaaSAaron Young pkt.ack_freq, pkt.plnk_updt, pkt.options, 12031762eaaSAaron Young (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen); 12131762eaaSAaron Young 12231762eaaSAaron Young return vio_ldc_send(vio, &pkt, sizeof(pkt)); 12331762eaaSAaron Young } 
12431762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_send_attr_common); 12531762eaaSAaron Young 12631762eaaSAaron Young static int handle_attr_info(struct vio_driver_state *vio, 12731762eaaSAaron Young struct vio_net_attr_info *pkt) 12831762eaaSAaron Young { 12931762eaaSAaron Young struct vnet_port *port = to_vnet_port(vio); 13031762eaaSAaron Young u64 localmtu; 13131762eaaSAaron Young u8 xfer_mode; 13231762eaaSAaron Young 13331762eaaSAaron Young viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " 13431762eaaSAaron Young "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " 13531762eaaSAaron Young " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", 13631762eaaSAaron Young pkt->xfer_mode, pkt->addr_type, 13731762eaaSAaron Young (unsigned long long)pkt->addr, 13831762eaaSAaron Young pkt->ack_freq, pkt->plnk_updt, pkt->options, 13931762eaaSAaron Young (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, 14031762eaaSAaron Young pkt->ipv4_lso_maxlen); 14131762eaaSAaron Young 14231762eaaSAaron Young pkt->tag.sid = vio_send_sid(vio); 14331762eaaSAaron Young 14431762eaaSAaron Young xfer_mode = pkt->xfer_mode; 14531762eaaSAaron Young /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */ 14631762eaaSAaron Young if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE) 14731762eaaSAaron Young xfer_mode = VIO_NEW_DRING_MODE; 14831762eaaSAaron Young 14931762eaaSAaron Young /* MTU negotiation: 15031762eaaSAaron Young * < v1.3 - ETH_FRAME_LEN exactly 15131762eaaSAaron Young * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change 15231762eaaSAaron Young * pkt->mtu for ACK 15331762eaaSAaron Young * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly 15431762eaaSAaron Young */ 15531762eaaSAaron Young if (vio_version_before(vio, 1, 3)) { 15631762eaaSAaron Young localmtu = ETH_FRAME_LEN; 15731762eaaSAaron Young } else if (vio_version_after(vio, 1, 3)) { 15831762eaaSAaron Young localmtu = port->rmtu ? 
port->rmtu : VNET_MAXPACKET; 15931762eaaSAaron Young localmtu = min(pkt->mtu, localmtu); 16031762eaaSAaron Young pkt->mtu = localmtu; 16131762eaaSAaron Young } else { /* v1.3 */ 16231762eaaSAaron Young localmtu = ETH_FRAME_LEN + VLAN_HLEN; 16331762eaaSAaron Young } 16431762eaaSAaron Young port->rmtu = localmtu; 16531762eaaSAaron Young 16631762eaaSAaron Young /* LSO negotiation */ 16731762eaaSAaron Young if (vio_version_after_eq(vio, 1, 7)) 16831762eaaSAaron Young port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB); 16931762eaaSAaron Young else 17031762eaaSAaron Young port->tso = false; 17131762eaaSAaron Young if (port->tso) { 17231762eaaSAaron Young if (!port->tsolen) 17331762eaaSAaron Young port->tsolen = VNET_MAXTSO; 17431762eaaSAaron Young port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen); 17531762eaaSAaron Young if (port->tsolen < VNET_MINTSO) { 17631762eaaSAaron Young port->tso = false; 17731762eaaSAaron Young port->tsolen = 0; 17831762eaaSAaron Young pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; 17931762eaaSAaron Young } 18031762eaaSAaron Young pkt->ipv4_lso_maxlen = port->tsolen; 18131762eaaSAaron Young } else { 18231762eaaSAaron Young pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; 18331762eaaSAaron Young pkt->ipv4_lso_maxlen = 0; 18431762eaaSAaron Young } 18531762eaaSAaron Young 18631762eaaSAaron Young /* for version >= 1.6, ACK packet mode we support */ 18731762eaaSAaron Young if (vio_version_after_eq(vio, 1, 6)) { 18831762eaaSAaron Young pkt->xfer_mode = VIO_NEW_DRING_MODE; 18931762eaaSAaron Young pkt->options = VIO_TX_DRING; 19031762eaaSAaron Young } 19131762eaaSAaron Young 19231762eaaSAaron Young if (!(xfer_mode | VIO_NEW_DRING_MODE) || 19331762eaaSAaron Young pkt->addr_type != VNET_ADDR_ETHERMAC || 19431762eaaSAaron Young pkt->mtu != localmtu) { 19531762eaaSAaron Young viodbg(HS, "SEND NET ATTR NACK\n"); 19631762eaaSAaron Young 19731762eaaSAaron Young pkt->tag.stype = VIO_SUBTYPE_NACK; 19831762eaaSAaron Young 19931762eaaSAaron Young (void)vio_ldc_send(vio, pkt, 
sizeof(*pkt)); 20031762eaaSAaron Young 20131762eaaSAaron Young return -ECONNRESET; 202dc153f85SAaron Young } 203dc153f85SAaron Young 20431762eaaSAaron Young viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] " 20531762eaaSAaron Young "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] " 20631762eaaSAaron Young "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", 20731762eaaSAaron Young pkt->xfer_mode, pkt->addr_type, 20831762eaaSAaron Young (unsigned long long)pkt->addr, 20931762eaaSAaron Young pkt->ack_freq, pkt->plnk_updt, pkt->options, 21031762eaaSAaron Young (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, 21131762eaaSAaron Young pkt->ipv4_lso_maxlen); 21231762eaaSAaron Young 21331762eaaSAaron Young pkt->tag.stype = VIO_SUBTYPE_ACK; 21431762eaaSAaron Young 21531762eaaSAaron Young return vio_ldc_send(vio, pkt, sizeof(*pkt)); 21631762eaaSAaron Young } 21731762eaaSAaron Young 21831762eaaSAaron Young static int handle_attr_ack(struct vio_driver_state *vio, 21931762eaaSAaron Young struct vio_net_attr_info *pkt) 22031762eaaSAaron Young { 22131762eaaSAaron Young viodbg(HS, "GOT NET ATTR ACK\n"); 22231762eaaSAaron Young 22331762eaaSAaron Young return 0; 22431762eaaSAaron Young } 22531762eaaSAaron Young 22631762eaaSAaron Young static int handle_attr_nack(struct vio_driver_state *vio, 22731762eaaSAaron Young struct vio_net_attr_info *pkt) 22831762eaaSAaron Young { 22931762eaaSAaron Young viodbg(HS, "GOT NET ATTR NACK\n"); 23031762eaaSAaron Young 23131762eaaSAaron Young return -ECONNRESET; 23231762eaaSAaron Young } 23331762eaaSAaron Young 23431762eaaSAaron Young int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg) 23531762eaaSAaron Young { 23631762eaaSAaron Young struct vio_net_attr_info *pkt = arg; 23731762eaaSAaron Young 23831762eaaSAaron Young switch (pkt->tag.stype) { 23931762eaaSAaron Young case VIO_SUBTYPE_INFO: 24031762eaaSAaron Young return handle_attr_info(vio, pkt); 24131762eaaSAaron Young 24231762eaaSAaron Young case 
VIO_SUBTYPE_ACK: 24331762eaaSAaron Young return handle_attr_ack(vio, pkt); 24431762eaaSAaron Young 24531762eaaSAaron Young case VIO_SUBTYPE_NACK: 24631762eaaSAaron Young return handle_attr_nack(vio, pkt); 24731762eaaSAaron Young 24831762eaaSAaron Young default: 24931762eaaSAaron Young return -ECONNRESET; 25031762eaaSAaron Young } 25131762eaaSAaron Young } 25231762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common); 25331762eaaSAaron Young 25431762eaaSAaron Young void sunvnet_handshake_complete_common(struct vio_driver_state *vio) 25531762eaaSAaron Young { 25631762eaaSAaron Young struct vio_dring_state *dr; 25731762eaaSAaron Young 25831762eaaSAaron Young dr = &vio->drings[VIO_DRIVER_RX_RING]; 259dc153f85SAaron Young dr->rcv_nxt = 1; 260dc153f85SAaron Young dr->snd_nxt = 1; 26131762eaaSAaron Young 26231762eaaSAaron Young dr = &vio->drings[VIO_DRIVER_TX_RING]; 263dc153f85SAaron Young dr->rcv_nxt = 1; 264dc153f85SAaron Young dr->snd_nxt = 1; 26531762eaaSAaron Young } 26631762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common); 26731762eaaSAaron Young 26831762eaaSAaron Young /* The hypervisor interface that implements copying to/from imported 26931762eaaSAaron Young * memory from another domain requires that copies are done to 8-byte 27031762eaaSAaron Young * aligned buffers, and that the lengths of such copies are also 8-byte 27131762eaaSAaron Young * multiples. 27231762eaaSAaron Young * 27331762eaaSAaron Young * So we align skb->data to an 8-byte multiple and pad-out the data 27431762eaaSAaron Young * area so we can round the copy length up to the next multiple of 27531762eaaSAaron Young * 8 for the copy. 27631762eaaSAaron Young * 27731762eaaSAaron Young * The transmitter puts the actual start of the packet 6 bytes into 27831762eaaSAaron Young * the buffer it sends over, so that the IP headers after the ethernet 27931762eaaSAaron Young * header are aligned properly. 
These 6 bytes are not in the descriptor 28031762eaaSAaron Young * length, they are simply implied. This offset is represented using 28131762eaaSAaron Young * the VNET_PACKET_SKIP macro. 28231762eaaSAaron Young */ 28331762eaaSAaron Young static struct sk_buff *alloc_and_align_skb(struct net_device *dev, 28431762eaaSAaron Young unsigned int len) 28531762eaaSAaron Young { 286dc153f85SAaron Young struct sk_buff *skb; 28731762eaaSAaron Young unsigned long addr, off; 28831762eaaSAaron Young 289dc153f85SAaron Young skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8); 29031762eaaSAaron Young if (unlikely(!skb)) 29131762eaaSAaron Young return NULL; 29231762eaaSAaron Young 29331762eaaSAaron Young addr = (unsigned long)skb->data; 29431762eaaSAaron Young off = ((addr + 7UL) & ~7UL) - addr; 29531762eaaSAaron Young if (off) 29631762eaaSAaron Young skb_reserve(skb, off); 29731762eaaSAaron Young 29831762eaaSAaron Young return skb; 29931762eaaSAaron Young } 30031762eaaSAaron Young 30131762eaaSAaron Young static inline void vnet_fullcsum(struct sk_buff *skb) 30231762eaaSAaron Young { 30331762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 30431762eaaSAaron Young int offset = skb_transport_offset(skb); 30531762eaaSAaron Young 30631762eaaSAaron Young if (skb->protocol != htons(ETH_P_IP)) 30731762eaaSAaron Young return; 30831762eaaSAaron Young if (iph->protocol != IPPROTO_TCP && 30931762eaaSAaron Young iph->protocol != IPPROTO_UDP) 31031762eaaSAaron Young return; 31131762eaaSAaron Young skb->ip_summed = CHECKSUM_NONE; 31231762eaaSAaron Young skb->csum_level = 1; 31331762eaaSAaron Young skb->csum = 0; 31431762eaaSAaron Young if (iph->protocol == IPPROTO_TCP) { 31531762eaaSAaron Young struct tcphdr *ptcp = tcp_hdr(skb); 31631762eaaSAaron Young 31731762eaaSAaron Young ptcp->check = 0; 31831762eaaSAaron Young skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 31931762eaaSAaron Young ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 32031762eaaSAaron Young skb->len 
- offset, IPPROTO_TCP, 32131762eaaSAaron Young skb->csum); 32231762eaaSAaron Young } else if (iph->protocol == IPPROTO_UDP) { 32331762eaaSAaron Young struct udphdr *pudp = udp_hdr(skb); 32431762eaaSAaron Young 32531762eaaSAaron Young pudp->check = 0; 32631762eaaSAaron Young skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 32731762eaaSAaron Young pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 32831762eaaSAaron Young skb->len - offset, IPPROTO_UDP, 32931762eaaSAaron Young skb->csum); 33031762eaaSAaron Young } 33131762eaaSAaron Young } 33231762eaaSAaron Young 33331762eaaSAaron Young static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) 33431762eaaSAaron Young { 33567d0719fSAaron Young struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); 33631762eaaSAaron Young unsigned int len = desc->size; 33731762eaaSAaron Young unsigned int copy_len; 33831762eaaSAaron Young struct sk_buff *skb; 33931762eaaSAaron Young int maxlen; 34031762eaaSAaron Young int err; 34131762eaaSAaron Young 34231762eaaSAaron Young err = -EMSGSIZE; 34331762eaaSAaron Young if (port->tso && port->tsolen > port->rmtu) 34431762eaaSAaron Young maxlen = port->tsolen; 34531762eaaSAaron Young else 34631762eaaSAaron Young maxlen = port->rmtu; 34731762eaaSAaron Young if (unlikely(len < ETH_ZLEN || len > maxlen)) { 34831762eaaSAaron Young dev->stats.rx_length_errors++; 34931762eaaSAaron Young goto out_dropped; 35031762eaaSAaron Young } 35131762eaaSAaron Young 35231762eaaSAaron Young skb = alloc_and_align_skb(dev, len); 35331762eaaSAaron Young err = -ENOMEM; 35431762eaaSAaron Young if (unlikely(!skb)) { 35531762eaaSAaron Young dev->stats.rx_missed_errors++; 35631762eaaSAaron Young goto out_dropped; 35731762eaaSAaron Young } 35831762eaaSAaron Young 35931762eaaSAaron Young copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U; 36031762eaaSAaron Young skb_put(skb, copy_len); 36131762eaaSAaron Young err = ldc_copy(port->vio.lp, LDC_COPY_IN, 36231762eaaSAaron Young skb->data, 
copy_len, 0, 36331762eaaSAaron Young desc->cookies, desc->ncookies); 36431762eaaSAaron Young if (unlikely(err < 0)) { 36531762eaaSAaron Young dev->stats.rx_frame_errors++; 36631762eaaSAaron Young goto out_free_skb; 36731762eaaSAaron Young } 36831762eaaSAaron Young 36931762eaaSAaron Young skb_pull(skb, VNET_PACKET_SKIP); 37031762eaaSAaron Young skb_trim(skb, len); 37131762eaaSAaron Young skb->protocol = eth_type_trans(skb, dev); 37231762eaaSAaron Young 37331762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 8)) { 37431762eaaSAaron Young struct vio_net_dext *dext = vio_net_ext(desc); 37531762eaaSAaron Young 37631762eaaSAaron Young skb_reset_network_header(skb); 37731762eaaSAaron Young 37831762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { 37931762eaaSAaron Young if (skb->protocol == ETH_P_IP) { 38031762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 38131762eaaSAaron Young 38231762eaaSAaron Young iph->check = 0; 38331762eaaSAaron Young ip_send_check(iph); 38431762eaaSAaron Young } 38531762eaaSAaron Young } 38631762eaaSAaron Young if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && 38731762eaaSAaron Young skb->ip_summed == CHECKSUM_NONE) { 38831762eaaSAaron Young if (skb->protocol == htons(ETH_P_IP)) { 38931762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 39031762eaaSAaron Young int ihl = iph->ihl * 4; 39131762eaaSAaron Young 39231762eaaSAaron Young skb_reset_transport_header(skb); 39331762eaaSAaron Young skb_set_transport_header(skb, ihl); 39431762eaaSAaron Young vnet_fullcsum(skb); 39531762eaaSAaron Young } 39631762eaaSAaron Young } 39731762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { 39831762eaaSAaron Young skb->ip_summed = CHECKSUM_PARTIAL; 39931762eaaSAaron Young skb->csum_level = 0; 40031762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK) 40131762eaaSAaron Young skb->csum_level = 1; 40231762eaaSAaron Young } 40331762eaaSAaron Young } 40431762eaaSAaron Young 40531762eaaSAaron Young skb->ip_summed = 
port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL; 40631762eaaSAaron Young 40731762eaaSAaron Young dev->stats.rx_packets++; 40831762eaaSAaron Young dev->stats.rx_bytes += len; 40931762eaaSAaron Young napi_gro_receive(&port->napi, skb); 41031762eaaSAaron Young return 0; 41131762eaaSAaron Young 41231762eaaSAaron Young out_free_skb: 41331762eaaSAaron Young kfree_skb(skb); 41431762eaaSAaron Young 41531762eaaSAaron Young out_dropped: 41631762eaaSAaron Young dev->stats.rx_dropped++; 41731762eaaSAaron Young return err; 41831762eaaSAaron Young } 41931762eaaSAaron Young 42031762eaaSAaron Young static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, 42131762eaaSAaron Young u32 start, u32 end, u8 vio_dring_state) 42231762eaaSAaron Young { 42331762eaaSAaron Young struct vio_dring_data hdr = { 42431762eaaSAaron Young .tag = { 42531762eaaSAaron Young .type = VIO_TYPE_DATA, 42631762eaaSAaron Young .stype = VIO_SUBTYPE_ACK, 42731762eaaSAaron Young .stype_env = VIO_DRING_DATA, 42831762eaaSAaron Young .sid = vio_send_sid(&port->vio), 42931762eaaSAaron Young }, 43031762eaaSAaron Young .dring_ident = dr->ident, 43131762eaaSAaron Young .start_idx = start, 43231762eaaSAaron Young .end_idx = end, 43331762eaaSAaron Young .state = vio_dring_state, 43431762eaaSAaron Young }; 43531762eaaSAaron Young int err, delay; 43631762eaaSAaron Young int retries = 0; 43731762eaaSAaron Young 43831762eaaSAaron Young hdr.seq = dr->snd_nxt; 43931762eaaSAaron Young delay = 1; 44031762eaaSAaron Young do { 44131762eaaSAaron Young err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); 44231762eaaSAaron Young if (err > 0) { 44331762eaaSAaron Young dr->snd_nxt++; 44431762eaaSAaron Young break; 44531762eaaSAaron Young } 44631762eaaSAaron Young udelay(delay); 44731762eaaSAaron Young if ((delay <<= 1) > 128) 44831762eaaSAaron Young delay = 128; 44931762eaaSAaron Young if (retries++ > VNET_MAX_RETRIES) { 45031762eaaSAaron Young pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n", 45131762eaaSAaron Young 
				port->raddr[0], port->raddr[1],
				port->raddr[2], port->raddr[3],
				port->raddr[4], port->raddr[5]);
			break;
		}
	} while (err == -EAGAIN);

	/* A failed STOPPED ack is remembered so the NAPI poll path can
	 * retry it later.
	 */
	if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
		port->stop_rx_idx = end;
		port->stop_rx = true;
	} else {
		port->stop_rx_idx = 0;
		port->stop_rx = false;
	}

	return err;
}

/* Fetch RX descriptor @index from the peer's dring into the per-port
 * scratch buffer (port->vio.desc_buf).  Returns the buffer or an
 * ERR_PTR on LDC failure — never NULL.
 */
static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
					struct vio_dring_state *dr,
					u32 index)
{
	struct vio_net_desc *desc = port->vio.desc_buf;
	int err;

	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return ERR_PTR(err);

	return desc;
}

/* Write the (locally modified) descriptor back to dring slot @index so
 * the peer sees its new state (e.g. VIO_DESC_DONE).
 */
static int put_rx_desc(struct vnet_port *port,
		       struct vio_dring_state *dr,
		       struct vio_net_desc *desc,
		       u32 index)
{
	int err;

	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return err;

	return 0;
}

/* Receive the single descriptor at @index: copy the packet in, mark
 * the descriptor DONE and write it back.  Sets *needs_ack from the
 * descriptor's ack flag.  Returns 0 on success, 1 if the descriptor
 * was not READY, or a negative errno.
 */
static int vnet_walk_rx_one(struct vnet_port *port,
			    struct vio_dring_state *dr,
			    u32 index, int *needs_ack)
{
	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
	struct vio_driver_state *vio = &port->vio;
	int err;

	/* get_rx_desc() returns the scratch buffer or an ERR_PTR, never
	 * NULL, so this BUG_ON cannot fire; kept as a defensive guard.
	 */
	BUG_ON(!desc);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;

	/* order the state read before the payload reads below */
	dma_rmb();

	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
	       desc->hdr.state, desc->hdr.ack,
	       desc->size, desc->ncookies,
	       desc->cookies[0].cookie_addr,
	       desc->cookies[0].cookie_size);

	err = vnet_rx_one(port, desc);
	if (err == -ECONNRESET)
		return err;
	trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid,
			  index, desc->hdr.ack);
	desc->hdr.state = VIO_DESC_DONE;
	err = put_rx_desc(port, dr, desc, index);
	if (err < 0)
		return err;
	*needs_ack = desc->hdr.ack;
	return 0;
}

/* Walk RX descriptors from @start up to (but not including) one past
 * @end, receiving up to @budget packets; *npkts is incremented per
 * packet received.
 */
static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
			u32 start, u32 end, int *npkts, int budget)
{
	struct
vio_driver_state *vio = &port->vio;
	int ack_start = -1, ack_end = -1;
	bool send_ack = true;

	/* end == (u32)-1 means "walk the whole ring"; otherwise walk one
	 * past the peer-supplied end index.
	 */
	end = (end == (u32)-1) ? vio_dring_prev(dr, start)
			       : vio_dring_next(dr, end);

	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

	while (start != end) {
		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);

		if (err == -ECONNRESET)
			return err;
		if (err != 0)
			break;
		(*npkts)++;
		/* track the contiguous range [ack_start, ack_end] that
		 * still needs to be ACKed back to the peer
		 */
		if (ack_start == -1)
			ack_start = start;
		ack_end = start;
		start = vio_dring_next(dr, start);
		/* the peer explicitly asked for an ACK on this descriptor */
		if (ack && start != end) {
			err = vnet_send_ack(port, dr, ack_start, ack_end,
					    VIO_DRING_ACTIVE);
			if (err == -ECONNRESET)
				return err;
			ack_start = -1;
		}
		if ((*npkts) >= budget) {
			/* NAPI budget exhausted; defer the STOPPED ack */
			send_ack = false;
			break;
		}
	}
	/* nothing was received this pass: ack the previous descriptor */
	if (unlikely(ack_start == -1)) {
		ack_end = vio_dring_prev(dr, start);
		ack_start = ack_end;
	}
	if (send_ack) {
		port->napi_resume = false;
		trace_vnet_tx_send_stopped_ack(port->vio._local_sid,
					       port->vio._peer_sid,
					       ack_end, *npkts);
		return vnet_send_ack(port, dr, ack_start, ack_end,
				     VIO_DRING_STOPPED);
	} else {
		/* remember where to resume when NAPI polls us again */
		trace_vnet_tx_defer_stopped_ack(port->vio._local_sid,
						port->vio._peer_sid,
						ack_end, *npkts);
		port->napi_resume = true;
		port->napi_stop_idx = ack_end;
		return 1;
	}
}

/* Handle a VIO_DRING_DATA message on the RX ring: validate the
 * sequence number and walk the advertised descriptor range.
 */
static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
		   int budget)
{
	struct vio_dring_data *pkt = msgbuf;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
	struct vio_driver_state *vio = &port->vio;

	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;
	if (unlikely(pkt->seq != dr->rcv_nxt)) {
		pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
		       pkt->seq, dr->rcv_nxt);
		return 0;
	}

	/* when resuming a budget-deferred walk, the sequence number was
	 * already consumed on the first pass
	 */
	if (!port->napi_resume)
		dr->rcv_nxt++;

	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */

	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
			    npkts, budget);
}

/* Is index @end within the not-yet-consumed window [cons, prod)? */
static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
	u32 idx = dr->cons;
	int found = 0;

	while (idx != dr->prod) {
		if (idx == end) {
			found = 1;
			break;
		}
		idx = vio_dring_next(dr, idx);
	}
	return found;
}

/* Peer ACKed TX descriptors up to pkt->end_idx: advance the consumer
 * index and, if the transmitter raced us, send the missing "start"
 * trigger on its behalf.  Returns 1 when the stopped TX queue has
 * enough room to be woken, 0 otherwise.
 */
static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	u32 end;
	struct vio_net_desc *desc;
	struct netdev_queue *txq;

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	dev = VNET_PORT_TO_NET_DEVICE(port);
	netif_tx_lock(dev);
	/* stale or duplicate ACK: nothing to consume */
	if (unlikely(!idx_is_pending(dr, end))) {
		netif_tx_unlock(dev);
		return 0;
	}

	/* sync for race conditions with vnet_start_xmit() and tell xmit it
	 * is time to send a trigger.
	 */
	trace_vnet_rx_stopped_ack(port->vio._local_sid,
				  port->vio._peer_sid, end);
	dr->cons = vio_dring_next(dr, end);
	desc = vio_dring_entry(dr, dr->cons);
	if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
		/* vnet_start_xmit() just populated this dring but missed
		 * sending the "start" LDC message to the consumer.
		 * Send a "start" trigger on its behalf.
		 */
		if (__vnet_tx_trigger(port, dr->cons) > 0)
			port->start_cons = false;
		else
			port->start_cons = true;
	} else {
		port->start_cons = true;
	}
	netif_tx_unlock(dev);

	txq = netdev_get_tx_queue(dev, port->q_index);
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}

static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

/* Log unexpected (non-ACK) replies to our multicast control messages;
 * always returns 0 so the event loop keeps running.
 */
static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
		       dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}

/* If the queue is stopped, wake it up so that we'll
 * send out another START message at the next TX.
70931762eaaSAaron Young */ 71031762eaaSAaron Young static void maybe_tx_wakeup(struct vnet_port *port) 71131762eaaSAaron Young { 71231762eaaSAaron Young struct netdev_queue *txq; 71331762eaaSAaron Young 71467d0719fSAaron Young txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), 71567d0719fSAaron Young port->q_index); 71631762eaaSAaron Young __netif_tx_lock(txq, smp_processor_id()); 71731762eaaSAaron Young if (likely(netif_tx_queue_stopped(txq))) { 71831762eaaSAaron Young struct vio_dring_state *dr; 71931762eaaSAaron Young 72031762eaaSAaron Young dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 72131762eaaSAaron Young netif_tx_wake_queue(txq); 72231762eaaSAaron Young } 72331762eaaSAaron Young __netif_tx_unlock(txq); 72431762eaaSAaron Young } 72531762eaaSAaron Young 72667d0719fSAaron Young bool sunvnet_port_is_up_common(struct vnet_port *vnet) 72731762eaaSAaron Young { 72831762eaaSAaron Young struct vio_driver_state *vio = &vnet->vio; 72931762eaaSAaron Young 73031762eaaSAaron Young return !!(vio->hs_state & VIO_HS_COMPLETE); 73131762eaaSAaron Young } 73267d0719fSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common); 73331762eaaSAaron Young 73431762eaaSAaron Young static int vnet_event_napi(struct vnet_port *port, int budget) 73531762eaaSAaron Young { 7368778b276SAaron Young struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); 73731762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 73831762eaaSAaron Young int tx_wakeup, err; 73931762eaaSAaron Young int npkts = 0; 74031762eaaSAaron Young int event = (port->rx_event & LDC_EVENT_RESET); 74131762eaaSAaron Young 74231762eaaSAaron Young ldc_ctrl: 74331762eaaSAaron Young if (unlikely(event == LDC_EVENT_RESET || 74431762eaaSAaron Young event == LDC_EVENT_UP)) { 74531762eaaSAaron Young vio_link_state_change(vio, event); 74631762eaaSAaron Young 74731762eaaSAaron Young if (event == LDC_EVENT_RESET) { 74831762eaaSAaron Young vnet_port_reset(port); 74931762eaaSAaron Young vio_port_up(vio); 7508778b276SAaron Young 
7518778b276SAaron Young /* If the device is running but its tx queue was 7528778b276SAaron Young * stopped (due to flow control), restart it. 7538778b276SAaron Young * This is necessary since vnet_port_reset() 7548778b276SAaron Young * clears the tx drings and thus we may never get 7558778b276SAaron Young * back a VIO_TYPE_DATA ACK packet - which is 7568778b276SAaron Young * the normal mechanism to restart the tx queue. 7578778b276SAaron Young */ 7588778b276SAaron Young if (netif_running(dev)) 7598778b276SAaron Young maybe_tx_wakeup(port); 76031762eaaSAaron Young } 76131762eaaSAaron Young port->rx_event = 0; 76231762eaaSAaron Young return 0; 76331762eaaSAaron Young } 76431762eaaSAaron Young /* We may have multiple LDC events in rx_event. Unroll send_events() */ 76531762eaaSAaron Young event = (port->rx_event & LDC_EVENT_UP); 76631762eaaSAaron Young port->rx_event &= ~(LDC_EVENT_RESET | LDC_EVENT_UP); 76731762eaaSAaron Young if (event == LDC_EVENT_UP) 76831762eaaSAaron Young goto ldc_ctrl; 76931762eaaSAaron Young event = port->rx_event; 77031762eaaSAaron Young if (!(event & LDC_EVENT_DATA_READY)) 77131762eaaSAaron Young return 0; 77231762eaaSAaron Young 77331762eaaSAaron Young /* we dont expect any other bits than RESET, UP, DATA_READY */ 77431762eaaSAaron Young BUG_ON(event != LDC_EVENT_DATA_READY); 77531762eaaSAaron Young 776dc153f85SAaron Young err = 0; 777dc153f85SAaron Young tx_wakeup = 0; 77831762eaaSAaron Young while (1) { 77931762eaaSAaron Young union { 78031762eaaSAaron Young struct vio_msg_tag tag; 78131762eaaSAaron Young u64 raw[8]; 78231762eaaSAaron Young } msgbuf; 78331762eaaSAaron Young 78431762eaaSAaron Young if (port->napi_resume) { 78531762eaaSAaron Young struct vio_dring_data *pkt = 78631762eaaSAaron Young (struct vio_dring_data *)&msgbuf; 78731762eaaSAaron Young struct vio_dring_state *dr = 78831762eaaSAaron Young &port->vio.drings[VIO_DRIVER_RX_RING]; 78931762eaaSAaron Young 79031762eaaSAaron Young pkt->tag.type = VIO_TYPE_DATA; 79131762eaaSAaron 
Young pkt->tag.stype = VIO_SUBTYPE_INFO; 79231762eaaSAaron Young pkt->tag.stype_env = VIO_DRING_DATA; 79331762eaaSAaron Young pkt->seq = dr->rcv_nxt; 794dc153f85SAaron Young pkt->start_idx = vio_dring_next(dr, 795dc153f85SAaron Young port->napi_stop_idx); 79631762eaaSAaron Young pkt->end_idx = -1; 79731762eaaSAaron Young goto napi_resume; 79831762eaaSAaron Young } 79931762eaaSAaron Young err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); 80031762eaaSAaron Young if (unlikely(err < 0)) { 80131762eaaSAaron Young if (err == -ECONNRESET) 80231762eaaSAaron Young vio_conn_reset(vio); 80331762eaaSAaron Young break; 80431762eaaSAaron Young } 80531762eaaSAaron Young if (err == 0) 80631762eaaSAaron Young break; 80731762eaaSAaron Young viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", 80831762eaaSAaron Young msgbuf.tag.type, 80931762eaaSAaron Young msgbuf.tag.stype, 81031762eaaSAaron Young msgbuf.tag.stype_env, 81131762eaaSAaron Young msgbuf.tag.sid); 81231762eaaSAaron Young err = vio_validate_sid(vio, &msgbuf.tag); 81331762eaaSAaron Young if (err < 0) 81431762eaaSAaron Young break; 81531762eaaSAaron Young napi_resume: 81631762eaaSAaron Young if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { 81731762eaaSAaron Young if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { 81867d0719fSAaron Young if (!sunvnet_port_is_up_common(port)) { 81931762eaaSAaron Young /* failures like handshake_failure() 82031762eaaSAaron Young * may have cleaned up dring, but 82131762eaaSAaron Young * NAPI polling may bring us here. 
82231762eaaSAaron Young */ 82331762eaaSAaron Young err = -ECONNRESET; 82431762eaaSAaron Young break; 82531762eaaSAaron Young } 82631762eaaSAaron Young err = vnet_rx(port, &msgbuf, &npkts, budget); 82731762eaaSAaron Young if (npkts >= budget) 82831762eaaSAaron Young break; 82931762eaaSAaron Young if (npkts == 0) 83031762eaaSAaron Young break; 83131762eaaSAaron Young } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { 83231762eaaSAaron Young err = vnet_ack(port, &msgbuf); 83331762eaaSAaron Young if (err > 0) 83431762eaaSAaron Young tx_wakeup |= err; 83531762eaaSAaron Young } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) { 83631762eaaSAaron Young err = vnet_nack(port, &msgbuf); 83731762eaaSAaron Young } 83831762eaaSAaron Young } else if (msgbuf.tag.type == VIO_TYPE_CTRL) { 83931762eaaSAaron Young if (msgbuf.tag.stype_env == VNET_MCAST_INFO) 84031762eaaSAaron Young err = handle_mcast(port, &msgbuf); 84131762eaaSAaron Young else 84231762eaaSAaron Young err = vio_control_pkt_engine(vio, &msgbuf); 84331762eaaSAaron Young if (err) 84431762eaaSAaron Young break; 84531762eaaSAaron Young } else { 84631762eaaSAaron Young err = vnet_handle_unknown(port, &msgbuf); 84731762eaaSAaron Young } 84831762eaaSAaron Young if (err == -ECONNRESET) 84931762eaaSAaron Young break; 85031762eaaSAaron Young } 85131762eaaSAaron Young if (unlikely(tx_wakeup && err != -ECONNRESET)) 85231762eaaSAaron Young maybe_tx_wakeup(port); 85331762eaaSAaron Young return npkts; 85431762eaaSAaron Young } 85531762eaaSAaron Young 85631762eaaSAaron Young int sunvnet_poll_common(struct napi_struct *napi, int budget) 85731762eaaSAaron Young { 85831762eaaSAaron Young struct vnet_port *port = container_of(napi, struct vnet_port, napi); 85931762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 86031762eaaSAaron Young int processed = vnet_event_napi(port, budget); 86131762eaaSAaron Young 86231762eaaSAaron Young if (processed < budget) { 86331762eaaSAaron Young napi_complete(napi); 86431762eaaSAaron Young 
port->rx_event &= ~LDC_EVENT_DATA_READY; 86531762eaaSAaron Young vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); 86631762eaaSAaron Young } 86731762eaaSAaron Young return processed; 86831762eaaSAaron Young } 86931762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_poll_common); 87031762eaaSAaron Young 87131762eaaSAaron Young void sunvnet_event_common(void *arg, int event) 87231762eaaSAaron Young { 87331762eaaSAaron Young struct vnet_port *port = arg; 87431762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 87531762eaaSAaron Young 87631762eaaSAaron Young port->rx_event |= event; 87731762eaaSAaron Young vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED); 87831762eaaSAaron Young napi_schedule(&port->napi); 87931762eaaSAaron Young } 88031762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_event_common); 88131762eaaSAaron Young 88231762eaaSAaron Young static int __vnet_tx_trigger(struct vnet_port *port, u32 start) 88331762eaaSAaron Young { 88431762eaaSAaron Young struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 88531762eaaSAaron Young struct vio_dring_data hdr = { 88631762eaaSAaron Young .tag = { 88731762eaaSAaron Young .type = VIO_TYPE_DATA, 88831762eaaSAaron Young .stype = VIO_SUBTYPE_INFO, 88931762eaaSAaron Young .stype_env = VIO_DRING_DATA, 89031762eaaSAaron Young .sid = vio_send_sid(&port->vio), 89131762eaaSAaron Young }, 89231762eaaSAaron Young .dring_ident = dr->ident, 89331762eaaSAaron Young .start_idx = start, 89431762eaaSAaron Young .end_idx = (u32)-1, 89531762eaaSAaron Young }; 89631762eaaSAaron Young int err, delay; 89731762eaaSAaron Young int retries = 0; 89831762eaaSAaron Young 89931762eaaSAaron Young if (port->stop_rx) { 90031762eaaSAaron Young trace_vnet_tx_pending_stopped_ack(port->vio._local_sid, 90131762eaaSAaron Young port->vio._peer_sid, 90231762eaaSAaron Young port->stop_rx_idx, -1); 90331762eaaSAaron Young err = vnet_send_ack(port, 90431762eaaSAaron Young &port->vio.drings[VIO_DRIVER_RX_RING], 90531762eaaSAaron Young 
port->stop_rx_idx, -1, 90631762eaaSAaron Young VIO_DRING_STOPPED); 90731762eaaSAaron Young if (err <= 0) 90831762eaaSAaron Young return err; 90931762eaaSAaron Young } 91031762eaaSAaron Young 91131762eaaSAaron Young hdr.seq = dr->snd_nxt; 91231762eaaSAaron Young delay = 1; 91331762eaaSAaron Young do { 91431762eaaSAaron Young err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); 91531762eaaSAaron Young if (err > 0) { 91631762eaaSAaron Young dr->snd_nxt++; 91731762eaaSAaron Young break; 91831762eaaSAaron Young } 91931762eaaSAaron Young udelay(delay); 92031762eaaSAaron Young if ((delay <<= 1) > 128) 92131762eaaSAaron Young delay = 128; 92231762eaaSAaron Young if (retries++ > VNET_MAX_RETRIES) 92331762eaaSAaron Young break; 92431762eaaSAaron Young } while (err == -EAGAIN); 92531762eaaSAaron Young trace_vnet_tx_trigger(port->vio._local_sid, 92631762eaaSAaron Young port->vio._peer_sid, start, err); 92731762eaaSAaron Young 92831762eaaSAaron Young return err; 92931762eaaSAaron Young } 93031762eaaSAaron Young 93131762eaaSAaron Young static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, 93231762eaaSAaron Young unsigned *pending) 93331762eaaSAaron Young { 93431762eaaSAaron Young struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 93531762eaaSAaron Young struct sk_buff *skb = NULL; 93631762eaaSAaron Young int i, txi; 93731762eaaSAaron Young 93831762eaaSAaron Young *pending = 0; 93931762eaaSAaron Young 94031762eaaSAaron Young txi = dr->prod; 94131762eaaSAaron Young for (i = 0; i < VNET_TX_RING_SIZE; ++i) { 94231762eaaSAaron Young struct vio_net_desc *d; 94331762eaaSAaron Young 94431762eaaSAaron Young --txi; 94531762eaaSAaron Young if (txi < 0) 94631762eaaSAaron Young txi = VNET_TX_RING_SIZE - 1; 94731762eaaSAaron Young 94831762eaaSAaron Young d = vio_dring_entry(dr, txi); 94931762eaaSAaron Young 95031762eaaSAaron Young if (d->hdr.state == VIO_DESC_READY) { 95131762eaaSAaron Young (*pending)++; 95231762eaaSAaron Young continue; 95331762eaaSAaron Young } 
95431762eaaSAaron Young if (port->tx_bufs[txi].skb) { 95531762eaaSAaron Young if (d->hdr.state != VIO_DESC_DONE) 95631762eaaSAaron Young pr_notice("invalid ring buffer state %d\n", 95731762eaaSAaron Young d->hdr.state); 95831762eaaSAaron Young BUG_ON(port->tx_bufs[txi].skb->next); 95931762eaaSAaron Young 96031762eaaSAaron Young port->tx_bufs[txi].skb->next = skb; 96131762eaaSAaron Young skb = port->tx_bufs[txi].skb; 96231762eaaSAaron Young port->tx_bufs[txi].skb = NULL; 96331762eaaSAaron Young 96431762eaaSAaron Young ldc_unmap(port->vio.lp, 96531762eaaSAaron Young port->tx_bufs[txi].cookies, 96631762eaaSAaron Young port->tx_bufs[txi].ncookies); 967dc153f85SAaron Young } else if (d->hdr.state == VIO_DESC_FREE) { 96831762eaaSAaron Young break; 969dc153f85SAaron Young } 97031762eaaSAaron Young d->hdr.state = VIO_DESC_FREE; 97131762eaaSAaron Young } 97231762eaaSAaron Young return skb; 97331762eaaSAaron Young } 97431762eaaSAaron Young 97531762eaaSAaron Young static inline void vnet_free_skbs(struct sk_buff *skb) 97631762eaaSAaron Young { 97731762eaaSAaron Young struct sk_buff *next; 97831762eaaSAaron Young 97931762eaaSAaron Young while (skb) { 98031762eaaSAaron Young next = skb->next; 98131762eaaSAaron Young skb->next = NULL; 98231762eaaSAaron Young dev_kfree_skb(skb); 98331762eaaSAaron Young skb = next; 98431762eaaSAaron Young } 98531762eaaSAaron Young } 98631762eaaSAaron Young 98731762eaaSAaron Young void sunvnet_clean_timer_expire_common(unsigned long port0) 98831762eaaSAaron Young { 98931762eaaSAaron Young struct vnet_port *port = (struct vnet_port *)port0; 99031762eaaSAaron Young struct sk_buff *freeskbs; 99131762eaaSAaron Young unsigned pending; 99231762eaaSAaron Young 99367d0719fSAaron Young netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port)); 99431762eaaSAaron Young freeskbs = vnet_clean_tx_ring(port, &pending); 99567d0719fSAaron Young netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port)); 99631762eaaSAaron Young 99731762eaaSAaron Young vnet_free_skbs(freeskbs); 
99831762eaaSAaron Young 99931762eaaSAaron Young if (pending) 100031762eaaSAaron Young (void)mod_timer(&port->clean_timer, 100131762eaaSAaron Young jiffies + VNET_CLEAN_TIMEOUT); 100231762eaaSAaron Young else 100331762eaaSAaron Young del_timer(&port->clean_timer); 100431762eaaSAaron Young } 100531762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common); 100631762eaaSAaron Young 100731762eaaSAaron Young static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb, 100831762eaaSAaron Young struct ldc_trans_cookie *cookies, int ncookies, 100931762eaaSAaron Young unsigned int map_perm) 101031762eaaSAaron Young { 101131762eaaSAaron Young int i, nc, err, blen; 101231762eaaSAaron Young 101331762eaaSAaron Young /* header */ 101431762eaaSAaron Young blen = skb_headlen(skb); 101531762eaaSAaron Young if (blen < ETH_ZLEN) 101631762eaaSAaron Young blen = ETH_ZLEN; 101731762eaaSAaron Young blen += VNET_PACKET_SKIP; 101831762eaaSAaron Young blen += 8 - (blen & 7); 101931762eaaSAaron Young 102031762eaaSAaron Young err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies, 102131762eaaSAaron Young ncookies, map_perm); 102231762eaaSAaron Young if (err < 0) 102331762eaaSAaron Young return err; 102431762eaaSAaron Young nc = err; 102531762eaaSAaron Young 102631762eaaSAaron Young for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 102731762eaaSAaron Young skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 102831762eaaSAaron Young u8 *vaddr; 102931762eaaSAaron Young 103031762eaaSAaron Young if (nc < ncookies) { 103131762eaaSAaron Young vaddr = kmap_atomic(skb_frag_page(f)); 103231762eaaSAaron Young blen = skb_frag_size(f); 103331762eaaSAaron Young blen += 8 - (blen & 7); 103431762eaaSAaron Young err = ldc_map_single(lp, vaddr + f->page_offset, 103531762eaaSAaron Young blen, cookies + nc, ncookies - nc, 103631762eaaSAaron Young map_perm); 103731762eaaSAaron Young kunmap_atomic(vaddr); 103831762eaaSAaron Young } else { 103931762eaaSAaron Young err = -EMSGSIZE; 
104031762eaaSAaron Young } 104131762eaaSAaron Young 104231762eaaSAaron Young if (err < 0) { 104331762eaaSAaron Young ldc_unmap(lp, cookies, nc); 104431762eaaSAaron Young return err; 104531762eaaSAaron Young } 104631762eaaSAaron Young nc += err; 104731762eaaSAaron Young } 104831762eaaSAaron Young return nc; 104931762eaaSAaron Young } 105031762eaaSAaron Young 105131762eaaSAaron Young static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) 105231762eaaSAaron Young { 105331762eaaSAaron Young struct sk_buff *nskb; 105431762eaaSAaron Young int i, len, pad, docopy; 105531762eaaSAaron Young 105631762eaaSAaron Young len = skb->len; 105731762eaaSAaron Young pad = 0; 105831762eaaSAaron Young if (len < ETH_ZLEN) { 105931762eaaSAaron Young pad += ETH_ZLEN - skb->len; 106031762eaaSAaron Young len += pad; 106131762eaaSAaron Young } 106231762eaaSAaron Young len += VNET_PACKET_SKIP; 106331762eaaSAaron Young pad += 8 - (len & 7); 106431762eaaSAaron Young 106531762eaaSAaron Young /* make sure we have enough cookies and alignment in every frag */ 106631762eaaSAaron Young docopy = skb_shinfo(skb)->nr_frags >= ncookies; 106731762eaaSAaron Young for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 106831762eaaSAaron Young skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 106931762eaaSAaron Young 107031762eaaSAaron Young docopy |= f->page_offset & 7; 107131762eaaSAaron Young } 107231762eaaSAaron Young if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP || 107331762eaaSAaron Young skb_tailroom(skb) < pad || 107431762eaaSAaron Young skb_headroom(skb) < VNET_PACKET_SKIP || docopy) { 107531762eaaSAaron Young int start = 0, offset; 107631762eaaSAaron Young __wsum csum; 107731762eaaSAaron Young 107831762eaaSAaron Young len = skb->len > ETH_ZLEN ? 
skb->len : ETH_ZLEN; 107931762eaaSAaron Young nskb = alloc_and_align_skb(skb->dev, len); 1080dc153f85SAaron Young if (!nskb) { 108131762eaaSAaron Young dev_kfree_skb(skb); 108231762eaaSAaron Young return NULL; 108331762eaaSAaron Young } 108431762eaaSAaron Young skb_reserve(nskb, VNET_PACKET_SKIP); 108531762eaaSAaron Young 108631762eaaSAaron Young nskb->protocol = skb->protocol; 108731762eaaSAaron Young offset = skb_mac_header(skb) - skb->data; 108831762eaaSAaron Young skb_set_mac_header(nskb, offset); 108931762eaaSAaron Young offset = skb_network_header(skb) - skb->data; 109031762eaaSAaron Young skb_set_network_header(nskb, offset); 109131762eaaSAaron Young offset = skb_transport_header(skb) - skb->data; 109231762eaaSAaron Young skb_set_transport_header(nskb, offset); 109331762eaaSAaron Young 109431762eaaSAaron Young offset = 0; 109531762eaaSAaron Young nskb->csum_offset = skb->csum_offset; 109631762eaaSAaron Young nskb->ip_summed = skb->ip_summed; 109731762eaaSAaron Young 109831762eaaSAaron Young if (skb->ip_summed == CHECKSUM_PARTIAL) 109931762eaaSAaron Young start = skb_checksum_start_offset(skb); 110031762eaaSAaron Young if (start) { 110131762eaaSAaron Young struct iphdr *iph = ip_hdr(nskb); 110231762eaaSAaron Young int offset = start + nskb->csum_offset; 110331762eaaSAaron Young 110431762eaaSAaron Young if (skb_copy_bits(skb, 0, nskb->data, start)) { 110531762eaaSAaron Young dev_kfree_skb(nskb); 110631762eaaSAaron Young dev_kfree_skb(skb); 110731762eaaSAaron Young return NULL; 110831762eaaSAaron Young } 110931762eaaSAaron Young *(__sum16 *)(skb->data + offset) = 0; 111031762eaaSAaron Young csum = skb_copy_and_csum_bits(skb, start, 111131762eaaSAaron Young nskb->data + start, 111231762eaaSAaron Young skb->len - start, 0); 111331762eaaSAaron Young if (iph->protocol == IPPROTO_TCP || 111431762eaaSAaron Young iph->protocol == IPPROTO_UDP) { 111531762eaaSAaron Young csum = csum_tcpudp_magic(iph->saddr, iph->daddr, 111631762eaaSAaron Young skb->len - start, 
111731762eaaSAaron Young iph->protocol, csum); 111831762eaaSAaron Young } 111931762eaaSAaron Young *(__sum16 *)(nskb->data + offset) = csum; 112031762eaaSAaron Young 112131762eaaSAaron Young nskb->ip_summed = CHECKSUM_NONE; 112231762eaaSAaron Young } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { 112331762eaaSAaron Young dev_kfree_skb(nskb); 112431762eaaSAaron Young dev_kfree_skb(skb); 112531762eaaSAaron Young return NULL; 112631762eaaSAaron Young } 112731762eaaSAaron Young (void)skb_put(nskb, skb->len); 112831762eaaSAaron Young if (skb_is_gso(skb)) { 112931762eaaSAaron Young skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; 113031762eaaSAaron Young skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; 113131762eaaSAaron Young } 113231762eaaSAaron Young nskb->queue_mapping = skb->queue_mapping; 113331762eaaSAaron Young dev_kfree_skb(skb); 113431762eaaSAaron Young skb = nskb; 113531762eaaSAaron Young } 113631762eaaSAaron Young return skb; 113731762eaaSAaron Young } 113831762eaaSAaron Young 113967d0719fSAaron Young static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, 114067d0719fSAaron Young struct vnet_port *(*vnet_tx_port) 114167d0719fSAaron Young (struct sk_buff *, struct net_device *)) 114231762eaaSAaron Young { 114367d0719fSAaron Young struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); 114431762eaaSAaron Young struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 114531762eaaSAaron Young struct sk_buff *segs; 114631762eaaSAaron Young int maclen, datalen; 114731762eaaSAaron Young int status; 114831762eaaSAaron Young int gso_size, gso_type, gso_segs; 114931762eaaSAaron Young int hlen = skb_transport_header(skb) - skb_mac_header(skb); 115031762eaaSAaron Young int proto = IPPROTO_IP; 115131762eaaSAaron Young 115231762eaaSAaron Young if (skb->protocol == htons(ETH_P_IP)) 115331762eaaSAaron Young proto = ip_hdr(skb)->protocol; 115431762eaaSAaron Young else if (skb->protocol == htons(ETH_P_IPV6)) 
115531762eaaSAaron Young proto = ipv6_hdr(skb)->nexthdr; 115631762eaaSAaron Young 1157dc153f85SAaron Young if (proto == IPPROTO_TCP) { 115831762eaaSAaron Young hlen += tcp_hdr(skb)->doff * 4; 1159dc153f85SAaron Young } else if (proto == IPPROTO_UDP) { 116031762eaaSAaron Young hlen += sizeof(struct udphdr); 1161dc153f85SAaron Young } else { 116231762eaaSAaron Young pr_err("vnet_handle_offloads GSO with unknown transport " 116331762eaaSAaron Young "protocol %d tproto %d\n", skb->protocol, proto); 116431762eaaSAaron Young hlen = 128; /* XXX */ 116531762eaaSAaron Young } 116631762eaaSAaron Young datalen = port->tsolen - hlen; 116731762eaaSAaron Young 116831762eaaSAaron Young gso_size = skb_shinfo(skb)->gso_size; 116931762eaaSAaron Young gso_type = skb_shinfo(skb)->gso_type; 117031762eaaSAaron Young gso_segs = skb_shinfo(skb)->gso_segs; 117131762eaaSAaron Young 117231762eaaSAaron Young if (port->tso && gso_size < datalen) 117331762eaaSAaron Young gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen); 117431762eaaSAaron Young 117531762eaaSAaron Young if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) { 117631762eaaSAaron Young struct netdev_queue *txq; 117731762eaaSAaron Young 117831762eaaSAaron Young txq = netdev_get_tx_queue(dev, port->q_index); 117931762eaaSAaron Young netif_tx_stop_queue(txq); 118031762eaaSAaron Young if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs) 118131762eaaSAaron Young return NETDEV_TX_BUSY; 118231762eaaSAaron Young netif_tx_wake_queue(txq); 118331762eaaSAaron Young } 118431762eaaSAaron Young 118531762eaaSAaron Young maclen = skb_network_header(skb) - skb_mac_header(skb); 118631762eaaSAaron Young skb_pull(skb, maclen); 118731762eaaSAaron Young 118831762eaaSAaron Young if (port->tso && gso_size < datalen) { 118931762eaaSAaron Young if (skb_unclone(skb, GFP_ATOMIC)) 119031762eaaSAaron Young goto out_dropped; 119131762eaaSAaron Young 119231762eaaSAaron Young /* segment to TSO size */ 119331762eaaSAaron Young skb_shinfo(skb)->gso_size = datalen; 
119431762eaaSAaron Young skb_shinfo(skb)->gso_segs = gso_segs; 119531762eaaSAaron Young } 119631762eaaSAaron Young segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); 119731762eaaSAaron Young if (IS_ERR(segs)) 119831762eaaSAaron Young goto out_dropped; 119931762eaaSAaron Young 120031762eaaSAaron Young skb_push(skb, maclen); 120131762eaaSAaron Young skb_reset_mac_header(skb); 120231762eaaSAaron Young 120331762eaaSAaron Young status = 0; 120431762eaaSAaron Young while (segs) { 120531762eaaSAaron Young struct sk_buff *curr = segs; 120631762eaaSAaron Young 120731762eaaSAaron Young segs = segs->next; 120831762eaaSAaron Young curr->next = NULL; 120931762eaaSAaron Young if (port->tso && curr->len > dev->mtu) { 121031762eaaSAaron Young skb_shinfo(curr)->gso_size = gso_size; 121131762eaaSAaron Young skb_shinfo(curr)->gso_type = gso_type; 121231762eaaSAaron Young skb_shinfo(curr)->gso_segs = 121331762eaaSAaron Young DIV_ROUND_UP(curr->len - hlen, gso_size); 1214dc153f85SAaron Young } else { 121531762eaaSAaron Young skb_shinfo(curr)->gso_size = 0; 1216dc153f85SAaron Young } 121731762eaaSAaron Young 121831762eaaSAaron Young skb_push(curr, maclen); 121931762eaaSAaron Young skb_reset_mac_header(curr); 122031762eaaSAaron Young memcpy(skb_mac_header(curr), skb_mac_header(skb), 122131762eaaSAaron Young maclen); 122231762eaaSAaron Young curr->csum_start = skb_transport_header(curr) - curr->head; 122331762eaaSAaron Young if (ip_hdr(curr)->protocol == IPPROTO_TCP) 122431762eaaSAaron Young curr->csum_offset = offsetof(struct tcphdr, check); 122531762eaaSAaron Young else if (ip_hdr(curr)->protocol == IPPROTO_UDP) 122631762eaaSAaron Young curr->csum_offset = offsetof(struct udphdr, check); 122731762eaaSAaron Young 122831762eaaSAaron Young if (!(status & NETDEV_TX_MASK)) 122967d0719fSAaron Young status = sunvnet_start_xmit_common(curr, dev, 123067d0719fSAaron Young vnet_tx_port); 123131762eaaSAaron Young if (status & NETDEV_TX_MASK) 123231762eaaSAaron Young dev_kfree_skb_any(curr); 
123331762eaaSAaron Young } 123431762eaaSAaron Young 123531762eaaSAaron Young if (!(status & NETDEV_TX_MASK)) 123631762eaaSAaron Young dev_kfree_skb_any(skb); 123731762eaaSAaron Young return status; 123831762eaaSAaron Young out_dropped: 123931762eaaSAaron Young dev->stats.tx_dropped++; 124031762eaaSAaron Young dev_kfree_skb_any(skb); 124131762eaaSAaron Young return NETDEV_TX_OK; 124231762eaaSAaron Young } 124331762eaaSAaron Young 124467d0719fSAaron Young int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, 124567d0719fSAaron Young struct vnet_port *(*vnet_tx_port) 124667d0719fSAaron Young (struct sk_buff *, struct net_device *)) 124731762eaaSAaron Young { 124831762eaaSAaron Young struct vnet_port *port = NULL; 124931762eaaSAaron Young struct vio_dring_state *dr; 125031762eaaSAaron Young struct vio_net_desc *d; 125131762eaaSAaron Young unsigned int len; 125231762eaaSAaron Young struct sk_buff *freeskbs = NULL; 125331762eaaSAaron Young int i, err, txi; 125431762eaaSAaron Young unsigned pending = 0; 125531762eaaSAaron Young struct netdev_queue *txq; 125631762eaaSAaron Young 125731762eaaSAaron Young rcu_read_lock(); 125867d0719fSAaron Young port = vnet_tx_port(skb, dev); 125931762eaaSAaron Young if (unlikely(!port)) { 126031762eaaSAaron Young rcu_read_unlock(); 126131762eaaSAaron Young goto out_dropped; 126231762eaaSAaron Young } 126331762eaaSAaron Young 126431762eaaSAaron Young if (skb_is_gso(skb) && skb->len > port->tsolen) { 126567d0719fSAaron Young err = vnet_handle_offloads(port, skb, vnet_tx_port); 126631762eaaSAaron Young rcu_read_unlock(); 126731762eaaSAaron Young return err; 126831762eaaSAaron Young } 126931762eaaSAaron Young 127031762eaaSAaron Young if (!skb_is_gso(skb) && skb->len > port->rmtu) { 127131762eaaSAaron Young unsigned long localmtu = port->rmtu - ETH_HLEN; 127231762eaaSAaron Young 127331762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 3)) 127431762eaaSAaron Young localmtu -= VLAN_HLEN; 127531762eaaSAaron Young 
127631762eaaSAaron Young if (skb->protocol == htons(ETH_P_IP)) { 127731762eaaSAaron Young struct flowi4 fl4; 127831762eaaSAaron Young struct rtable *rt = NULL; 127931762eaaSAaron Young 128031762eaaSAaron Young memset(&fl4, 0, sizeof(fl4)); 128131762eaaSAaron Young fl4.flowi4_oif = dev->ifindex; 128231762eaaSAaron Young fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); 128331762eaaSAaron Young fl4.daddr = ip_hdr(skb)->daddr; 128431762eaaSAaron Young fl4.saddr = ip_hdr(skb)->saddr; 128531762eaaSAaron Young 128631762eaaSAaron Young rt = ip_route_output_key(dev_net(dev), &fl4); 128731762eaaSAaron Young rcu_read_unlock(); 128831762eaaSAaron Young if (!IS_ERR(rt)) { 128931762eaaSAaron Young skb_dst_set(skb, &rt->dst); 129031762eaaSAaron Young icmp_send(skb, ICMP_DEST_UNREACH, 129131762eaaSAaron Young ICMP_FRAG_NEEDED, 129231762eaaSAaron Young htonl(localmtu)); 129331762eaaSAaron Young } 129431762eaaSAaron Young } 129531762eaaSAaron Young #if IS_ENABLED(CONFIG_IPV6) 129631762eaaSAaron Young else if (skb->protocol == htons(ETH_P_IPV6)) 129731762eaaSAaron Young icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); 129831762eaaSAaron Young #endif 129931762eaaSAaron Young goto out_dropped; 130031762eaaSAaron Young } 130131762eaaSAaron Young 130231762eaaSAaron Young skb = vnet_skb_shape(skb, 2); 130331762eaaSAaron Young 130431762eaaSAaron Young if (unlikely(!skb)) 130531762eaaSAaron Young goto out_dropped; 130631762eaaSAaron Young 130731762eaaSAaron Young if (skb->ip_summed == CHECKSUM_PARTIAL) 130831762eaaSAaron Young vnet_fullcsum(skb); 130931762eaaSAaron Young 131031762eaaSAaron Young dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 131131762eaaSAaron Young i = skb_get_queue_mapping(skb); 131231762eaaSAaron Young txq = netdev_get_tx_queue(dev, i); 131331762eaaSAaron Young if (unlikely(vnet_tx_dring_avail(dr) < 1)) { 131431762eaaSAaron Young if (!netif_tx_queue_stopped(txq)) { 131531762eaaSAaron Young netif_tx_stop_queue(txq); 131631762eaaSAaron Young 131731762eaaSAaron Young /* This is a 
hard error, log it. */ 131831762eaaSAaron Young netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); 131931762eaaSAaron Young dev->stats.tx_errors++; 132031762eaaSAaron Young } 132131762eaaSAaron Young rcu_read_unlock(); 132231762eaaSAaron Young return NETDEV_TX_BUSY; 132331762eaaSAaron Young } 132431762eaaSAaron Young 132531762eaaSAaron Young d = vio_dring_cur(dr); 132631762eaaSAaron Young 132731762eaaSAaron Young txi = dr->prod; 132831762eaaSAaron Young 132931762eaaSAaron Young freeskbs = vnet_clean_tx_ring(port, &pending); 133031762eaaSAaron Young 133131762eaaSAaron Young BUG_ON(port->tx_bufs[txi].skb); 133231762eaaSAaron Young 133331762eaaSAaron Young len = skb->len; 133431762eaaSAaron Young if (len < ETH_ZLEN) 133531762eaaSAaron Young len = ETH_ZLEN; 133631762eaaSAaron Young 133731762eaaSAaron Young err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2, 133831762eaaSAaron Young (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); 133931762eaaSAaron Young if (err < 0) { 134031762eaaSAaron Young netdev_info(dev, "tx buffer map error %d\n", err); 134131762eaaSAaron Young goto out_dropped; 134231762eaaSAaron Young } 134331762eaaSAaron Young 134431762eaaSAaron Young port->tx_bufs[txi].skb = skb; 134531762eaaSAaron Young skb = NULL; 134631762eaaSAaron Young port->tx_bufs[txi].ncookies = err; 134731762eaaSAaron Young 134831762eaaSAaron Young /* We don't rely on the ACKs to free the skb in vnet_start_xmit(), 134931762eaaSAaron Young * thus it is safe to not set VIO_ACK_ENABLE for each transmission: 135031762eaaSAaron Young * the protocol itself does not require it as long as the peer 135131762eaaSAaron Young * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED. 135231762eaaSAaron Young * 135331762eaaSAaron Young * An ACK for every packet in the ring is expensive as the 135431762eaaSAaron Young * sending of LDC messages is slow and affects performance. 
135531762eaaSAaron Young */ 135631762eaaSAaron Young d->hdr.ack = VIO_ACK_DISABLE; 135731762eaaSAaron Young d->size = len; 135831762eaaSAaron Young d->ncookies = port->tx_bufs[txi].ncookies; 135931762eaaSAaron Young for (i = 0; i < d->ncookies; i++) 136031762eaaSAaron Young d->cookies[i] = port->tx_bufs[txi].cookies[i]; 136131762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 7)) { 136231762eaaSAaron Young struct vio_net_dext *dext = vio_net_ext(d); 136331762eaaSAaron Young 136431762eaaSAaron Young memset(dext, 0, sizeof(*dext)); 136531762eaaSAaron Young if (skb_is_gso(port->tx_bufs[txi].skb)) { 136631762eaaSAaron Young dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb) 136731762eaaSAaron Young ->gso_size; 136831762eaaSAaron Young dext->flags |= VNET_PKT_IPV4_LSO; 136931762eaaSAaron Young } 137031762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 8) && 137131762eaaSAaron Young !port->switch_port) { 137231762eaaSAaron Young dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK; 137331762eaaSAaron Young dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK; 137431762eaaSAaron Young } 137531762eaaSAaron Young } 137631762eaaSAaron Young 137731762eaaSAaron Young /* This has to be a non-SMP write barrier because we are writing 137831762eaaSAaron Young * to memory which is shared with the peer LDOM. 137931762eaaSAaron Young */ 138031762eaaSAaron Young dma_wmb(); 138131762eaaSAaron Young 138231762eaaSAaron Young d->hdr.state = VIO_DESC_READY; 138331762eaaSAaron Young 138431762eaaSAaron Young /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent 138531762eaaSAaron Young * to notify the consumer that some descriptors are READY. 138631762eaaSAaron Young * After that "start" trigger, no additional triggers are needed until 138731762eaaSAaron Young * a DRING_STOPPED is received from the consumer. The dr->cons field 138831762eaaSAaron Young * (set up by vnet_ack()) has the value of the next dring index 138931762eaaSAaron Young * that has not yet been ack-ed. 
We send a "start" trigger here 139031762eaaSAaron Young * if, and only if, start_cons is true (reset it afterward). Conversely, 139131762eaaSAaron Young * vnet_ack() should check if the dring corresponding to cons 139231762eaaSAaron Young * is marked READY, but start_cons was false. 139331762eaaSAaron Young * If so, vnet_ack() should send out the missed "start" trigger. 139431762eaaSAaron Young * 139531762eaaSAaron Young * Note that the dma_wmb() above makes sure the cookies et al. are 139631762eaaSAaron Young * not globally visible before the VIO_DESC_READY, and that the 139731762eaaSAaron Young * stores are ordered correctly by the compiler. The consumer will 139831762eaaSAaron Young * not proceed until the VIO_DESC_READY is visible assuring that 139931762eaaSAaron Young * the consumer does not observe anything related to descriptors 140031762eaaSAaron Young * out of order. The HV trap from the LDC start trigger is the 140131762eaaSAaron Young * producer to consumer announcement that work is available to the 140231762eaaSAaron Young * consumer 140331762eaaSAaron Young */ 140431762eaaSAaron Young if (!port->start_cons) { /* previous trigger suffices */ 140531762eaaSAaron Young trace_vnet_skip_tx_trigger(port->vio._local_sid, 140631762eaaSAaron Young port->vio._peer_sid, dr->cons); 140731762eaaSAaron Young goto ldc_start_done; 140831762eaaSAaron Young } 140931762eaaSAaron Young 141031762eaaSAaron Young err = __vnet_tx_trigger(port, dr->cons); 141131762eaaSAaron Young if (unlikely(err < 0)) { 141231762eaaSAaron Young netdev_info(dev, "TX trigger error %d\n", err); 141331762eaaSAaron Young d->hdr.state = VIO_DESC_FREE; 141431762eaaSAaron Young skb = port->tx_bufs[txi].skb; 141531762eaaSAaron Young port->tx_bufs[txi].skb = NULL; 141631762eaaSAaron Young dev->stats.tx_carrier_errors++; 141731762eaaSAaron Young goto out_dropped; 141831762eaaSAaron Young } 141931762eaaSAaron Young 142031762eaaSAaron Young ldc_start_done: 142131762eaaSAaron Young port->start_cons = false; 
142231762eaaSAaron Young 142331762eaaSAaron Young dev->stats.tx_packets++; 142431762eaaSAaron Young dev->stats.tx_bytes += port->tx_bufs[txi].skb->len; 142531762eaaSAaron Young 142631762eaaSAaron Young dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); 142731762eaaSAaron Young if (unlikely(vnet_tx_dring_avail(dr) < 1)) { 142831762eaaSAaron Young netif_tx_stop_queue(txq); 142931762eaaSAaron Young if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) 143031762eaaSAaron Young netif_tx_wake_queue(txq); 143131762eaaSAaron Young } 143231762eaaSAaron Young 143331762eaaSAaron Young (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); 143431762eaaSAaron Young rcu_read_unlock(); 143531762eaaSAaron Young 143631762eaaSAaron Young vnet_free_skbs(freeskbs); 143731762eaaSAaron Young 143831762eaaSAaron Young return NETDEV_TX_OK; 143931762eaaSAaron Young 144031762eaaSAaron Young out_dropped: 144131762eaaSAaron Young if (pending) 144231762eaaSAaron Young (void)mod_timer(&port->clean_timer, 144331762eaaSAaron Young jiffies + VNET_CLEAN_TIMEOUT); 144431762eaaSAaron Young else if (port) 144531762eaaSAaron Young del_timer(&port->clean_timer); 144631762eaaSAaron Young if (port) 144731762eaaSAaron Young rcu_read_unlock(); 144831762eaaSAaron Young if (skb) 144931762eaaSAaron Young dev_kfree_skb(skb); 145031762eaaSAaron Young vnet_free_skbs(freeskbs); 145131762eaaSAaron Young dev->stats.tx_dropped++; 145231762eaaSAaron Young return NETDEV_TX_OK; 145331762eaaSAaron Young } 145431762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common); 145531762eaaSAaron Young 145631762eaaSAaron Young void sunvnet_tx_timeout_common(struct net_device *dev) 145731762eaaSAaron Young { 145831762eaaSAaron Young /* XXX Implement me XXX */ 145931762eaaSAaron Young } 146031762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common); 146131762eaaSAaron Young 146231762eaaSAaron Young int sunvnet_open_common(struct net_device *dev) 146331762eaaSAaron Young { 146431762eaaSAaron Young 
netif_carrier_on(dev); 146531762eaaSAaron Young netif_tx_start_all_queues(dev); 146631762eaaSAaron Young 146731762eaaSAaron Young return 0; 146831762eaaSAaron Young } 146931762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_open_common); 147031762eaaSAaron Young 147131762eaaSAaron Young int sunvnet_close_common(struct net_device *dev) 147231762eaaSAaron Young { 147331762eaaSAaron Young netif_tx_stop_all_queues(dev); 147431762eaaSAaron Young netif_carrier_off(dev); 147531762eaaSAaron Young 147631762eaaSAaron Young return 0; 147731762eaaSAaron Young } 147831762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_close_common); 147931762eaaSAaron Young 148031762eaaSAaron Young static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) 148131762eaaSAaron Young { 148231762eaaSAaron Young struct vnet_mcast_entry *m; 148331762eaaSAaron Young 148431762eaaSAaron Young for (m = vp->mcast_list; m; m = m->next) { 148531762eaaSAaron Young if (ether_addr_equal(m->addr, addr)) 148631762eaaSAaron Young return m; 148731762eaaSAaron Young } 148831762eaaSAaron Young return NULL; 148931762eaaSAaron Young } 149031762eaaSAaron Young 149131762eaaSAaron Young static void __update_mc_list(struct vnet *vp, struct net_device *dev) 149231762eaaSAaron Young { 149331762eaaSAaron Young struct netdev_hw_addr *ha; 149431762eaaSAaron Young 149531762eaaSAaron Young netdev_for_each_mc_addr(ha, dev) { 149631762eaaSAaron Young struct vnet_mcast_entry *m; 149731762eaaSAaron Young 149831762eaaSAaron Young m = __vnet_mc_find(vp, ha->addr); 149931762eaaSAaron Young if (m) { 150031762eaaSAaron Young m->hit = 1; 150131762eaaSAaron Young continue; 150231762eaaSAaron Young } 150331762eaaSAaron Young 150431762eaaSAaron Young if (!m) { 150531762eaaSAaron Young m = kzalloc(sizeof(*m), GFP_ATOMIC); 150631762eaaSAaron Young if (!m) 150731762eaaSAaron Young continue; 150831762eaaSAaron Young memcpy(m->addr, ha->addr, ETH_ALEN); 150931762eaaSAaron Young m->hit = 1; 151031762eaaSAaron Young 151131762eaaSAaron Young 
m->next = vp->mcast_list; 151231762eaaSAaron Young vp->mcast_list = m; 151331762eaaSAaron Young } 151431762eaaSAaron Young } 151531762eaaSAaron Young } 151631762eaaSAaron Young 151731762eaaSAaron Young static void __send_mc_list(struct vnet *vp, struct vnet_port *port) 151831762eaaSAaron Young { 151931762eaaSAaron Young struct vio_net_mcast_info info; 152031762eaaSAaron Young struct vnet_mcast_entry *m, **pp; 152131762eaaSAaron Young int n_addrs; 152231762eaaSAaron Young 152331762eaaSAaron Young memset(&info, 0, sizeof(info)); 152431762eaaSAaron Young 152531762eaaSAaron Young info.tag.type = VIO_TYPE_CTRL; 152631762eaaSAaron Young info.tag.stype = VIO_SUBTYPE_INFO; 152731762eaaSAaron Young info.tag.stype_env = VNET_MCAST_INFO; 152831762eaaSAaron Young info.tag.sid = vio_send_sid(&port->vio); 152931762eaaSAaron Young info.set = 1; 153031762eaaSAaron Young 153131762eaaSAaron Young n_addrs = 0; 153231762eaaSAaron Young for (m = vp->mcast_list; m; m = m->next) { 153331762eaaSAaron Young if (m->sent) 153431762eaaSAaron Young continue; 153531762eaaSAaron Young m->sent = 1; 153631762eaaSAaron Young memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], 153731762eaaSAaron Young m->addr, ETH_ALEN); 153831762eaaSAaron Young if (++n_addrs == VNET_NUM_MCAST) { 153931762eaaSAaron Young info.count = n_addrs; 154031762eaaSAaron Young 154131762eaaSAaron Young (void)vio_ldc_send(&port->vio, &info, 154231762eaaSAaron Young sizeof(info)); 154331762eaaSAaron Young n_addrs = 0; 154431762eaaSAaron Young } 154531762eaaSAaron Young } 154631762eaaSAaron Young if (n_addrs) { 154731762eaaSAaron Young info.count = n_addrs; 154831762eaaSAaron Young (void)vio_ldc_send(&port->vio, &info, sizeof(info)); 154931762eaaSAaron Young } 155031762eaaSAaron Young 155131762eaaSAaron Young info.set = 0; 155231762eaaSAaron Young 155331762eaaSAaron Young n_addrs = 0; 155431762eaaSAaron Young pp = &vp->mcast_list; 155531762eaaSAaron Young while ((m = *pp) != NULL) { 155631762eaaSAaron Young if (m->hit) { 
155731762eaaSAaron Young m->hit = 0; 155831762eaaSAaron Young pp = &m->next; 155931762eaaSAaron Young continue; 156031762eaaSAaron Young } 156131762eaaSAaron Young 156231762eaaSAaron Young memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], 156331762eaaSAaron Young m->addr, ETH_ALEN); 156431762eaaSAaron Young if (++n_addrs == VNET_NUM_MCAST) { 156531762eaaSAaron Young info.count = n_addrs; 156631762eaaSAaron Young (void)vio_ldc_send(&port->vio, &info, 156731762eaaSAaron Young sizeof(info)); 156831762eaaSAaron Young n_addrs = 0; 156931762eaaSAaron Young } 157031762eaaSAaron Young 157131762eaaSAaron Young *pp = m->next; 157231762eaaSAaron Young kfree(m); 157331762eaaSAaron Young } 157431762eaaSAaron Young if (n_addrs) { 157531762eaaSAaron Young info.count = n_addrs; 157631762eaaSAaron Young (void)vio_ldc_send(&port->vio, &info, sizeof(info)); 157731762eaaSAaron Young } 157831762eaaSAaron Young } 157931762eaaSAaron Young 158067d0719fSAaron Young void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp) 158131762eaaSAaron Young { 158231762eaaSAaron Young struct vnet_port *port; 158331762eaaSAaron Young 158431762eaaSAaron Young rcu_read_lock(); 158531762eaaSAaron Young list_for_each_entry_rcu(port, &vp->port_list, list) { 158631762eaaSAaron Young if (port->switch_port) { 158731762eaaSAaron Young __update_mc_list(vp, dev); 158831762eaaSAaron Young __send_mc_list(vp, port); 158931762eaaSAaron Young break; 159031762eaaSAaron Young } 159131762eaaSAaron Young } 159231762eaaSAaron Young rcu_read_unlock(); 159331762eaaSAaron Young } 159431762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common); 159531762eaaSAaron Young 159631762eaaSAaron Young int sunvnet_set_mac_addr_common(struct net_device *dev, void *p) 159731762eaaSAaron Young { 159831762eaaSAaron Young return -EINVAL; 159931762eaaSAaron Young } 160031762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common); 160131762eaaSAaron Young 160231762eaaSAaron Young void 
sunvnet_port_free_tx_bufs_common(struct vnet_port *port) 160331762eaaSAaron Young { 160431762eaaSAaron Young struct vio_dring_state *dr; 160531762eaaSAaron Young int i; 160631762eaaSAaron Young 160731762eaaSAaron Young dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 160831762eaaSAaron Young 1609dc153f85SAaron Young if (!dr->base) 161031762eaaSAaron Young return; 161131762eaaSAaron Young 161231762eaaSAaron Young for (i = 0; i < VNET_TX_RING_SIZE; i++) { 161331762eaaSAaron Young struct vio_net_desc *d; 161431762eaaSAaron Young void *skb = port->tx_bufs[i].skb; 161531762eaaSAaron Young 161631762eaaSAaron Young if (!skb) 161731762eaaSAaron Young continue; 161831762eaaSAaron Young 161931762eaaSAaron Young d = vio_dring_entry(dr, i); 162031762eaaSAaron Young 162131762eaaSAaron Young ldc_unmap(port->vio.lp, 162231762eaaSAaron Young port->tx_bufs[i].cookies, 162331762eaaSAaron Young port->tx_bufs[i].ncookies); 162431762eaaSAaron Young dev_kfree_skb(skb); 162531762eaaSAaron Young port->tx_bufs[i].skb = NULL; 162631762eaaSAaron Young d->hdr.state = VIO_DESC_FREE; 162731762eaaSAaron Young } 162831762eaaSAaron Young ldc_free_exp_dring(port->vio.lp, dr->base, 162931762eaaSAaron Young (dr->entry_size * dr->num_entries), 163031762eaaSAaron Young dr->cookies, dr->ncookies); 163131762eaaSAaron Young dr->base = NULL; 163231762eaaSAaron Young dr->entry_size = 0; 163331762eaaSAaron Young dr->num_entries = 0; 163431762eaaSAaron Young dr->pending = 0; 163531762eaaSAaron Young dr->ncookies = 0; 163631762eaaSAaron Young } 163731762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common); 163831762eaaSAaron Young 163931762eaaSAaron Young static void vnet_port_reset(struct vnet_port *port) 164031762eaaSAaron Young { 164131762eaaSAaron Young del_timer(&port->clean_timer); 164231762eaaSAaron Young sunvnet_port_free_tx_bufs_common(port); 164331762eaaSAaron Young port->rmtu = 0; 164431762eaaSAaron Young port->tso = true; 164531762eaaSAaron Young port->tsolen = 0; 164631762eaaSAaron Young 
} 164731762eaaSAaron Young 164831762eaaSAaron Young static int vnet_port_alloc_tx_ring(struct vnet_port *port) 164931762eaaSAaron Young { 165031762eaaSAaron Young struct vio_dring_state *dr; 165131762eaaSAaron Young unsigned long len, elen; 165231762eaaSAaron Young int i, err, ncookies; 165331762eaaSAaron Young void *dring; 165431762eaaSAaron Young 165531762eaaSAaron Young dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 165631762eaaSAaron Young 165731762eaaSAaron Young elen = sizeof(struct vio_net_desc) + 165831762eaaSAaron Young sizeof(struct ldc_trans_cookie) * 2; 165931762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 7)) 166031762eaaSAaron Young elen += sizeof(struct vio_net_dext); 166131762eaaSAaron Young len = VNET_TX_RING_SIZE * elen; 166231762eaaSAaron Young 166331762eaaSAaron Young ncookies = VIO_MAX_RING_COOKIES; 166431762eaaSAaron Young dring = ldc_alloc_exp_dring(port->vio.lp, len, 166531762eaaSAaron Young dr->cookies, &ncookies, 166631762eaaSAaron Young (LDC_MAP_SHADOW | 166731762eaaSAaron Young LDC_MAP_DIRECT | 166831762eaaSAaron Young LDC_MAP_RW)); 166931762eaaSAaron Young if (IS_ERR(dring)) { 167031762eaaSAaron Young err = PTR_ERR(dring); 167131762eaaSAaron Young goto err_out; 167231762eaaSAaron Young } 167331762eaaSAaron Young 167431762eaaSAaron Young dr->base = dring; 167531762eaaSAaron Young dr->entry_size = elen; 167631762eaaSAaron Young dr->num_entries = VNET_TX_RING_SIZE; 1677dc153f85SAaron Young dr->prod = 0; 1678dc153f85SAaron Young dr->cons = 0; 167931762eaaSAaron Young port->start_cons = true; /* need an initial trigger */ 168031762eaaSAaron Young dr->pending = VNET_TX_RING_SIZE; 168131762eaaSAaron Young dr->ncookies = ncookies; 168231762eaaSAaron Young 168331762eaaSAaron Young for (i = 0; i < VNET_TX_RING_SIZE; ++i) { 168431762eaaSAaron Young struct vio_net_desc *d; 168531762eaaSAaron Young 168631762eaaSAaron Young d = vio_dring_entry(dr, i); 168731762eaaSAaron Young d->hdr.state = VIO_DESC_FREE; 168831762eaaSAaron Young } 
168931762eaaSAaron Young return 0; 169031762eaaSAaron Young 169131762eaaSAaron Young err_out: 169231762eaaSAaron Young sunvnet_port_free_tx_bufs_common(port); 169331762eaaSAaron Young 169431762eaaSAaron Young return err; 169531762eaaSAaron Young } 169631762eaaSAaron Young 169731762eaaSAaron Young #ifdef CONFIG_NET_POLL_CONTROLLER 169867d0719fSAaron Young void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp) 169931762eaaSAaron Young { 170031762eaaSAaron Young struct vnet_port *port; 170131762eaaSAaron Young unsigned long flags; 170231762eaaSAaron Young 170331762eaaSAaron Young spin_lock_irqsave(&vp->lock, flags); 170431762eaaSAaron Young if (!list_empty(&vp->port_list)) { 170531762eaaSAaron Young port = list_entry(vp->port_list.next, struct vnet_port, list); 170631762eaaSAaron Young napi_schedule(&port->napi); 170731762eaaSAaron Young } 170831762eaaSAaron Young spin_unlock_irqrestore(&vp->lock, flags); 170931762eaaSAaron Young } 171031762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common); 171131762eaaSAaron Young #endif 171231762eaaSAaron Young 171331762eaaSAaron Young void sunvnet_port_add_txq_common(struct vnet_port *port) 171431762eaaSAaron Young { 171531762eaaSAaron Young struct vnet *vp = port->vp; 171631762eaaSAaron Young int n; 171731762eaaSAaron Young 171831762eaaSAaron Young n = vp->nports++; 171931762eaaSAaron Young n = n & (VNET_MAX_TXQS - 1); 172031762eaaSAaron Young port->q_index = n; 172167d0719fSAaron Young netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), 172267d0719fSAaron Young port->q_index)); 172331762eaaSAaron Young } 172431762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common); 172531762eaaSAaron Young 172631762eaaSAaron Young void sunvnet_port_rm_txq_common(struct vnet_port *port) 172731762eaaSAaron Young { 172831762eaaSAaron Young port->vp->nports--; 172967d0719fSAaron Young netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), 173067d0719fSAaron Young 
port->q_index)); 173131762eaaSAaron Young } 173231762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common); 1733