131762eaaSAaron Young /* sunvnet.c: Sun LDOM Virtual Network Driver. 231762eaaSAaron Young * 331762eaaSAaron Young * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> 467d0719fSAaron Young * Copyright (C) 2016 Oracle. All rights reserved. 531762eaaSAaron Young */ 631762eaaSAaron Young 731762eaaSAaron Young #include <linux/module.h> 831762eaaSAaron Young #include <linux/kernel.h> 931762eaaSAaron Young #include <linux/types.h> 1031762eaaSAaron Young #include <linux/slab.h> 1131762eaaSAaron Young #include <linux/delay.h> 1231762eaaSAaron Young #include <linux/init.h> 1331762eaaSAaron Young #include <linux/netdevice.h> 1431762eaaSAaron Young #include <linux/ethtool.h> 1531762eaaSAaron Young #include <linux/etherdevice.h> 1631762eaaSAaron Young #include <linux/mutex.h> 1731762eaaSAaron Young #include <linux/highmem.h> 1831762eaaSAaron Young #include <linux/if_vlan.h> 1931762eaaSAaron Young #define CREATE_TRACE_POINTS 2031762eaaSAaron Young #include <trace/events/sunvnet.h> 2131762eaaSAaron Young 2231762eaaSAaron Young #if IS_ENABLED(CONFIG_IPV6) 2331762eaaSAaron Young #include <linux/icmpv6.h> 2431762eaaSAaron Young #endif 2531762eaaSAaron Young 2631762eaaSAaron Young #include <net/ip.h> 2731762eaaSAaron Young #include <net/icmp.h> 2831762eaaSAaron Young #include <net/route.h> 2931762eaaSAaron Young 3031762eaaSAaron Young #include <asm/vio.h> 3131762eaaSAaron Young #include <asm/ldc.h> 3231762eaaSAaron Young 3331762eaaSAaron Young #include "sunvnet_common.h" 3431762eaaSAaron Young 3531762eaaSAaron Young /* Heuristic for the number of times to exponentially backoff and 3631762eaaSAaron Young * retry sending an LDC trigger when EAGAIN is encountered 3731762eaaSAaron Young */ 3831762eaaSAaron Young #define VNET_MAX_RETRIES 10 3931762eaaSAaron Young 4031762eaaSAaron Young static int __vnet_tx_trigger(struct vnet_port *port, u32 start); 4131762eaaSAaron Young static void vnet_port_reset(struct vnet_port *port); 4231762eaaSAaron Young 4331762eaaSAaron Young 
static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) 4431762eaaSAaron Young { 4531762eaaSAaron Young return vio_dring_avail(dr, VNET_TX_RING_SIZE); 4631762eaaSAaron Young } 4731762eaaSAaron Young 4831762eaaSAaron Young static int vnet_handle_unknown(struct vnet_port *port, void *arg) 4931762eaaSAaron Young { 5031762eaaSAaron Young struct vio_msg_tag *pkt = arg; 5131762eaaSAaron Young 5231762eaaSAaron Young pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n", 5331762eaaSAaron Young pkt->type, pkt->stype, pkt->stype_env, pkt->sid); 5431762eaaSAaron Young pr_err("Resetting connection\n"); 5531762eaaSAaron Young 5631762eaaSAaron Young ldc_disconnect(port->vio.lp); 5731762eaaSAaron Young 5831762eaaSAaron Young return -ECONNRESET; 5931762eaaSAaron Young } 6031762eaaSAaron Young 6131762eaaSAaron Young static int vnet_port_alloc_tx_ring(struct vnet_port *port); 6231762eaaSAaron Young 6331762eaaSAaron Young int sunvnet_send_attr_common(struct vio_driver_state *vio) 6431762eaaSAaron Young { 6531762eaaSAaron Young struct vnet_port *port = to_vnet_port(vio); 6667d0719fSAaron Young struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); 6731762eaaSAaron Young struct vio_net_attr_info pkt; 6831762eaaSAaron Young int framelen = ETH_FRAME_LEN; 6931762eaaSAaron Young int i, err; 7031762eaaSAaron Young 7131762eaaSAaron Young err = vnet_port_alloc_tx_ring(to_vnet_port(vio)); 7231762eaaSAaron Young if (err) 7331762eaaSAaron Young return err; 7431762eaaSAaron Young 7531762eaaSAaron Young memset(&pkt, 0, sizeof(pkt)); 7631762eaaSAaron Young pkt.tag.type = VIO_TYPE_CTRL; 7731762eaaSAaron Young pkt.tag.stype = VIO_SUBTYPE_INFO; 7831762eaaSAaron Young pkt.tag.stype_env = VIO_ATTR_INFO; 7931762eaaSAaron Young pkt.tag.sid = vio_send_sid(vio); 8031762eaaSAaron Young if (vio_version_before(vio, 1, 2)) 8131762eaaSAaron Young pkt.xfer_mode = VIO_DRING_MODE; 8231762eaaSAaron Young else 8331762eaaSAaron Young pkt.xfer_mode = VIO_NEW_DRING_MODE; 8431762eaaSAaron Young pkt.addr_type = 
VNET_ADDR_ETHERMAC; 8531762eaaSAaron Young pkt.ack_freq = 0; 8631762eaaSAaron Young for (i = 0; i < 6; i++) 8731762eaaSAaron Young pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); 8831762eaaSAaron Young if (vio_version_after(vio, 1, 3)) { 8931762eaaSAaron Young if (port->rmtu) { 9031762eaaSAaron Young port->rmtu = min(VNET_MAXPACKET, port->rmtu); 9131762eaaSAaron Young pkt.mtu = port->rmtu; 9231762eaaSAaron Young } else { 9331762eaaSAaron Young port->rmtu = VNET_MAXPACKET; 9431762eaaSAaron Young pkt.mtu = port->rmtu; 9531762eaaSAaron Young } 9631762eaaSAaron Young if (vio_version_after_eq(vio, 1, 6)) 9731762eaaSAaron Young pkt.options = VIO_TX_DRING; 9831762eaaSAaron Young } else if (vio_version_before(vio, 1, 3)) { 9931762eaaSAaron Young pkt.mtu = framelen; 10031762eaaSAaron Young } else { /* v1.3 */ 10131762eaaSAaron Young pkt.mtu = framelen + VLAN_HLEN; 10231762eaaSAaron Young } 10331762eaaSAaron Young 10431762eaaSAaron Young pkt.cflags = 0; 10531762eaaSAaron Young if (vio_version_after_eq(vio, 1, 7) && port->tso) { 10631762eaaSAaron Young pkt.cflags |= VNET_LSO_IPV4_CAPAB; 10731762eaaSAaron Young if (!port->tsolen) 10831762eaaSAaron Young port->tsolen = VNET_MAXTSO; 10931762eaaSAaron Young pkt.ipv4_lso_maxlen = port->tsolen; 11031762eaaSAaron Young } 11131762eaaSAaron Young 11231762eaaSAaron Young pkt.plnk_updt = PHYSLINK_UPDATE_NONE; 11331762eaaSAaron Young 11431762eaaSAaron Young viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " 11531762eaaSAaron Young "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " 11631762eaaSAaron Young "cflags[0x%04x] lso_max[%u]\n", 11731762eaaSAaron Young pkt.xfer_mode, pkt.addr_type, 11831762eaaSAaron Young (unsigned long long)pkt.addr, 11931762eaaSAaron Young pkt.ack_freq, pkt.plnk_updt, pkt.options, 12031762eaaSAaron Young (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen); 12131762eaaSAaron Young 12231762eaaSAaron Young 12331762eaaSAaron Young return vio_ldc_send(vio, &pkt, sizeof(pkt)); 
12431762eaaSAaron Young } 12531762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_send_attr_common); 12631762eaaSAaron Young 12731762eaaSAaron Young static int handle_attr_info(struct vio_driver_state *vio, 12831762eaaSAaron Young struct vio_net_attr_info *pkt) 12931762eaaSAaron Young { 13031762eaaSAaron Young struct vnet_port *port = to_vnet_port(vio); 13131762eaaSAaron Young u64 localmtu; 13231762eaaSAaron Young u8 xfer_mode; 13331762eaaSAaron Young 13431762eaaSAaron Young viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " 13531762eaaSAaron Young "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " 13631762eaaSAaron Young " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", 13731762eaaSAaron Young pkt->xfer_mode, pkt->addr_type, 13831762eaaSAaron Young (unsigned long long)pkt->addr, 13931762eaaSAaron Young pkt->ack_freq, pkt->plnk_updt, pkt->options, 14031762eaaSAaron Young (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, 14131762eaaSAaron Young pkt->ipv4_lso_maxlen); 14231762eaaSAaron Young 14331762eaaSAaron Young pkt->tag.sid = vio_send_sid(vio); 14431762eaaSAaron Young 14531762eaaSAaron Young xfer_mode = pkt->xfer_mode; 14631762eaaSAaron Young /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */ 14731762eaaSAaron Young if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE) 14831762eaaSAaron Young xfer_mode = VIO_NEW_DRING_MODE; 14931762eaaSAaron Young 15031762eaaSAaron Young /* MTU negotiation: 15131762eaaSAaron Young * < v1.3 - ETH_FRAME_LEN exactly 15231762eaaSAaron Young * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change 15331762eaaSAaron Young * pkt->mtu for ACK 15431762eaaSAaron Young * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly 15531762eaaSAaron Young */ 15631762eaaSAaron Young if (vio_version_before(vio, 1, 3)) { 15731762eaaSAaron Young localmtu = ETH_FRAME_LEN; 15831762eaaSAaron Young } else if (vio_version_after(vio, 1, 3)) { 15931762eaaSAaron Young localmtu = port->rmtu ? 
port->rmtu : VNET_MAXPACKET; 16031762eaaSAaron Young localmtu = min(pkt->mtu, localmtu); 16131762eaaSAaron Young pkt->mtu = localmtu; 16231762eaaSAaron Young } else { /* v1.3 */ 16331762eaaSAaron Young localmtu = ETH_FRAME_LEN + VLAN_HLEN; 16431762eaaSAaron Young } 16531762eaaSAaron Young port->rmtu = localmtu; 16631762eaaSAaron Young 16731762eaaSAaron Young /* LSO negotiation */ 16831762eaaSAaron Young if (vio_version_after_eq(vio, 1, 7)) 16931762eaaSAaron Young port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB); 17031762eaaSAaron Young else 17131762eaaSAaron Young port->tso = false; 17231762eaaSAaron Young if (port->tso) { 17331762eaaSAaron Young if (!port->tsolen) 17431762eaaSAaron Young port->tsolen = VNET_MAXTSO; 17531762eaaSAaron Young port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen); 17631762eaaSAaron Young if (port->tsolen < VNET_MINTSO) { 17731762eaaSAaron Young port->tso = false; 17831762eaaSAaron Young port->tsolen = 0; 17931762eaaSAaron Young pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; 18031762eaaSAaron Young } 18131762eaaSAaron Young pkt->ipv4_lso_maxlen = port->tsolen; 18231762eaaSAaron Young } else { 18331762eaaSAaron Young pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; 18431762eaaSAaron Young pkt->ipv4_lso_maxlen = 0; 18531762eaaSAaron Young } 18631762eaaSAaron Young 18731762eaaSAaron Young /* for version >= 1.6, ACK packet mode we support */ 18831762eaaSAaron Young if (vio_version_after_eq(vio, 1, 6)) { 18931762eaaSAaron Young pkt->xfer_mode = VIO_NEW_DRING_MODE; 19031762eaaSAaron Young pkt->options = VIO_TX_DRING; 19131762eaaSAaron Young } 19231762eaaSAaron Young 19331762eaaSAaron Young if (!(xfer_mode | VIO_NEW_DRING_MODE) || 19431762eaaSAaron Young pkt->addr_type != VNET_ADDR_ETHERMAC || 19531762eaaSAaron Young pkt->mtu != localmtu) { 19631762eaaSAaron Young viodbg(HS, "SEND NET ATTR NACK\n"); 19731762eaaSAaron Young 19831762eaaSAaron Young pkt->tag.stype = VIO_SUBTYPE_NACK; 19931762eaaSAaron Young 20031762eaaSAaron Young (void) vio_ldc_send(vio, pkt, 
sizeof(*pkt)); 20131762eaaSAaron Young 20231762eaaSAaron Young return -ECONNRESET; 20331762eaaSAaron Young } else { 20431762eaaSAaron Young viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] " 20531762eaaSAaron Young "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] " 20631762eaaSAaron Young "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", 20731762eaaSAaron Young pkt->xfer_mode, pkt->addr_type, 20831762eaaSAaron Young (unsigned long long)pkt->addr, 20931762eaaSAaron Young pkt->ack_freq, pkt->plnk_updt, pkt->options, 21031762eaaSAaron Young (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, 21131762eaaSAaron Young pkt->ipv4_lso_maxlen); 21231762eaaSAaron Young 21331762eaaSAaron Young pkt->tag.stype = VIO_SUBTYPE_ACK; 21431762eaaSAaron Young 21531762eaaSAaron Young return vio_ldc_send(vio, pkt, sizeof(*pkt)); 21631762eaaSAaron Young } 21731762eaaSAaron Young 21831762eaaSAaron Young } 21931762eaaSAaron Young 22031762eaaSAaron Young static int handle_attr_ack(struct vio_driver_state *vio, 22131762eaaSAaron Young struct vio_net_attr_info *pkt) 22231762eaaSAaron Young { 22331762eaaSAaron Young viodbg(HS, "GOT NET ATTR ACK\n"); 22431762eaaSAaron Young 22531762eaaSAaron Young return 0; 22631762eaaSAaron Young } 22731762eaaSAaron Young 22831762eaaSAaron Young static int handle_attr_nack(struct vio_driver_state *vio, 22931762eaaSAaron Young struct vio_net_attr_info *pkt) 23031762eaaSAaron Young { 23131762eaaSAaron Young viodbg(HS, "GOT NET ATTR NACK\n"); 23231762eaaSAaron Young 23331762eaaSAaron Young return -ECONNRESET; 23431762eaaSAaron Young } 23531762eaaSAaron Young 23631762eaaSAaron Young int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg) 23731762eaaSAaron Young { 23831762eaaSAaron Young struct vio_net_attr_info *pkt = arg; 23931762eaaSAaron Young 24031762eaaSAaron Young switch (pkt->tag.stype) { 24131762eaaSAaron Young case VIO_SUBTYPE_INFO: 24231762eaaSAaron Young return handle_attr_info(vio, pkt); 24331762eaaSAaron Young 
24431762eaaSAaron Young case VIO_SUBTYPE_ACK: 24531762eaaSAaron Young return handle_attr_ack(vio, pkt); 24631762eaaSAaron Young 24731762eaaSAaron Young case VIO_SUBTYPE_NACK: 24831762eaaSAaron Young return handle_attr_nack(vio, pkt); 24931762eaaSAaron Young 25031762eaaSAaron Young default: 25131762eaaSAaron Young return -ECONNRESET; 25231762eaaSAaron Young } 25331762eaaSAaron Young } 25431762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common); 25531762eaaSAaron Young 25631762eaaSAaron Young void sunvnet_handshake_complete_common(struct vio_driver_state *vio) 25731762eaaSAaron Young { 25831762eaaSAaron Young struct vio_dring_state *dr; 25931762eaaSAaron Young 26031762eaaSAaron Young dr = &vio->drings[VIO_DRIVER_RX_RING]; 26131762eaaSAaron Young dr->snd_nxt = dr->rcv_nxt = 1; 26231762eaaSAaron Young 26331762eaaSAaron Young dr = &vio->drings[VIO_DRIVER_TX_RING]; 26431762eaaSAaron Young dr->snd_nxt = dr->rcv_nxt = 1; 26531762eaaSAaron Young } 26631762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common); 26731762eaaSAaron Young 26831762eaaSAaron Young /* The hypervisor interface that implements copying to/from imported 26931762eaaSAaron Young * memory from another domain requires that copies are done to 8-byte 27031762eaaSAaron Young * aligned buffers, and that the lengths of such copies are also 8-byte 27131762eaaSAaron Young * multiples. 27231762eaaSAaron Young * 27331762eaaSAaron Young * So we align skb->data to an 8-byte multiple and pad-out the data 27431762eaaSAaron Young * area so we can round the copy length up to the next multiple of 27531762eaaSAaron Young * 8 for the copy. 27631762eaaSAaron Young * 27731762eaaSAaron Young * The transmitter puts the actual start of the packet 6 bytes into 27831762eaaSAaron Young * the buffer it sends over, so that the IP headers after the ethernet 27931762eaaSAaron Young * header are aligned properly. These 6 bytes are not in the descriptor 28031762eaaSAaron Young * length, they are simply implied. 
This offset is represented using 28131762eaaSAaron Young * the VNET_PACKET_SKIP macro. 28231762eaaSAaron Young */ 28331762eaaSAaron Young static struct sk_buff *alloc_and_align_skb(struct net_device *dev, 28431762eaaSAaron Young unsigned int len) 28531762eaaSAaron Young { 28631762eaaSAaron Young struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8); 28731762eaaSAaron Young unsigned long addr, off; 28831762eaaSAaron Young 28931762eaaSAaron Young if (unlikely(!skb)) 29031762eaaSAaron Young return NULL; 29131762eaaSAaron Young 29231762eaaSAaron Young addr = (unsigned long) skb->data; 29331762eaaSAaron Young off = ((addr + 7UL) & ~7UL) - addr; 29431762eaaSAaron Young if (off) 29531762eaaSAaron Young skb_reserve(skb, off); 29631762eaaSAaron Young 29731762eaaSAaron Young return skb; 29831762eaaSAaron Young } 29931762eaaSAaron Young 30031762eaaSAaron Young static inline void vnet_fullcsum(struct sk_buff *skb) 30131762eaaSAaron Young { 30231762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 30331762eaaSAaron Young int offset = skb_transport_offset(skb); 30431762eaaSAaron Young 30531762eaaSAaron Young if (skb->protocol != htons(ETH_P_IP)) 30631762eaaSAaron Young return; 30731762eaaSAaron Young if (iph->protocol != IPPROTO_TCP && 30831762eaaSAaron Young iph->protocol != IPPROTO_UDP) 30931762eaaSAaron Young return; 31031762eaaSAaron Young skb->ip_summed = CHECKSUM_NONE; 31131762eaaSAaron Young skb->csum_level = 1; 31231762eaaSAaron Young skb->csum = 0; 31331762eaaSAaron Young if (iph->protocol == IPPROTO_TCP) { 31431762eaaSAaron Young struct tcphdr *ptcp = tcp_hdr(skb); 31531762eaaSAaron Young 31631762eaaSAaron Young ptcp->check = 0; 31731762eaaSAaron Young skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 31831762eaaSAaron Young ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 31931762eaaSAaron Young skb->len - offset, IPPROTO_TCP, 32031762eaaSAaron Young skb->csum); 32131762eaaSAaron Young } else if (iph->protocol == IPPROTO_UDP) { 
32231762eaaSAaron Young struct udphdr *pudp = udp_hdr(skb); 32331762eaaSAaron Young 32431762eaaSAaron Young pudp->check = 0; 32531762eaaSAaron Young skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 32631762eaaSAaron Young pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 32731762eaaSAaron Young skb->len - offset, IPPROTO_UDP, 32831762eaaSAaron Young skb->csum); 32931762eaaSAaron Young } 33031762eaaSAaron Young } 33131762eaaSAaron Young 33231762eaaSAaron Young static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) 33331762eaaSAaron Young { 33467d0719fSAaron Young struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); 33531762eaaSAaron Young unsigned int len = desc->size; 33631762eaaSAaron Young unsigned int copy_len; 33731762eaaSAaron Young struct sk_buff *skb; 33831762eaaSAaron Young int maxlen; 33931762eaaSAaron Young int err; 34031762eaaSAaron Young 34131762eaaSAaron Young err = -EMSGSIZE; 34231762eaaSAaron Young if (port->tso && port->tsolen > port->rmtu) 34331762eaaSAaron Young maxlen = port->tsolen; 34431762eaaSAaron Young else 34531762eaaSAaron Young maxlen = port->rmtu; 34631762eaaSAaron Young if (unlikely(len < ETH_ZLEN || len > maxlen)) { 34731762eaaSAaron Young dev->stats.rx_length_errors++; 34831762eaaSAaron Young goto out_dropped; 34931762eaaSAaron Young } 35031762eaaSAaron Young 35131762eaaSAaron Young skb = alloc_and_align_skb(dev, len); 35231762eaaSAaron Young err = -ENOMEM; 35331762eaaSAaron Young if (unlikely(!skb)) { 35431762eaaSAaron Young dev->stats.rx_missed_errors++; 35531762eaaSAaron Young goto out_dropped; 35631762eaaSAaron Young } 35731762eaaSAaron Young 35831762eaaSAaron Young copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U; 35931762eaaSAaron Young skb_put(skb, copy_len); 36031762eaaSAaron Young err = ldc_copy(port->vio.lp, LDC_COPY_IN, 36131762eaaSAaron Young skb->data, copy_len, 0, 36231762eaaSAaron Young desc->cookies, desc->ncookies); 36331762eaaSAaron Young if (unlikely(err < 0)) { 
36431762eaaSAaron Young dev->stats.rx_frame_errors++; 36531762eaaSAaron Young goto out_free_skb; 36631762eaaSAaron Young } 36731762eaaSAaron Young 36831762eaaSAaron Young skb_pull(skb, VNET_PACKET_SKIP); 36931762eaaSAaron Young skb_trim(skb, len); 37031762eaaSAaron Young skb->protocol = eth_type_trans(skb, dev); 37131762eaaSAaron Young 37231762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 8)) { 37331762eaaSAaron Young struct vio_net_dext *dext = vio_net_ext(desc); 37431762eaaSAaron Young 37531762eaaSAaron Young skb_reset_network_header(skb); 37631762eaaSAaron Young 37731762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { 37831762eaaSAaron Young if (skb->protocol == ETH_P_IP) { 37931762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 38031762eaaSAaron Young 38131762eaaSAaron Young iph->check = 0; 38231762eaaSAaron Young ip_send_check(iph); 38331762eaaSAaron Young } 38431762eaaSAaron Young } 38531762eaaSAaron Young if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && 38631762eaaSAaron Young skb->ip_summed == CHECKSUM_NONE) { 38731762eaaSAaron Young if (skb->protocol == htons(ETH_P_IP)) { 38831762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 38931762eaaSAaron Young int ihl = iph->ihl * 4; 39031762eaaSAaron Young 39131762eaaSAaron Young skb_reset_transport_header(skb); 39231762eaaSAaron Young skb_set_transport_header(skb, ihl); 39331762eaaSAaron Young vnet_fullcsum(skb); 39431762eaaSAaron Young } 39531762eaaSAaron Young } 39631762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { 39731762eaaSAaron Young skb->ip_summed = CHECKSUM_PARTIAL; 39831762eaaSAaron Young skb->csum_level = 0; 39931762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK) 40031762eaaSAaron Young skb->csum_level = 1; 40131762eaaSAaron Young } 40231762eaaSAaron Young } 40331762eaaSAaron Young 40431762eaaSAaron Young skb->ip_summed = port->switch_port ? 
CHECKSUM_NONE : CHECKSUM_PARTIAL; 40531762eaaSAaron Young 40631762eaaSAaron Young dev->stats.rx_packets++; 40731762eaaSAaron Young dev->stats.rx_bytes += len; 40831762eaaSAaron Young napi_gro_receive(&port->napi, skb); 40931762eaaSAaron Young return 0; 41031762eaaSAaron Young 41131762eaaSAaron Young out_free_skb: 41231762eaaSAaron Young kfree_skb(skb); 41331762eaaSAaron Young 41431762eaaSAaron Young out_dropped: 41531762eaaSAaron Young dev->stats.rx_dropped++; 41631762eaaSAaron Young return err; 41731762eaaSAaron Young } 41831762eaaSAaron Young 41931762eaaSAaron Young static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, 42031762eaaSAaron Young u32 start, u32 end, u8 vio_dring_state) 42131762eaaSAaron Young { 42231762eaaSAaron Young struct vio_dring_data hdr = { 42331762eaaSAaron Young .tag = { 42431762eaaSAaron Young .type = VIO_TYPE_DATA, 42531762eaaSAaron Young .stype = VIO_SUBTYPE_ACK, 42631762eaaSAaron Young .stype_env = VIO_DRING_DATA, 42731762eaaSAaron Young .sid = vio_send_sid(&port->vio), 42831762eaaSAaron Young }, 42931762eaaSAaron Young .dring_ident = dr->ident, 43031762eaaSAaron Young .start_idx = start, 43131762eaaSAaron Young .end_idx = end, 43231762eaaSAaron Young .state = vio_dring_state, 43331762eaaSAaron Young }; 43431762eaaSAaron Young int err, delay; 43531762eaaSAaron Young int retries = 0; 43631762eaaSAaron Young 43731762eaaSAaron Young hdr.seq = dr->snd_nxt; 43831762eaaSAaron Young delay = 1; 43931762eaaSAaron Young do { 44031762eaaSAaron Young err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); 44131762eaaSAaron Young if (err > 0) { 44231762eaaSAaron Young dr->snd_nxt++; 44331762eaaSAaron Young break; 44431762eaaSAaron Young } 44531762eaaSAaron Young udelay(delay); 44631762eaaSAaron Young if ((delay <<= 1) > 128) 44731762eaaSAaron Young delay = 128; 44831762eaaSAaron Young if (retries++ > VNET_MAX_RETRIES) { 44931762eaaSAaron Young pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n", 45031762eaaSAaron Young port->raddr[0], 
port->raddr[1], 45131762eaaSAaron Young port->raddr[2], port->raddr[3], 45231762eaaSAaron Young port->raddr[4], port->raddr[5]); 45331762eaaSAaron Young break; 45431762eaaSAaron Young } 45531762eaaSAaron Young } while (err == -EAGAIN); 45631762eaaSAaron Young 45731762eaaSAaron Young if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) { 45831762eaaSAaron Young port->stop_rx_idx = end; 45931762eaaSAaron Young port->stop_rx = true; 46031762eaaSAaron Young } else { 46131762eaaSAaron Young port->stop_rx_idx = 0; 46231762eaaSAaron Young port->stop_rx = false; 46331762eaaSAaron Young } 46431762eaaSAaron Young 46531762eaaSAaron Young return err; 46631762eaaSAaron Young } 46731762eaaSAaron Young 46831762eaaSAaron Young static struct vio_net_desc *get_rx_desc(struct vnet_port *port, 46931762eaaSAaron Young struct vio_dring_state *dr, 47031762eaaSAaron Young u32 index) 47131762eaaSAaron Young { 47231762eaaSAaron Young struct vio_net_desc *desc = port->vio.desc_buf; 47331762eaaSAaron Young int err; 47431762eaaSAaron Young 47531762eaaSAaron Young err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size, 47631762eaaSAaron Young (index * dr->entry_size), 47731762eaaSAaron Young dr->cookies, dr->ncookies); 47831762eaaSAaron Young if (err < 0) 47931762eaaSAaron Young return ERR_PTR(err); 48031762eaaSAaron Young 48131762eaaSAaron Young return desc; 48231762eaaSAaron Young } 48331762eaaSAaron Young 48431762eaaSAaron Young static int put_rx_desc(struct vnet_port *port, 48531762eaaSAaron Young struct vio_dring_state *dr, 48631762eaaSAaron Young struct vio_net_desc *desc, 48731762eaaSAaron Young u32 index) 48831762eaaSAaron Young { 48931762eaaSAaron Young int err; 49031762eaaSAaron Young 49131762eaaSAaron Young err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, 49231762eaaSAaron Young (index * dr->entry_size), 49331762eaaSAaron Young dr->cookies, dr->ncookies); 49431762eaaSAaron Young if (err < 0) 49531762eaaSAaron Young return err; 49631762eaaSAaron Young 
49731762eaaSAaron Young return 0; 49831762eaaSAaron Young } 49931762eaaSAaron Young 50031762eaaSAaron Young static int vnet_walk_rx_one(struct vnet_port *port, 50131762eaaSAaron Young struct vio_dring_state *dr, 50231762eaaSAaron Young u32 index, int *needs_ack) 50331762eaaSAaron Young { 50431762eaaSAaron Young struct vio_net_desc *desc = get_rx_desc(port, dr, index); 50531762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 50631762eaaSAaron Young int err; 50731762eaaSAaron Young 50831762eaaSAaron Young BUG_ON(desc == NULL); 50931762eaaSAaron Young if (IS_ERR(desc)) 51031762eaaSAaron Young return PTR_ERR(desc); 51131762eaaSAaron Young 51231762eaaSAaron Young if (desc->hdr.state != VIO_DESC_READY) 51331762eaaSAaron Young return 1; 51431762eaaSAaron Young 51531762eaaSAaron Young dma_rmb(); 51631762eaaSAaron Young 51731762eaaSAaron Young viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", 51831762eaaSAaron Young desc->hdr.state, desc->hdr.ack, 51931762eaaSAaron Young desc->size, desc->ncookies, 52031762eaaSAaron Young desc->cookies[0].cookie_addr, 52131762eaaSAaron Young desc->cookies[0].cookie_size); 52231762eaaSAaron Young 52331762eaaSAaron Young err = vnet_rx_one(port, desc); 52431762eaaSAaron Young if (err == -ECONNRESET) 52531762eaaSAaron Young return err; 52631762eaaSAaron Young trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid, 52731762eaaSAaron Young index, desc->hdr.ack); 52831762eaaSAaron Young desc->hdr.state = VIO_DESC_DONE; 52931762eaaSAaron Young err = put_rx_desc(port, dr, desc, index); 53031762eaaSAaron Young if (err < 0) 53131762eaaSAaron Young return err; 53231762eaaSAaron Young *needs_ack = desc->hdr.ack; 53331762eaaSAaron Young return 0; 53431762eaaSAaron Young } 53531762eaaSAaron Young 53631762eaaSAaron Young static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, 53731762eaaSAaron Young u32 start, u32 end, int *npkts, int budget) 53831762eaaSAaron Young { 53931762eaaSAaron Young struct 
vio_driver_state *vio = &port->vio; 54031762eaaSAaron Young int ack_start = -1, ack_end = -1; 54131762eaaSAaron Young bool send_ack = true; 54231762eaaSAaron Young 54331762eaaSAaron Young end = (end == (u32) -1) ? vio_dring_prev(dr, start) 54431762eaaSAaron Young : vio_dring_next(dr, end); 54531762eaaSAaron Young 54631762eaaSAaron Young viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); 54731762eaaSAaron Young 54831762eaaSAaron Young while (start != end) { 54931762eaaSAaron Young int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); 55031762eaaSAaron Young if (err == -ECONNRESET) 55131762eaaSAaron Young return err; 55231762eaaSAaron Young if (err != 0) 55331762eaaSAaron Young break; 55431762eaaSAaron Young (*npkts)++; 55531762eaaSAaron Young if (ack_start == -1) 55631762eaaSAaron Young ack_start = start; 55731762eaaSAaron Young ack_end = start; 55831762eaaSAaron Young start = vio_dring_next(dr, start); 55931762eaaSAaron Young if (ack && start != end) { 56031762eaaSAaron Young err = vnet_send_ack(port, dr, ack_start, ack_end, 56131762eaaSAaron Young VIO_DRING_ACTIVE); 56231762eaaSAaron Young if (err == -ECONNRESET) 56331762eaaSAaron Young return err; 56431762eaaSAaron Young ack_start = -1; 56531762eaaSAaron Young } 56631762eaaSAaron Young if ((*npkts) >= budget) { 56731762eaaSAaron Young send_ack = false; 56831762eaaSAaron Young break; 56931762eaaSAaron Young } 57031762eaaSAaron Young } 57131762eaaSAaron Young if (unlikely(ack_start == -1)) 57231762eaaSAaron Young ack_start = ack_end = vio_dring_prev(dr, start); 57331762eaaSAaron Young if (send_ack) { 57431762eaaSAaron Young port->napi_resume = false; 57531762eaaSAaron Young trace_vnet_tx_send_stopped_ack(port->vio._local_sid, 57631762eaaSAaron Young port->vio._peer_sid, 57731762eaaSAaron Young ack_end, *npkts); 57831762eaaSAaron Young return vnet_send_ack(port, dr, ack_start, ack_end, 57931762eaaSAaron Young VIO_DRING_STOPPED); 58031762eaaSAaron Young } else { 58131762eaaSAaron Young 
trace_vnet_tx_defer_stopped_ack(port->vio._local_sid, 58231762eaaSAaron Young port->vio._peer_sid, 58331762eaaSAaron Young ack_end, *npkts); 58431762eaaSAaron Young port->napi_resume = true; 58531762eaaSAaron Young port->napi_stop_idx = ack_end; 58631762eaaSAaron Young return 1; 58731762eaaSAaron Young } 58831762eaaSAaron Young } 58931762eaaSAaron Young 59031762eaaSAaron Young static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts, 59131762eaaSAaron Young int budget) 59231762eaaSAaron Young { 59331762eaaSAaron Young struct vio_dring_data *pkt = msgbuf; 59431762eaaSAaron Young struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; 59531762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 59631762eaaSAaron Young 59731762eaaSAaron Young viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n", 59831762eaaSAaron Young pkt->tag.stype_env, pkt->seq, dr->rcv_nxt); 59931762eaaSAaron Young 60031762eaaSAaron Young if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) 60131762eaaSAaron Young return 0; 60231762eaaSAaron Young if (unlikely(pkt->seq != dr->rcv_nxt)) { 60331762eaaSAaron Young pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n", 60431762eaaSAaron Young pkt->seq, dr->rcv_nxt); 60531762eaaSAaron Young return 0; 60631762eaaSAaron Young } 60731762eaaSAaron Young 60831762eaaSAaron Young if (!port->napi_resume) 60931762eaaSAaron Young dr->rcv_nxt++; 61031762eaaSAaron Young 61131762eaaSAaron Young /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ 61231762eaaSAaron Young 61331762eaaSAaron Young return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, 61431762eaaSAaron Young npkts, budget); 61531762eaaSAaron Young } 61631762eaaSAaron Young 61731762eaaSAaron Young static int idx_is_pending(struct vio_dring_state *dr, u32 end) 61831762eaaSAaron Young { 61931762eaaSAaron Young u32 idx = dr->cons; 62031762eaaSAaron Young int found = 0; 62131762eaaSAaron Young 62231762eaaSAaron Young while (idx != dr->prod) { 
62331762eaaSAaron Young if (idx == end) { 62431762eaaSAaron Young found = 1; 62531762eaaSAaron Young break; 62631762eaaSAaron Young } 62731762eaaSAaron Young idx = vio_dring_next(dr, idx); 62831762eaaSAaron Young } 62931762eaaSAaron Young return found; 63031762eaaSAaron Young } 63131762eaaSAaron Young 63231762eaaSAaron Young static int vnet_ack(struct vnet_port *port, void *msgbuf) 63331762eaaSAaron Young { 63431762eaaSAaron Young struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 63531762eaaSAaron Young struct vio_dring_data *pkt = msgbuf; 63631762eaaSAaron Young struct net_device *dev; 63731762eaaSAaron Young u32 end; 63831762eaaSAaron Young struct vio_net_desc *desc; 63931762eaaSAaron Young struct netdev_queue *txq; 64031762eaaSAaron Young 64131762eaaSAaron Young if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) 64231762eaaSAaron Young return 0; 64331762eaaSAaron Young 64431762eaaSAaron Young end = pkt->end_idx; 64567d0719fSAaron Young dev = VNET_PORT_TO_NET_DEVICE(port); 64631762eaaSAaron Young netif_tx_lock(dev); 64731762eaaSAaron Young if (unlikely(!idx_is_pending(dr, end))) { 64831762eaaSAaron Young netif_tx_unlock(dev); 64931762eaaSAaron Young return 0; 65031762eaaSAaron Young } 65131762eaaSAaron Young 65231762eaaSAaron Young /* sync for race conditions with vnet_start_xmit() and tell xmit it 65331762eaaSAaron Young * is time to send a trigger. 65431762eaaSAaron Young */ 65531762eaaSAaron Young trace_vnet_rx_stopped_ack(port->vio._local_sid, 65631762eaaSAaron Young port->vio._peer_sid, end); 65731762eaaSAaron Young dr->cons = vio_dring_next(dr, end); 65831762eaaSAaron Young desc = vio_dring_entry(dr, dr->cons); 65931762eaaSAaron Young if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) { 66031762eaaSAaron Young /* vnet_start_xmit() just populated this dring but missed 66131762eaaSAaron Young * sending the "start" LDC message to the consumer. 66231762eaaSAaron Young * Send a "start" trigger on its behalf. 
66331762eaaSAaron Young */ 66431762eaaSAaron Young if (__vnet_tx_trigger(port, dr->cons) > 0) 66531762eaaSAaron Young port->start_cons = false; 66631762eaaSAaron Young else 66731762eaaSAaron Young port->start_cons = true; 66831762eaaSAaron Young } else { 66931762eaaSAaron Young port->start_cons = true; 67031762eaaSAaron Young } 67131762eaaSAaron Young netif_tx_unlock(dev); 67231762eaaSAaron Young 67331762eaaSAaron Young txq = netdev_get_tx_queue(dev, port->q_index); 67431762eaaSAaron Young if (unlikely(netif_tx_queue_stopped(txq) && 67531762eaaSAaron Young vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) 67631762eaaSAaron Young return 1; 67731762eaaSAaron Young 67831762eaaSAaron Young return 0; 67931762eaaSAaron Young } 68031762eaaSAaron Young 68131762eaaSAaron Young static int vnet_nack(struct vnet_port *port, void *msgbuf) 68231762eaaSAaron Young { 68331762eaaSAaron Young /* XXX just reset or similar XXX */ 68431762eaaSAaron Young return 0; 68531762eaaSAaron Young } 68631762eaaSAaron Young 68731762eaaSAaron Young static int handle_mcast(struct vnet_port *port, void *msgbuf) 68831762eaaSAaron Young { 68931762eaaSAaron Young struct vio_net_mcast_info *pkt = msgbuf; 69067d0719fSAaron Young struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); 69131762eaaSAaron Young 69231762eaaSAaron Young if (pkt->tag.stype != VIO_SUBTYPE_ACK) 69331762eaaSAaron Young pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n", 69467d0719fSAaron Young dev->name, 69531762eaaSAaron Young pkt->tag.type, 69631762eaaSAaron Young pkt->tag.stype, 69731762eaaSAaron Young pkt->tag.stype_env, 69831762eaaSAaron Young pkt->tag.sid); 69931762eaaSAaron Young 70031762eaaSAaron Young return 0; 70131762eaaSAaron Young } 70231762eaaSAaron Young 70331762eaaSAaron Young /* Got back a STOPPED LDC message on port. If the queue is stopped, 70431762eaaSAaron Young * wake it up so that we'll send out another START message at the 70531762eaaSAaron Young * next TX. 
70631762eaaSAaron Young */ 70731762eaaSAaron Young static void maybe_tx_wakeup(struct vnet_port *port) 70831762eaaSAaron Young { 70931762eaaSAaron Young struct netdev_queue *txq; 71031762eaaSAaron Young 71167d0719fSAaron Young txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), 71267d0719fSAaron Young port->q_index); 71331762eaaSAaron Young __netif_tx_lock(txq, smp_processor_id()); 71431762eaaSAaron Young if (likely(netif_tx_queue_stopped(txq))) { 71531762eaaSAaron Young struct vio_dring_state *dr; 71631762eaaSAaron Young 71731762eaaSAaron Young dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 71831762eaaSAaron Young netif_tx_wake_queue(txq); 71931762eaaSAaron Young } 72031762eaaSAaron Young __netif_tx_unlock(txq); 72131762eaaSAaron Young } 72231762eaaSAaron Young 72367d0719fSAaron Young bool sunvnet_port_is_up_common(struct vnet_port *vnet) 72431762eaaSAaron Young { 72531762eaaSAaron Young struct vio_driver_state *vio = &vnet->vio; 72631762eaaSAaron Young 72731762eaaSAaron Young return !!(vio->hs_state & VIO_HS_COMPLETE); 72831762eaaSAaron Young } 72967d0719fSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common); 73031762eaaSAaron Young 73131762eaaSAaron Young static int vnet_event_napi(struct vnet_port *port, int budget) 73231762eaaSAaron Young { 73331762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 73431762eaaSAaron Young int tx_wakeup, err; 73531762eaaSAaron Young int npkts = 0; 73631762eaaSAaron Young int event = (port->rx_event & LDC_EVENT_RESET); 73731762eaaSAaron Young 73831762eaaSAaron Young ldc_ctrl: 73931762eaaSAaron Young if (unlikely(event == LDC_EVENT_RESET || 74031762eaaSAaron Young event == LDC_EVENT_UP)) { 74131762eaaSAaron Young vio_link_state_change(vio, event); 74231762eaaSAaron Young 74331762eaaSAaron Young if (event == LDC_EVENT_RESET) { 74431762eaaSAaron Young vnet_port_reset(port); 74531762eaaSAaron Young vio_port_up(vio); 74631762eaaSAaron Young } 74731762eaaSAaron Young port->rx_event = 0; 74831762eaaSAaron Young return 0; 
74931762eaaSAaron Young } 75031762eaaSAaron Young /* We may have multiple LDC events in rx_event. Unroll send_events() */ 75131762eaaSAaron Young event = (port->rx_event & LDC_EVENT_UP); 75231762eaaSAaron Young port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP); 75331762eaaSAaron Young if (event == LDC_EVENT_UP) 75431762eaaSAaron Young goto ldc_ctrl; 75531762eaaSAaron Young event = port->rx_event; 75631762eaaSAaron Young if (!(event & LDC_EVENT_DATA_READY)) 75731762eaaSAaron Young return 0; 75831762eaaSAaron Young 75931762eaaSAaron Young /* we dont expect any other bits than RESET, UP, DATA_READY */ 76031762eaaSAaron Young BUG_ON(event != LDC_EVENT_DATA_READY); 76131762eaaSAaron Young 76231762eaaSAaron Young tx_wakeup = err = 0; 76331762eaaSAaron Young while (1) { 76431762eaaSAaron Young union { 76531762eaaSAaron Young struct vio_msg_tag tag; 76631762eaaSAaron Young u64 raw[8]; 76731762eaaSAaron Young } msgbuf; 76831762eaaSAaron Young 76931762eaaSAaron Young if (port->napi_resume) { 77031762eaaSAaron Young struct vio_dring_data *pkt = 77131762eaaSAaron Young (struct vio_dring_data *)&msgbuf; 77231762eaaSAaron Young struct vio_dring_state *dr = 77331762eaaSAaron Young &port->vio.drings[VIO_DRIVER_RX_RING]; 77431762eaaSAaron Young 77531762eaaSAaron Young pkt->tag.type = VIO_TYPE_DATA; 77631762eaaSAaron Young pkt->tag.stype = VIO_SUBTYPE_INFO; 77731762eaaSAaron Young pkt->tag.stype_env = VIO_DRING_DATA; 77831762eaaSAaron Young pkt->seq = dr->rcv_nxt; 77931762eaaSAaron Young pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx); 78031762eaaSAaron Young pkt->end_idx = -1; 78131762eaaSAaron Young goto napi_resume; 78231762eaaSAaron Young } 78331762eaaSAaron Young err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); 78431762eaaSAaron Young if (unlikely(err < 0)) { 78531762eaaSAaron Young if (err == -ECONNRESET) 78631762eaaSAaron Young vio_conn_reset(vio); 78731762eaaSAaron Young break; 78831762eaaSAaron Young } 78931762eaaSAaron Young if (err == 0) 79031762eaaSAaron Young 
break; 79131762eaaSAaron Young viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", 79231762eaaSAaron Young msgbuf.tag.type, 79331762eaaSAaron Young msgbuf.tag.stype, 79431762eaaSAaron Young msgbuf.tag.stype_env, 79531762eaaSAaron Young msgbuf.tag.sid); 79631762eaaSAaron Young err = vio_validate_sid(vio, &msgbuf.tag); 79731762eaaSAaron Young if (err < 0) 79831762eaaSAaron Young break; 79931762eaaSAaron Young napi_resume: 80031762eaaSAaron Young if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { 80131762eaaSAaron Young if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { 80267d0719fSAaron Young if (!sunvnet_port_is_up_common(port)) { 80331762eaaSAaron Young /* failures like handshake_failure() 80431762eaaSAaron Young * may have cleaned up dring, but 80531762eaaSAaron Young * NAPI polling may bring us here. 80631762eaaSAaron Young */ 80731762eaaSAaron Young err = -ECONNRESET; 80831762eaaSAaron Young break; 80931762eaaSAaron Young } 81031762eaaSAaron Young err = vnet_rx(port, &msgbuf, &npkts, budget); 81131762eaaSAaron Young if (npkts >= budget) 81231762eaaSAaron Young break; 81331762eaaSAaron Young if (npkts == 0) 81431762eaaSAaron Young break; 81531762eaaSAaron Young } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { 81631762eaaSAaron Young err = vnet_ack(port, &msgbuf); 81731762eaaSAaron Young if (err > 0) 81831762eaaSAaron Young tx_wakeup |= err; 81931762eaaSAaron Young } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) { 82031762eaaSAaron Young err = vnet_nack(port, &msgbuf); 82131762eaaSAaron Young } 82231762eaaSAaron Young } else if (msgbuf.tag.type == VIO_TYPE_CTRL) { 82331762eaaSAaron Young if (msgbuf.tag.stype_env == VNET_MCAST_INFO) 82431762eaaSAaron Young err = handle_mcast(port, &msgbuf); 82531762eaaSAaron Young else 82631762eaaSAaron Young err = vio_control_pkt_engine(vio, &msgbuf); 82731762eaaSAaron Young if (err) 82831762eaaSAaron Young break; 82931762eaaSAaron Young } else { 83031762eaaSAaron Young err = vnet_handle_unknown(port, &msgbuf); 83131762eaaSAaron Young } 
83231762eaaSAaron Young if (err == -ECONNRESET) 83331762eaaSAaron Young break; 83431762eaaSAaron Young } 83531762eaaSAaron Young if (unlikely(tx_wakeup && err != -ECONNRESET)) 83631762eaaSAaron Young maybe_tx_wakeup(port); 83731762eaaSAaron Young return npkts; 83831762eaaSAaron Young } 83931762eaaSAaron Young 84031762eaaSAaron Young int sunvnet_poll_common(struct napi_struct *napi, int budget) 84131762eaaSAaron Young { 84231762eaaSAaron Young struct vnet_port *port = container_of(napi, struct vnet_port, napi); 84331762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 84431762eaaSAaron Young int processed = vnet_event_napi(port, budget); 84531762eaaSAaron Young 84631762eaaSAaron Young if (processed < budget) { 84731762eaaSAaron Young napi_complete(napi); 84831762eaaSAaron Young port->rx_event &= ~LDC_EVENT_DATA_READY; 84931762eaaSAaron Young vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); 85031762eaaSAaron Young } 85131762eaaSAaron Young return processed; 85231762eaaSAaron Young } 85331762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_poll_common); 85431762eaaSAaron Young 85531762eaaSAaron Young void sunvnet_event_common(void *arg, int event) 85631762eaaSAaron Young { 85731762eaaSAaron Young struct vnet_port *port = arg; 85831762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 85931762eaaSAaron Young 86031762eaaSAaron Young port->rx_event |= event; 86131762eaaSAaron Young vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED); 86231762eaaSAaron Young napi_schedule(&port->napi); 86331762eaaSAaron Young 86431762eaaSAaron Young } 86531762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_event_common); 86631762eaaSAaron Young 86731762eaaSAaron Young static int __vnet_tx_trigger(struct vnet_port *port, u32 start) 86831762eaaSAaron Young { 86931762eaaSAaron Young struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 87031762eaaSAaron Young struct vio_dring_data hdr = { 87131762eaaSAaron Young .tag = { 87231762eaaSAaron Young .type = VIO_TYPE_DATA, 
87331762eaaSAaron Young .stype = VIO_SUBTYPE_INFO, 87431762eaaSAaron Young .stype_env = VIO_DRING_DATA, 87531762eaaSAaron Young .sid = vio_send_sid(&port->vio), 87631762eaaSAaron Young }, 87731762eaaSAaron Young .dring_ident = dr->ident, 87831762eaaSAaron Young .start_idx = start, 87931762eaaSAaron Young .end_idx = (u32) -1, 88031762eaaSAaron Young }; 88131762eaaSAaron Young int err, delay; 88231762eaaSAaron Young int retries = 0; 88331762eaaSAaron Young 88431762eaaSAaron Young if (port->stop_rx) { 88531762eaaSAaron Young trace_vnet_tx_pending_stopped_ack(port->vio._local_sid, 88631762eaaSAaron Young port->vio._peer_sid, 88731762eaaSAaron Young port->stop_rx_idx, -1); 88831762eaaSAaron Young err = vnet_send_ack(port, 88931762eaaSAaron Young &port->vio.drings[VIO_DRIVER_RX_RING], 89031762eaaSAaron Young port->stop_rx_idx, -1, 89131762eaaSAaron Young VIO_DRING_STOPPED); 89231762eaaSAaron Young if (err <= 0) 89331762eaaSAaron Young return err; 89431762eaaSAaron Young } 89531762eaaSAaron Young 89631762eaaSAaron Young hdr.seq = dr->snd_nxt; 89731762eaaSAaron Young delay = 1; 89831762eaaSAaron Young do { 89931762eaaSAaron Young err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); 90031762eaaSAaron Young if (err > 0) { 90131762eaaSAaron Young dr->snd_nxt++; 90231762eaaSAaron Young break; 90331762eaaSAaron Young } 90431762eaaSAaron Young udelay(delay); 90531762eaaSAaron Young if ((delay <<= 1) > 128) 90631762eaaSAaron Young delay = 128; 90731762eaaSAaron Young if (retries++ > VNET_MAX_RETRIES) 90831762eaaSAaron Young break; 90931762eaaSAaron Young } while (err == -EAGAIN); 91031762eaaSAaron Young trace_vnet_tx_trigger(port->vio._local_sid, 91131762eaaSAaron Young port->vio._peer_sid, start, err); 91231762eaaSAaron Young 91331762eaaSAaron Young return err; 91431762eaaSAaron Young } 91531762eaaSAaron Young 91631762eaaSAaron Young static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, 91731762eaaSAaron Young unsigned *pending) 91831762eaaSAaron Young { 
91931762eaaSAaron Young struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 92031762eaaSAaron Young struct sk_buff *skb = NULL; 92131762eaaSAaron Young int i, txi; 92231762eaaSAaron Young 92331762eaaSAaron Young *pending = 0; 92431762eaaSAaron Young 92531762eaaSAaron Young txi = dr->prod; 92631762eaaSAaron Young for (i = 0; i < VNET_TX_RING_SIZE; ++i) { 92731762eaaSAaron Young struct vio_net_desc *d; 92831762eaaSAaron Young 92931762eaaSAaron Young --txi; 93031762eaaSAaron Young if (txi < 0) 93131762eaaSAaron Young txi = VNET_TX_RING_SIZE-1; 93231762eaaSAaron Young 93331762eaaSAaron Young d = vio_dring_entry(dr, txi); 93431762eaaSAaron Young 93531762eaaSAaron Young if (d->hdr.state == VIO_DESC_READY) { 93631762eaaSAaron Young (*pending)++; 93731762eaaSAaron Young continue; 93831762eaaSAaron Young } 93931762eaaSAaron Young if (port->tx_bufs[txi].skb) { 94031762eaaSAaron Young if (d->hdr.state != VIO_DESC_DONE) 94131762eaaSAaron Young pr_notice("invalid ring buffer state %d\n", 94231762eaaSAaron Young d->hdr.state); 94331762eaaSAaron Young BUG_ON(port->tx_bufs[txi].skb->next); 94431762eaaSAaron Young 94531762eaaSAaron Young port->tx_bufs[txi].skb->next = skb; 94631762eaaSAaron Young skb = port->tx_bufs[txi].skb; 94731762eaaSAaron Young port->tx_bufs[txi].skb = NULL; 94831762eaaSAaron Young 94931762eaaSAaron Young ldc_unmap(port->vio.lp, 95031762eaaSAaron Young port->tx_bufs[txi].cookies, 95131762eaaSAaron Young port->tx_bufs[txi].ncookies); 95231762eaaSAaron Young } else if (d->hdr.state == VIO_DESC_FREE) 95331762eaaSAaron Young break; 95431762eaaSAaron Young d->hdr.state = VIO_DESC_FREE; 95531762eaaSAaron Young } 95631762eaaSAaron Young return skb; 95731762eaaSAaron Young } 95831762eaaSAaron Young 95931762eaaSAaron Young static inline void vnet_free_skbs(struct sk_buff *skb) 96031762eaaSAaron Young { 96131762eaaSAaron Young struct sk_buff *next; 96231762eaaSAaron Young 96331762eaaSAaron Young while (skb) { 96431762eaaSAaron Young next = skb->next; 
96531762eaaSAaron Young skb->next = NULL; 96631762eaaSAaron Young dev_kfree_skb(skb); 96731762eaaSAaron Young skb = next; 96831762eaaSAaron Young } 96931762eaaSAaron Young } 97031762eaaSAaron Young 97131762eaaSAaron Young void sunvnet_clean_timer_expire_common(unsigned long port0) 97231762eaaSAaron Young { 97331762eaaSAaron Young struct vnet_port *port = (struct vnet_port *)port0; 97431762eaaSAaron Young struct sk_buff *freeskbs; 97531762eaaSAaron Young unsigned pending; 97631762eaaSAaron Young 97767d0719fSAaron Young netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port)); 97831762eaaSAaron Young freeskbs = vnet_clean_tx_ring(port, &pending); 97967d0719fSAaron Young netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port)); 98031762eaaSAaron Young 98131762eaaSAaron Young vnet_free_skbs(freeskbs); 98231762eaaSAaron Young 98331762eaaSAaron Young if (pending) 98431762eaaSAaron Young (void)mod_timer(&port->clean_timer, 98531762eaaSAaron Young jiffies + VNET_CLEAN_TIMEOUT); 98631762eaaSAaron Young else 98731762eaaSAaron Young del_timer(&port->clean_timer); 98831762eaaSAaron Young } 98931762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common); 99031762eaaSAaron Young 99131762eaaSAaron Young static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb, 99231762eaaSAaron Young struct ldc_trans_cookie *cookies, int ncookies, 99331762eaaSAaron Young unsigned int map_perm) 99431762eaaSAaron Young { 99531762eaaSAaron Young int i, nc, err, blen; 99631762eaaSAaron Young 99731762eaaSAaron Young /* header */ 99831762eaaSAaron Young blen = skb_headlen(skb); 99931762eaaSAaron Young if (blen < ETH_ZLEN) 100031762eaaSAaron Young blen = ETH_ZLEN; 100131762eaaSAaron Young blen += VNET_PACKET_SKIP; 100231762eaaSAaron Young blen += 8 - (blen & 7); 100331762eaaSAaron Young 100431762eaaSAaron Young err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies, 100531762eaaSAaron Young ncookies, map_perm); 100631762eaaSAaron Young if (err < 0) 100731762eaaSAaron Young return 
err; 100831762eaaSAaron Young nc = err; 100931762eaaSAaron Young 101031762eaaSAaron Young for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 101131762eaaSAaron Young skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 101231762eaaSAaron Young u8 *vaddr; 101331762eaaSAaron Young 101431762eaaSAaron Young if (nc < ncookies) { 101531762eaaSAaron Young vaddr = kmap_atomic(skb_frag_page(f)); 101631762eaaSAaron Young blen = skb_frag_size(f); 101731762eaaSAaron Young blen += 8 - (blen & 7); 101831762eaaSAaron Young err = ldc_map_single(lp, vaddr + f->page_offset, 101931762eaaSAaron Young blen, cookies + nc, ncookies - nc, 102031762eaaSAaron Young map_perm); 102131762eaaSAaron Young kunmap_atomic(vaddr); 102231762eaaSAaron Young } else { 102331762eaaSAaron Young err = -EMSGSIZE; 102431762eaaSAaron Young } 102531762eaaSAaron Young 102631762eaaSAaron Young if (err < 0) { 102731762eaaSAaron Young ldc_unmap(lp, cookies, nc); 102831762eaaSAaron Young return err; 102931762eaaSAaron Young } 103031762eaaSAaron Young nc += err; 103131762eaaSAaron Young } 103231762eaaSAaron Young return nc; 103331762eaaSAaron Young } 103431762eaaSAaron Young 103531762eaaSAaron Young static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) 103631762eaaSAaron Young { 103731762eaaSAaron Young struct sk_buff *nskb; 103831762eaaSAaron Young int i, len, pad, docopy; 103931762eaaSAaron Young 104031762eaaSAaron Young len = skb->len; 104131762eaaSAaron Young pad = 0; 104231762eaaSAaron Young if (len < ETH_ZLEN) { 104331762eaaSAaron Young pad += ETH_ZLEN - skb->len; 104431762eaaSAaron Young len += pad; 104531762eaaSAaron Young } 104631762eaaSAaron Young len += VNET_PACKET_SKIP; 104731762eaaSAaron Young pad += 8 - (len & 7); 104831762eaaSAaron Young 104931762eaaSAaron Young /* make sure we have enough cookies and alignment in every frag */ 105031762eaaSAaron Young docopy = skb_shinfo(skb)->nr_frags >= ncookies; 105131762eaaSAaron Young for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 
105231762eaaSAaron Young skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 105331762eaaSAaron Young 105431762eaaSAaron Young docopy |= f->page_offset & 7; 105531762eaaSAaron Young } 105631762eaaSAaron Young if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP || 105731762eaaSAaron Young skb_tailroom(skb) < pad || 105831762eaaSAaron Young skb_headroom(skb) < VNET_PACKET_SKIP || docopy) { 105931762eaaSAaron Young int start = 0, offset; 106031762eaaSAaron Young __wsum csum; 106131762eaaSAaron Young 106231762eaaSAaron Young len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN; 106331762eaaSAaron Young nskb = alloc_and_align_skb(skb->dev, len); 106431762eaaSAaron Young if (nskb == NULL) { 106531762eaaSAaron Young dev_kfree_skb(skb); 106631762eaaSAaron Young return NULL; 106731762eaaSAaron Young } 106831762eaaSAaron Young skb_reserve(nskb, VNET_PACKET_SKIP); 106931762eaaSAaron Young 107031762eaaSAaron Young nskb->protocol = skb->protocol; 107131762eaaSAaron Young offset = skb_mac_header(skb) - skb->data; 107231762eaaSAaron Young skb_set_mac_header(nskb, offset); 107331762eaaSAaron Young offset = skb_network_header(skb) - skb->data; 107431762eaaSAaron Young skb_set_network_header(nskb, offset); 107531762eaaSAaron Young offset = skb_transport_header(skb) - skb->data; 107631762eaaSAaron Young skb_set_transport_header(nskb, offset); 107731762eaaSAaron Young 107831762eaaSAaron Young offset = 0; 107931762eaaSAaron Young nskb->csum_offset = skb->csum_offset; 108031762eaaSAaron Young nskb->ip_summed = skb->ip_summed; 108131762eaaSAaron Young 108231762eaaSAaron Young if (skb->ip_summed == CHECKSUM_PARTIAL) 108331762eaaSAaron Young start = skb_checksum_start_offset(skb); 108431762eaaSAaron Young if (start) { 108531762eaaSAaron Young struct iphdr *iph = ip_hdr(nskb); 108631762eaaSAaron Young int offset = start + nskb->csum_offset; 108731762eaaSAaron Young 108831762eaaSAaron Young if (skb_copy_bits(skb, 0, nskb->data, start)) { 108931762eaaSAaron Young dev_kfree_skb(nskb); 109031762eaaSAaron 
Young dev_kfree_skb(skb); 109131762eaaSAaron Young return NULL; 109231762eaaSAaron Young } 109331762eaaSAaron Young *(__sum16 *)(skb->data + offset) = 0; 109431762eaaSAaron Young csum = skb_copy_and_csum_bits(skb, start, 109531762eaaSAaron Young nskb->data + start, 109631762eaaSAaron Young skb->len - start, 0); 109731762eaaSAaron Young if (iph->protocol == IPPROTO_TCP || 109831762eaaSAaron Young iph->protocol == IPPROTO_UDP) { 109931762eaaSAaron Young csum = csum_tcpudp_magic(iph->saddr, iph->daddr, 110031762eaaSAaron Young skb->len - start, 110131762eaaSAaron Young iph->protocol, csum); 110231762eaaSAaron Young } 110331762eaaSAaron Young *(__sum16 *)(nskb->data + offset) = csum; 110431762eaaSAaron Young 110531762eaaSAaron Young nskb->ip_summed = CHECKSUM_NONE; 110631762eaaSAaron Young } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { 110731762eaaSAaron Young dev_kfree_skb(nskb); 110831762eaaSAaron Young dev_kfree_skb(skb); 110931762eaaSAaron Young return NULL; 111031762eaaSAaron Young } 111131762eaaSAaron Young (void)skb_put(nskb, skb->len); 111231762eaaSAaron Young if (skb_is_gso(skb)) { 111331762eaaSAaron Young skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; 111431762eaaSAaron Young skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; 111531762eaaSAaron Young } 111631762eaaSAaron Young nskb->queue_mapping = skb->queue_mapping; 111731762eaaSAaron Young dev_kfree_skb(skb); 111831762eaaSAaron Young skb = nskb; 111931762eaaSAaron Young } 112031762eaaSAaron Young return skb; 112131762eaaSAaron Young } 112231762eaaSAaron Young 112367d0719fSAaron Young static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, 112467d0719fSAaron Young struct vnet_port *(*vnet_tx_port) 112567d0719fSAaron Young (struct sk_buff *, struct net_device *)) 112631762eaaSAaron Young { 112767d0719fSAaron Young struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); 112831762eaaSAaron Young struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 
112931762eaaSAaron Young struct sk_buff *segs; 113031762eaaSAaron Young int maclen, datalen; 113131762eaaSAaron Young int status; 113231762eaaSAaron Young int gso_size, gso_type, gso_segs; 113331762eaaSAaron Young int hlen = skb_transport_header(skb) - skb_mac_header(skb); 113431762eaaSAaron Young int proto = IPPROTO_IP; 113531762eaaSAaron Young 113631762eaaSAaron Young if (skb->protocol == htons(ETH_P_IP)) 113731762eaaSAaron Young proto = ip_hdr(skb)->protocol; 113831762eaaSAaron Young else if (skb->protocol == htons(ETH_P_IPV6)) 113931762eaaSAaron Young proto = ipv6_hdr(skb)->nexthdr; 114031762eaaSAaron Young 114131762eaaSAaron Young if (proto == IPPROTO_TCP) 114231762eaaSAaron Young hlen += tcp_hdr(skb)->doff * 4; 114331762eaaSAaron Young else if (proto == IPPROTO_UDP) 114431762eaaSAaron Young hlen += sizeof(struct udphdr); 114531762eaaSAaron Young else { 114631762eaaSAaron Young pr_err("vnet_handle_offloads GSO with unknown transport " 114731762eaaSAaron Young "protocol %d tproto %d\n", skb->protocol, proto); 114831762eaaSAaron Young hlen = 128; /* XXX */ 114931762eaaSAaron Young } 115031762eaaSAaron Young datalen = port->tsolen - hlen; 115131762eaaSAaron Young 115231762eaaSAaron Young gso_size = skb_shinfo(skb)->gso_size; 115331762eaaSAaron Young gso_type = skb_shinfo(skb)->gso_type; 115431762eaaSAaron Young gso_segs = skb_shinfo(skb)->gso_segs; 115531762eaaSAaron Young 115631762eaaSAaron Young if (port->tso && gso_size < datalen) 115731762eaaSAaron Young gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen); 115831762eaaSAaron Young 115931762eaaSAaron Young if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) { 116031762eaaSAaron Young struct netdev_queue *txq; 116131762eaaSAaron Young 116231762eaaSAaron Young txq = netdev_get_tx_queue(dev, port->q_index); 116331762eaaSAaron Young netif_tx_stop_queue(txq); 116431762eaaSAaron Young if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs) 116531762eaaSAaron Young return NETDEV_TX_BUSY; 116631762eaaSAaron Young 
netif_tx_wake_queue(txq); 116731762eaaSAaron Young } 116831762eaaSAaron Young 116931762eaaSAaron Young maclen = skb_network_header(skb) - skb_mac_header(skb); 117031762eaaSAaron Young skb_pull(skb, maclen); 117131762eaaSAaron Young 117231762eaaSAaron Young if (port->tso && gso_size < datalen) { 117331762eaaSAaron Young if (skb_unclone(skb, GFP_ATOMIC)) 117431762eaaSAaron Young goto out_dropped; 117531762eaaSAaron Young 117631762eaaSAaron Young /* segment to TSO size */ 117731762eaaSAaron Young skb_shinfo(skb)->gso_size = datalen; 117831762eaaSAaron Young skb_shinfo(skb)->gso_segs = gso_segs; 117931762eaaSAaron Young } 118031762eaaSAaron Young segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); 118131762eaaSAaron Young if (IS_ERR(segs)) 118231762eaaSAaron Young goto out_dropped; 118331762eaaSAaron Young 118431762eaaSAaron Young skb_push(skb, maclen); 118531762eaaSAaron Young skb_reset_mac_header(skb); 118631762eaaSAaron Young 118731762eaaSAaron Young status = 0; 118831762eaaSAaron Young while (segs) { 118931762eaaSAaron Young struct sk_buff *curr = segs; 119031762eaaSAaron Young 119131762eaaSAaron Young segs = segs->next; 119231762eaaSAaron Young curr->next = NULL; 119331762eaaSAaron Young if (port->tso && curr->len > dev->mtu) { 119431762eaaSAaron Young skb_shinfo(curr)->gso_size = gso_size; 119531762eaaSAaron Young skb_shinfo(curr)->gso_type = gso_type; 119631762eaaSAaron Young skb_shinfo(curr)->gso_segs = 119731762eaaSAaron Young DIV_ROUND_UP(curr->len - hlen, gso_size); 119831762eaaSAaron Young } else 119931762eaaSAaron Young skb_shinfo(curr)->gso_size = 0; 120031762eaaSAaron Young 120131762eaaSAaron Young skb_push(curr, maclen); 120231762eaaSAaron Young skb_reset_mac_header(curr); 120331762eaaSAaron Young memcpy(skb_mac_header(curr), skb_mac_header(skb), 120431762eaaSAaron Young maclen); 120531762eaaSAaron Young curr->csum_start = skb_transport_header(curr) - curr->head; 120631762eaaSAaron Young if (ip_hdr(curr)->protocol == IPPROTO_TCP) 
120731762eaaSAaron Young curr->csum_offset = offsetof(struct tcphdr, check); 120831762eaaSAaron Young else if (ip_hdr(curr)->protocol == IPPROTO_UDP) 120931762eaaSAaron Young curr->csum_offset = offsetof(struct udphdr, check); 121031762eaaSAaron Young 121131762eaaSAaron Young if (!(status & NETDEV_TX_MASK)) 121267d0719fSAaron Young status = sunvnet_start_xmit_common(curr, dev, 121367d0719fSAaron Young vnet_tx_port); 121431762eaaSAaron Young if (status & NETDEV_TX_MASK) 121531762eaaSAaron Young dev_kfree_skb_any(curr); 121631762eaaSAaron Young } 121731762eaaSAaron Young 121831762eaaSAaron Young if (!(status & NETDEV_TX_MASK)) 121931762eaaSAaron Young dev_kfree_skb_any(skb); 122031762eaaSAaron Young return status; 122131762eaaSAaron Young out_dropped: 122231762eaaSAaron Young dev->stats.tx_dropped++; 122331762eaaSAaron Young dev_kfree_skb_any(skb); 122431762eaaSAaron Young return NETDEV_TX_OK; 122531762eaaSAaron Young } 122631762eaaSAaron Young 122767d0719fSAaron Young int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, 122867d0719fSAaron Young struct vnet_port *(*vnet_tx_port) 122967d0719fSAaron Young (struct sk_buff *, struct net_device *)) 123031762eaaSAaron Young { 123131762eaaSAaron Young struct vnet_port *port = NULL; 123231762eaaSAaron Young struct vio_dring_state *dr; 123331762eaaSAaron Young struct vio_net_desc *d; 123431762eaaSAaron Young unsigned int len; 123531762eaaSAaron Young struct sk_buff *freeskbs = NULL; 123631762eaaSAaron Young int i, err, txi; 123731762eaaSAaron Young unsigned pending = 0; 123831762eaaSAaron Young struct netdev_queue *txq; 123931762eaaSAaron Young 124031762eaaSAaron Young rcu_read_lock(); 124167d0719fSAaron Young port = vnet_tx_port(skb, dev); 124231762eaaSAaron Young if (unlikely(!port)) { 124331762eaaSAaron Young rcu_read_unlock(); 124431762eaaSAaron Young goto out_dropped; 124531762eaaSAaron Young } 124631762eaaSAaron Young 124731762eaaSAaron Young if (skb_is_gso(skb) && skb->len > port->tsolen) { 
124867d0719fSAaron Young err = vnet_handle_offloads(port, skb, vnet_tx_port); 124931762eaaSAaron Young rcu_read_unlock(); 125031762eaaSAaron Young return err; 125131762eaaSAaron Young } 125231762eaaSAaron Young 125331762eaaSAaron Young if (!skb_is_gso(skb) && skb->len > port->rmtu) { 125431762eaaSAaron Young unsigned long localmtu = port->rmtu - ETH_HLEN; 125531762eaaSAaron Young 125631762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 3)) 125731762eaaSAaron Young localmtu -= VLAN_HLEN; 125831762eaaSAaron Young 125931762eaaSAaron Young if (skb->protocol == htons(ETH_P_IP)) { 126031762eaaSAaron Young struct flowi4 fl4; 126131762eaaSAaron Young struct rtable *rt = NULL; 126231762eaaSAaron Young 126331762eaaSAaron Young memset(&fl4, 0, sizeof(fl4)); 126431762eaaSAaron Young fl4.flowi4_oif = dev->ifindex; 126531762eaaSAaron Young fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); 126631762eaaSAaron Young fl4.daddr = ip_hdr(skb)->daddr; 126731762eaaSAaron Young fl4.saddr = ip_hdr(skb)->saddr; 126831762eaaSAaron Young 126931762eaaSAaron Young rt = ip_route_output_key(dev_net(dev), &fl4); 127031762eaaSAaron Young rcu_read_unlock(); 127131762eaaSAaron Young if (!IS_ERR(rt)) { 127231762eaaSAaron Young skb_dst_set(skb, &rt->dst); 127331762eaaSAaron Young icmp_send(skb, ICMP_DEST_UNREACH, 127431762eaaSAaron Young ICMP_FRAG_NEEDED, 127531762eaaSAaron Young htonl(localmtu)); 127631762eaaSAaron Young } 127731762eaaSAaron Young } 127831762eaaSAaron Young #if IS_ENABLED(CONFIG_IPV6) 127931762eaaSAaron Young else if (skb->protocol == htons(ETH_P_IPV6)) 128031762eaaSAaron Young icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); 128131762eaaSAaron Young #endif 128231762eaaSAaron Young goto out_dropped; 128331762eaaSAaron Young } 128431762eaaSAaron Young 128531762eaaSAaron Young skb = vnet_skb_shape(skb, 2); 128631762eaaSAaron Young 128731762eaaSAaron Young if (unlikely(!skb)) 128831762eaaSAaron Young goto out_dropped; 128931762eaaSAaron Young 129031762eaaSAaron Young if 
(skb->ip_summed == CHECKSUM_PARTIAL) 129131762eaaSAaron Young vnet_fullcsum(skb); 129231762eaaSAaron Young 129331762eaaSAaron Young dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 129431762eaaSAaron Young i = skb_get_queue_mapping(skb); 129531762eaaSAaron Young txq = netdev_get_tx_queue(dev, i); 129631762eaaSAaron Young if (unlikely(vnet_tx_dring_avail(dr) < 1)) { 129731762eaaSAaron Young if (!netif_tx_queue_stopped(txq)) { 129831762eaaSAaron Young netif_tx_stop_queue(txq); 129931762eaaSAaron Young 130031762eaaSAaron Young /* This is a hard error, log it. */ 130131762eaaSAaron Young netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); 130231762eaaSAaron Young dev->stats.tx_errors++; 130331762eaaSAaron Young } 130431762eaaSAaron Young rcu_read_unlock(); 130531762eaaSAaron Young return NETDEV_TX_BUSY; 130631762eaaSAaron Young } 130731762eaaSAaron Young 130831762eaaSAaron Young d = vio_dring_cur(dr); 130931762eaaSAaron Young 131031762eaaSAaron Young txi = dr->prod; 131131762eaaSAaron Young 131231762eaaSAaron Young freeskbs = vnet_clean_tx_ring(port, &pending); 131331762eaaSAaron Young 131431762eaaSAaron Young BUG_ON(port->tx_bufs[txi].skb); 131531762eaaSAaron Young 131631762eaaSAaron Young len = skb->len; 131731762eaaSAaron Young if (len < ETH_ZLEN) 131831762eaaSAaron Young len = ETH_ZLEN; 131931762eaaSAaron Young 132031762eaaSAaron Young err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2, 132131762eaaSAaron Young (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); 132231762eaaSAaron Young if (err < 0) { 132331762eaaSAaron Young netdev_info(dev, "tx buffer map error %d\n", err); 132431762eaaSAaron Young goto out_dropped; 132531762eaaSAaron Young } 132631762eaaSAaron Young 132731762eaaSAaron Young port->tx_bufs[txi].skb = skb; 132831762eaaSAaron Young skb = NULL; 132931762eaaSAaron Young port->tx_bufs[txi].ncookies = err; 133031762eaaSAaron Young 133131762eaaSAaron Young /* We don't rely on the ACKs to free the skb in vnet_start_xmit(), 
 * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
 * the protocol itself does not require it as long as the peer
 * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
 *
 * An ACK for every packet in the ring is expensive as the
 * sending of LDC messages is slow and affects performance.
 */
	d->hdr.ack = VIO_ACK_DISABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[txi].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[txi].cookies[i];
	if (vio_version_after_eq(&port->vio, 1, 7)) {
		struct vio_net_dext *dext = vio_net_ext(d);

		memset(dext, 0, sizeof(*dext));
		if (skb_is_gso(port->tx_bufs[txi].skb)) {
			dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
				->gso_size;
			dext->flags |= VNET_PKT_IPV4_LSO;
		}
		if (vio_version_after_eq(&port->vio, 1, 8) &&
		    !port->switch_port) {
			/* Non-switch peers accept our checksum claims directly. */
			dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
			dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
		}
	}

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	dma_wmb();

	d->hdr.state = VIO_DESC_READY;

	/* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
	 * to notify the consumer that some descriptors are READY.
	 * After that "start" trigger, no additional triggers are needed until
	 * a DRING_STOPPED is received from the consumer. The dr->cons field
	 * (set up by vnet_ack()) has the value of the next dring index
	 * that has not yet been ack-ed. We send a "start" trigger here
	 * if, and only if, start_cons is true (reset it afterward). Conversely,
	 * vnet_ack() should check if the dring corresponding to cons
	 * is marked READY, but start_cons was false.
	 * If so, vnet_ack() should send out the missed "start" trigger.
	 *
	 * Note that the dma_wmb() above makes sure the cookies et al. are
	 * not globally visible before the VIO_DESC_READY, and that the
	 * stores are ordered correctly by the compiler. The consumer will
	 * not proceed until the VIO_DESC_READY is visible assuring that
	 * the consumer does not observe anything related to descriptors
	 * out of order. The HV trap from the LDC start trigger is the
	 * producer to consumer announcement that work is available to the
	 * consumer
	 */
	if (!port->start_cons) { /* previous trigger suffices */
		trace_vnet_skip_tx_trigger(port->vio._local_sid,
					   port->vio._peer_sid, dr->cons);
		goto ldc_start_done;
	}

	err = __vnet_tx_trigger(port, dr->cons);
	if (unlikely(err < 0)) {
		/* Trigger failed: unwind this descriptor and drop the skb. */
		netdev_info(dev, "TX trigger error %d\n", err);
		d->hdr.state = VIO_DESC_FREE;
		skb = port->tx_bufs[txi].skb;
		port->tx_bufs[txi].skb = NULL;
		dev->stats.tx_carrier_errors++;
		goto out_dropped;
	}

ldc_start_done:
	port->start_cons = false;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		/* Ring full: stop the queue, but re-wake immediately if a
		 * concurrent vnet_ack() already freed enough descriptors.
		 */
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_tx_wake_queue(txq);
	}

	(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
	rcu_read_unlock();

	vnet_free_skbs(freeskbs);

	return NETDEV_TX_OK;

out_dropped:
	/* NOTE(review): port appears to be NULL-checkable here because the
	 * drop path can be taken before a port was looked up earlier in this
	 * function (head not visible in this chunk) — TODO confirm.
	 */
	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else if (port)
		del_timer(&port->clean_timer);
	if (port)
		rcu_read_unlock();
	if (skb)
		dev_kfree_skb(skb);
	vnet_free_skbs(freeskbs);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common);

/* TX watchdog hook; not implemented yet (see XXX). */
void sunvnet_tx_timeout_common(struct net_device *dev)
{
	/* XXX Implement me XXX */
}
EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common);

/* ndo_open: bring carrier up and start all TX queues. Always succeeds. */
int sunvnet_open_common(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_open_common);

/* ndo_stop: stop all TX queues and drop carrier. Always succeeds. */
int sunvnet_close_common(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_close_common);

/* Linear scan of vp->mcast_list for @addr; returns the matching entry
 * or NULL if the address is not in the list.
 */
static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
	struct vnet_mcast_entry *m;

	for (m = vp->mcast_list; m; m = m->next) {
		if (ether_addr_equal(m->addr, addr))
			return m;
	}
	return NULL;
}

/* Merge the device's current multicast list into vp->mcast_list,
 * marking every address still present with hit = 1. New addresses are
 * prepended (GFP_ATOMIC; allocation failures are silently skipped).
 */
static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		struct vnet_mcast_entry *m;

		m = __vnet_mc_find(vp, ha->addr);
		if (m) {
			m->hit = 1;
			continue;
		}

		/* NOTE(review): m is always NULL at this point (the found
		 * case continues above), so this check is redundant.
		 */
		if (!m) {
			m = kzalloc(sizeof(*m), GFP_ATOMIC);
			if (!m)
				continue;
			memcpy(m->addr, ha->addr, ETH_ALEN);
			m->hit = 1;

			m->next = vp->mcast_list;
			vp->mcast_list = m;
		}
	}
}

/* Synchronize vp->mcast_list with the peer: first send all not-yet-sent
 * addresses (set = 1) in batches of VNET_NUM_MCAST, then send removals
 * (set = 0) for every entry that was not re-marked by the last
 * __update_mc_list() pass, unlinking and freeing those entries.
 * vio_ldc_send() results are deliberately ignored (best effort).
 */
static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
	struct vio_net_mcast_info info;
	struct vnet_mcast_entry *m, **pp;
	int n_addrs;

	memset(&info, 0, sizeof(info));

	info.tag.type = VIO_TYPE_CTRL;
	info.tag.stype = VIO_SUBTYPE_INFO;
	info.tag.stype_env = VNET_MCAST_INFO;
	info.tag.sid = vio_send_sid(&port->vio);
	info.set = 1;

	n_addrs = 0;
	for (m = vp->mcast_list; m; m = m->next) {
		if (m->sent)
			continue;
		m->sent = 1;
		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;

			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}
	}
	if (n_addrs) {
		/* Flush the final partial batch of additions. */
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}

	info.set = 0;

	n_addrs = 0;
	pp = &vp->mcast_list;
	while ((m = *pp) != NULL) {
		if (m->hit) {
			/* Still wanted: clear the mark for the next pass. */
			m->hit = 0;
			pp = &m->next;
			continue;
		}

		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;
			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}

		*pp = m->next;
		kfree(m);
	}
	if (n_addrs) {
		/* Flush the final partial batch of removals. */
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}
}
156231762eaaSAaron Young 156367d0719fSAaron Young void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp) 156431762eaaSAaron Young { 156531762eaaSAaron Young struct vnet_port *port; 156631762eaaSAaron Young 156731762eaaSAaron Young rcu_read_lock(); 156831762eaaSAaron Young list_for_each_entry_rcu(port, &vp->port_list, list) { 156931762eaaSAaron Young 157031762eaaSAaron Young if (port->switch_port) { 157131762eaaSAaron Young __update_mc_list(vp, dev); 157231762eaaSAaron Young __send_mc_list(vp, port); 157331762eaaSAaron Young break; 157431762eaaSAaron Young } 157531762eaaSAaron Young } 157631762eaaSAaron Young rcu_read_unlock(); 157731762eaaSAaron Young } 157831762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common); 157931762eaaSAaron Young 158031762eaaSAaron Young int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu) 158131762eaaSAaron Young { 158231762eaaSAaron Young if (new_mtu < 68 || new_mtu > 65535) 158331762eaaSAaron Young return -EINVAL; 158431762eaaSAaron Young 158531762eaaSAaron Young dev->mtu = new_mtu; 158631762eaaSAaron Young return 0; 158731762eaaSAaron Young } 158831762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_change_mtu_common); 158931762eaaSAaron Young 159031762eaaSAaron Young int sunvnet_set_mac_addr_common(struct net_device *dev, void *p) 159131762eaaSAaron Young { 159231762eaaSAaron Young return -EINVAL; 159331762eaaSAaron Young } 159431762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common); 159531762eaaSAaron Young 159631762eaaSAaron Young void sunvnet_port_free_tx_bufs_common(struct vnet_port *port) 159731762eaaSAaron Young { 159831762eaaSAaron Young struct vio_dring_state *dr; 159931762eaaSAaron Young int i; 160031762eaaSAaron Young 160131762eaaSAaron Young dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 160231762eaaSAaron Young 160331762eaaSAaron Young if (dr->base == NULL) 160431762eaaSAaron Young return; 160531762eaaSAaron Young 160631762eaaSAaron Young for (i = 0; i < VNET_TX_RING_SIZE; i++) { 
160731762eaaSAaron Young struct vio_net_desc *d; 160831762eaaSAaron Young void *skb = port->tx_bufs[i].skb; 160931762eaaSAaron Young 161031762eaaSAaron Young if (!skb) 161131762eaaSAaron Young continue; 161231762eaaSAaron Young 161331762eaaSAaron Young d = vio_dring_entry(dr, i); 161431762eaaSAaron Young 161531762eaaSAaron Young ldc_unmap(port->vio.lp, 161631762eaaSAaron Young port->tx_bufs[i].cookies, 161731762eaaSAaron Young port->tx_bufs[i].ncookies); 161831762eaaSAaron Young dev_kfree_skb(skb); 161931762eaaSAaron Young port->tx_bufs[i].skb = NULL; 162031762eaaSAaron Young d->hdr.state = VIO_DESC_FREE; 162131762eaaSAaron Young } 162231762eaaSAaron Young ldc_free_exp_dring(port->vio.lp, dr->base, 162331762eaaSAaron Young (dr->entry_size * dr->num_entries), 162431762eaaSAaron Young dr->cookies, dr->ncookies); 162531762eaaSAaron Young dr->base = NULL; 162631762eaaSAaron Young dr->entry_size = 0; 162731762eaaSAaron Young dr->num_entries = 0; 162831762eaaSAaron Young dr->pending = 0; 162931762eaaSAaron Young dr->ncookies = 0; 163031762eaaSAaron Young } 163131762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common); 163231762eaaSAaron Young 163331762eaaSAaron Young static void vnet_port_reset(struct vnet_port *port) 163431762eaaSAaron Young { 163531762eaaSAaron Young del_timer(&port->clean_timer); 163631762eaaSAaron Young sunvnet_port_free_tx_bufs_common(port); 163731762eaaSAaron Young port->rmtu = 0; 163831762eaaSAaron Young port->tso = true; 163931762eaaSAaron Young port->tsolen = 0; 164031762eaaSAaron Young } 164131762eaaSAaron Young 164231762eaaSAaron Young static int vnet_port_alloc_tx_ring(struct vnet_port *port) 164331762eaaSAaron Young { 164431762eaaSAaron Young struct vio_dring_state *dr; 164531762eaaSAaron Young unsigned long len, elen; 164631762eaaSAaron Young int i, err, ncookies; 164731762eaaSAaron Young void *dring; 164831762eaaSAaron Young 164931762eaaSAaron Young dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 165031762eaaSAaron Young 
165131762eaaSAaron Young elen = sizeof(struct vio_net_desc) + 165231762eaaSAaron Young sizeof(struct ldc_trans_cookie) * 2; 165331762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 7)) 165431762eaaSAaron Young elen += sizeof(struct vio_net_dext); 165531762eaaSAaron Young len = VNET_TX_RING_SIZE * elen; 165631762eaaSAaron Young 165731762eaaSAaron Young ncookies = VIO_MAX_RING_COOKIES; 165831762eaaSAaron Young dring = ldc_alloc_exp_dring(port->vio.lp, len, 165931762eaaSAaron Young dr->cookies, &ncookies, 166031762eaaSAaron Young (LDC_MAP_SHADOW | 166131762eaaSAaron Young LDC_MAP_DIRECT | 166231762eaaSAaron Young LDC_MAP_RW)); 166331762eaaSAaron Young if (IS_ERR(dring)) { 166431762eaaSAaron Young err = PTR_ERR(dring); 166531762eaaSAaron Young goto err_out; 166631762eaaSAaron Young } 166731762eaaSAaron Young 166831762eaaSAaron Young dr->base = dring; 166931762eaaSAaron Young dr->entry_size = elen; 167031762eaaSAaron Young dr->num_entries = VNET_TX_RING_SIZE; 167131762eaaSAaron Young dr->prod = dr->cons = 0; 167231762eaaSAaron Young port->start_cons = true; /* need an initial trigger */ 167331762eaaSAaron Young dr->pending = VNET_TX_RING_SIZE; 167431762eaaSAaron Young dr->ncookies = ncookies; 167531762eaaSAaron Young 167631762eaaSAaron Young for (i = 0; i < VNET_TX_RING_SIZE; ++i) { 167731762eaaSAaron Young struct vio_net_desc *d; 167831762eaaSAaron Young 167931762eaaSAaron Young d = vio_dring_entry(dr, i); 168031762eaaSAaron Young d->hdr.state = VIO_DESC_FREE; 168131762eaaSAaron Young } 168231762eaaSAaron Young return 0; 168331762eaaSAaron Young 168431762eaaSAaron Young err_out: 168531762eaaSAaron Young sunvnet_port_free_tx_bufs_common(port); 168631762eaaSAaron Young 168731762eaaSAaron Young return err; 168831762eaaSAaron Young } 168931762eaaSAaron Young 169031762eaaSAaron Young #ifdef CONFIG_NET_POLL_CONTROLLER 169167d0719fSAaron Young void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp) 169231762eaaSAaron Young { 169331762eaaSAaron 
Young struct vnet_port *port; 169431762eaaSAaron Young unsigned long flags; 169531762eaaSAaron Young 169631762eaaSAaron Young spin_lock_irqsave(&vp->lock, flags); 169731762eaaSAaron Young if (!list_empty(&vp->port_list)) { 169831762eaaSAaron Young port = list_entry(vp->port_list.next, struct vnet_port, list); 169931762eaaSAaron Young napi_schedule(&port->napi); 170031762eaaSAaron Young } 170131762eaaSAaron Young spin_unlock_irqrestore(&vp->lock, flags); 170231762eaaSAaron Young } 170331762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common); 170431762eaaSAaron Young #endif 170531762eaaSAaron Young 170631762eaaSAaron Young void sunvnet_port_add_txq_common(struct vnet_port *port) 170731762eaaSAaron Young { 170831762eaaSAaron Young struct vnet *vp = port->vp; 170931762eaaSAaron Young int n; 171031762eaaSAaron Young 171131762eaaSAaron Young n = vp->nports++; 171231762eaaSAaron Young n = n & (VNET_MAX_TXQS - 1); 171331762eaaSAaron Young port->q_index = n; 171467d0719fSAaron Young netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), 171567d0719fSAaron Young port->q_index)); 171667d0719fSAaron Young 171731762eaaSAaron Young } 171831762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common); 171931762eaaSAaron Young 172031762eaaSAaron Young void sunvnet_port_rm_txq_common(struct vnet_port *port) 172131762eaaSAaron Young { 172231762eaaSAaron Young port->vp->nports--; 172367d0719fSAaron Young netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), 172467d0719fSAaron Young port->q_index)); 172531762eaaSAaron Young } 172631762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common); 1727