131762eaaSAaron Young /* sunvnet.c: Sun LDOM Virtual Network Driver. 231762eaaSAaron Young * 331762eaaSAaron Young * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> 431762eaaSAaron Young */ 531762eaaSAaron Young 631762eaaSAaron Young #include <linux/module.h> 731762eaaSAaron Young #include <linux/kernel.h> 831762eaaSAaron Young #include <linux/types.h> 931762eaaSAaron Young #include <linux/slab.h> 1031762eaaSAaron Young #include <linux/delay.h> 1131762eaaSAaron Young #include <linux/init.h> 1231762eaaSAaron Young #include <linux/netdevice.h> 1331762eaaSAaron Young #include <linux/ethtool.h> 1431762eaaSAaron Young #include <linux/etherdevice.h> 1531762eaaSAaron Young #include <linux/mutex.h> 1631762eaaSAaron Young #include <linux/highmem.h> 1731762eaaSAaron Young #include <linux/if_vlan.h> 1831762eaaSAaron Young #define CREATE_TRACE_POINTS 1931762eaaSAaron Young #include <trace/events/sunvnet.h> 2031762eaaSAaron Young 2131762eaaSAaron Young #if IS_ENABLED(CONFIG_IPV6) 2231762eaaSAaron Young #include <linux/icmpv6.h> 2331762eaaSAaron Young #endif 2431762eaaSAaron Young 2531762eaaSAaron Young #include <net/ip.h> 2631762eaaSAaron Young #include <net/icmp.h> 2731762eaaSAaron Young #include <net/route.h> 2831762eaaSAaron Young 2931762eaaSAaron Young #include <asm/vio.h> 3031762eaaSAaron Young #include <asm/ldc.h> 3131762eaaSAaron Young 3231762eaaSAaron Young #include "sunvnet_common.h" 3331762eaaSAaron Young 3431762eaaSAaron Young /* Heuristic for the number of times to exponentially backoff and 3531762eaaSAaron Young * retry sending an LDC trigger when EAGAIN is encountered 3631762eaaSAaron Young */ 3731762eaaSAaron Young #define VNET_MAX_RETRIES 10 3831762eaaSAaron Young 3931762eaaSAaron Young static int __vnet_tx_trigger(struct vnet_port *port, u32 start); 4031762eaaSAaron Young static void vnet_port_reset(struct vnet_port *port); 4131762eaaSAaron Young 4231762eaaSAaron Young static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) 
4331762eaaSAaron Young { 4431762eaaSAaron Young return vio_dring_avail(dr, VNET_TX_RING_SIZE); 4531762eaaSAaron Young } 4631762eaaSAaron Young 4731762eaaSAaron Young static int vnet_handle_unknown(struct vnet_port *port, void *arg) 4831762eaaSAaron Young { 4931762eaaSAaron Young struct vio_msg_tag *pkt = arg; 5031762eaaSAaron Young 5131762eaaSAaron Young pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n", 5231762eaaSAaron Young pkt->type, pkt->stype, pkt->stype_env, pkt->sid); 5331762eaaSAaron Young pr_err("Resetting connection\n"); 5431762eaaSAaron Young 5531762eaaSAaron Young ldc_disconnect(port->vio.lp); 5631762eaaSAaron Young 5731762eaaSAaron Young return -ECONNRESET; 5831762eaaSAaron Young } 5931762eaaSAaron Young 6031762eaaSAaron Young static int vnet_port_alloc_tx_ring(struct vnet_port *port); 6131762eaaSAaron Young 6231762eaaSAaron Young int sunvnet_send_attr_common(struct vio_driver_state *vio) 6331762eaaSAaron Young { 6431762eaaSAaron Young struct vnet_port *port = to_vnet_port(vio); 6531762eaaSAaron Young struct net_device *dev = port->vp->dev; 6631762eaaSAaron Young struct vio_net_attr_info pkt; 6731762eaaSAaron Young int framelen = ETH_FRAME_LEN; 6831762eaaSAaron Young int i, err; 6931762eaaSAaron Young 7031762eaaSAaron Young err = vnet_port_alloc_tx_ring(to_vnet_port(vio)); 7131762eaaSAaron Young if (err) 7231762eaaSAaron Young return err; 7331762eaaSAaron Young 7431762eaaSAaron Young memset(&pkt, 0, sizeof(pkt)); 7531762eaaSAaron Young pkt.tag.type = VIO_TYPE_CTRL; 7631762eaaSAaron Young pkt.tag.stype = VIO_SUBTYPE_INFO; 7731762eaaSAaron Young pkt.tag.stype_env = VIO_ATTR_INFO; 7831762eaaSAaron Young pkt.tag.sid = vio_send_sid(vio); 7931762eaaSAaron Young if (vio_version_before(vio, 1, 2)) 8031762eaaSAaron Young pkt.xfer_mode = VIO_DRING_MODE; 8131762eaaSAaron Young else 8231762eaaSAaron Young pkt.xfer_mode = VIO_NEW_DRING_MODE; 8331762eaaSAaron Young pkt.addr_type = VNET_ADDR_ETHERMAC; 8431762eaaSAaron Young pkt.ack_freq = 0; 8531762eaaSAaron 
Young for (i = 0; i < 6; i++) 8631762eaaSAaron Young pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); 8731762eaaSAaron Young if (vio_version_after(vio, 1, 3)) { 8831762eaaSAaron Young if (port->rmtu) { 8931762eaaSAaron Young port->rmtu = min(VNET_MAXPACKET, port->rmtu); 9031762eaaSAaron Young pkt.mtu = port->rmtu; 9131762eaaSAaron Young } else { 9231762eaaSAaron Young port->rmtu = VNET_MAXPACKET; 9331762eaaSAaron Young pkt.mtu = port->rmtu; 9431762eaaSAaron Young } 9531762eaaSAaron Young if (vio_version_after_eq(vio, 1, 6)) 9631762eaaSAaron Young pkt.options = VIO_TX_DRING; 9731762eaaSAaron Young } else if (vio_version_before(vio, 1, 3)) { 9831762eaaSAaron Young pkt.mtu = framelen; 9931762eaaSAaron Young } else { /* v1.3 */ 10031762eaaSAaron Young pkt.mtu = framelen + VLAN_HLEN; 10131762eaaSAaron Young } 10231762eaaSAaron Young 10331762eaaSAaron Young pkt.cflags = 0; 10431762eaaSAaron Young if (vio_version_after_eq(vio, 1, 7) && port->tso) { 10531762eaaSAaron Young pkt.cflags |= VNET_LSO_IPV4_CAPAB; 10631762eaaSAaron Young if (!port->tsolen) 10731762eaaSAaron Young port->tsolen = VNET_MAXTSO; 10831762eaaSAaron Young pkt.ipv4_lso_maxlen = port->tsolen; 10931762eaaSAaron Young } 11031762eaaSAaron Young 11131762eaaSAaron Young pkt.plnk_updt = PHYSLINK_UPDATE_NONE; 11231762eaaSAaron Young 11331762eaaSAaron Young viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " 11431762eaaSAaron Young "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " 11531762eaaSAaron Young "cflags[0x%04x] lso_max[%u]\n", 11631762eaaSAaron Young pkt.xfer_mode, pkt.addr_type, 11731762eaaSAaron Young (unsigned long long)pkt.addr, 11831762eaaSAaron Young pkt.ack_freq, pkt.plnk_updt, pkt.options, 11931762eaaSAaron Young (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen); 12031762eaaSAaron Young 12131762eaaSAaron Young 12231762eaaSAaron Young return vio_ldc_send(vio, &pkt, sizeof(pkt)); 12331762eaaSAaron Young } 12431762eaaSAaron Young 
EXPORT_SYMBOL_GPL(sunvnet_send_attr_common); 12531762eaaSAaron Young 12631762eaaSAaron Young static int handle_attr_info(struct vio_driver_state *vio, 12731762eaaSAaron Young struct vio_net_attr_info *pkt) 12831762eaaSAaron Young { 12931762eaaSAaron Young struct vnet_port *port = to_vnet_port(vio); 13031762eaaSAaron Young u64 localmtu; 13131762eaaSAaron Young u8 xfer_mode; 13231762eaaSAaron Young 13331762eaaSAaron Young viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " 13431762eaaSAaron Young "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " 13531762eaaSAaron Young " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", 13631762eaaSAaron Young pkt->xfer_mode, pkt->addr_type, 13731762eaaSAaron Young (unsigned long long)pkt->addr, 13831762eaaSAaron Young pkt->ack_freq, pkt->plnk_updt, pkt->options, 13931762eaaSAaron Young (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, 14031762eaaSAaron Young pkt->ipv4_lso_maxlen); 14131762eaaSAaron Young 14231762eaaSAaron Young pkt->tag.sid = vio_send_sid(vio); 14331762eaaSAaron Young 14431762eaaSAaron Young xfer_mode = pkt->xfer_mode; 14531762eaaSAaron Young /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */ 14631762eaaSAaron Young if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE) 14731762eaaSAaron Young xfer_mode = VIO_NEW_DRING_MODE; 14831762eaaSAaron Young 14931762eaaSAaron Young /* MTU negotiation: 15031762eaaSAaron Young * < v1.3 - ETH_FRAME_LEN exactly 15131762eaaSAaron Young * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change 15231762eaaSAaron Young * pkt->mtu for ACK 15331762eaaSAaron Young * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly 15431762eaaSAaron Young */ 15531762eaaSAaron Young if (vio_version_before(vio, 1, 3)) { 15631762eaaSAaron Young localmtu = ETH_FRAME_LEN; 15731762eaaSAaron Young } else if (vio_version_after(vio, 1, 3)) { 15831762eaaSAaron Young localmtu = port->rmtu ? 
port->rmtu : VNET_MAXPACKET; 15931762eaaSAaron Young localmtu = min(pkt->mtu, localmtu); 16031762eaaSAaron Young pkt->mtu = localmtu; 16131762eaaSAaron Young } else { /* v1.3 */ 16231762eaaSAaron Young localmtu = ETH_FRAME_LEN + VLAN_HLEN; 16331762eaaSAaron Young } 16431762eaaSAaron Young port->rmtu = localmtu; 16531762eaaSAaron Young 16631762eaaSAaron Young /* LSO negotiation */ 16731762eaaSAaron Young if (vio_version_after_eq(vio, 1, 7)) 16831762eaaSAaron Young port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB); 16931762eaaSAaron Young else 17031762eaaSAaron Young port->tso = false; 17131762eaaSAaron Young if (port->tso) { 17231762eaaSAaron Young if (!port->tsolen) 17331762eaaSAaron Young port->tsolen = VNET_MAXTSO; 17431762eaaSAaron Young port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen); 17531762eaaSAaron Young if (port->tsolen < VNET_MINTSO) { 17631762eaaSAaron Young port->tso = false; 17731762eaaSAaron Young port->tsolen = 0; 17831762eaaSAaron Young pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; 17931762eaaSAaron Young } 18031762eaaSAaron Young pkt->ipv4_lso_maxlen = port->tsolen; 18131762eaaSAaron Young } else { 18231762eaaSAaron Young pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; 18331762eaaSAaron Young pkt->ipv4_lso_maxlen = 0; 18431762eaaSAaron Young } 18531762eaaSAaron Young 18631762eaaSAaron Young /* for version >= 1.6, ACK packet mode we support */ 18731762eaaSAaron Young if (vio_version_after_eq(vio, 1, 6)) { 18831762eaaSAaron Young pkt->xfer_mode = VIO_NEW_DRING_MODE; 18931762eaaSAaron Young pkt->options = VIO_TX_DRING; 19031762eaaSAaron Young } 19131762eaaSAaron Young 19231762eaaSAaron Young if (!(xfer_mode | VIO_NEW_DRING_MODE) || 19331762eaaSAaron Young pkt->addr_type != VNET_ADDR_ETHERMAC || 19431762eaaSAaron Young pkt->mtu != localmtu) { 19531762eaaSAaron Young viodbg(HS, "SEND NET ATTR NACK\n"); 19631762eaaSAaron Young 19731762eaaSAaron Young pkt->tag.stype = VIO_SUBTYPE_NACK; 19831762eaaSAaron Young 19931762eaaSAaron Young (void) vio_ldc_send(vio, pkt, 
sizeof(*pkt)); 20031762eaaSAaron Young 20131762eaaSAaron Young return -ECONNRESET; 20231762eaaSAaron Young } else { 20331762eaaSAaron Young viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] " 20431762eaaSAaron Young "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] " 20531762eaaSAaron Young "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", 20631762eaaSAaron Young pkt->xfer_mode, pkt->addr_type, 20731762eaaSAaron Young (unsigned long long)pkt->addr, 20831762eaaSAaron Young pkt->ack_freq, pkt->plnk_updt, pkt->options, 20931762eaaSAaron Young (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, 21031762eaaSAaron Young pkt->ipv4_lso_maxlen); 21131762eaaSAaron Young 21231762eaaSAaron Young pkt->tag.stype = VIO_SUBTYPE_ACK; 21331762eaaSAaron Young 21431762eaaSAaron Young return vio_ldc_send(vio, pkt, sizeof(*pkt)); 21531762eaaSAaron Young } 21631762eaaSAaron Young 21731762eaaSAaron Young } 21831762eaaSAaron Young 21931762eaaSAaron Young static int handle_attr_ack(struct vio_driver_state *vio, 22031762eaaSAaron Young struct vio_net_attr_info *pkt) 22131762eaaSAaron Young { 22231762eaaSAaron Young viodbg(HS, "GOT NET ATTR ACK\n"); 22331762eaaSAaron Young 22431762eaaSAaron Young return 0; 22531762eaaSAaron Young } 22631762eaaSAaron Young 22731762eaaSAaron Young static int handle_attr_nack(struct vio_driver_state *vio, 22831762eaaSAaron Young struct vio_net_attr_info *pkt) 22931762eaaSAaron Young { 23031762eaaSAaron Young viodbg(HS, "GOT NET ATTR NACK\n"); 23131762eaaSAaron Young 23231762eaaSAaron Young return -ECONNRESET; 23331762eaaSAaron Young } 23431762eaaSAaron Young 23531762eaaSAaron Young int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg) 23631762eaaSAaron Young { 23731762eaaSAaron Young struct vio_net_attr_info *pkt = arg; 23831762eaaSAaron Young 23931762eaaSAaron Young switch (pkt->tag.stype) { 24031762eaaSAaron Young case VIO_SUBTYPE_INFO: 24131762eaaSAaron Young return handle_attr_info(vio, pkt); 24231762eaaSAaron Young 
24331762eaaSAaron Young case VIO_SUBTYPE_ACK: 24431762eaaSAaron Young return handle_attr_ack(vio, pkt); 24531762eaaSAaron Young 24631762eaaSAaron Young case VIO_SUBTYPE_NACK: 24731762eaaSAaron Young return handle_attr_nack(vio, pkt); 24831762eaaSAaron Young 24931762eaaSAaron Young default: 25031762eaaSAaron Young return -ECONNRESET; 25131762eaaSAaron Young } 25231762eaaSAaron Young } 25331762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common); 25431762eaaSAaron Young 25531762eaaSAaron Young void sunvnet_handshake_complete_common(struct vio_driver_state *vio) 25631762eaaSAaron Young { 25731762eaaSAaron Young struct vio_dring_state *dr; 25831762eaaSAaron Young 25931762eaaSAaron Young dr = &vio->drings[VIO_DRIVER_RX_RING]; 26031762eaaSAaron Young dr->snd_nxt = dr->rcv_nxt = 1; 26131762eaaSAaron Young 26231762eaaSAaron Young dr = &vio->drings[VIO_DRIVER_TX_RING]; 26331762eaaSAaron Young dr->snd_nxt = dr->rcv_nxt = 1; 26431762eaaSAaron Young } 26531762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common); 26631762eaaSAaron Young 26731762eaaSAaron Young /* The hypervisor interface that implements copying to/from imported 26831762eaaSAaron Young * memory from another domain requires that copies are done to 8-byte 26931762eaaSAaron Young * aligned buffers, and that the lengths of such copies are also 8-byte 27031762eaaSAaron Young * multiples. 27131762eaaSAaron Young * 27231762eaaSAaron Young * So we align skb->data to an 8-byte multiple and pad-out the data 27331762eaaSAaron Young * area so we can round the copy length up to the next multiple of 27431762eaaSAaron Young * 8 for the copy. 27531762eaaSAaron Young * 27631762eaaSAaron Young * The transmitter puts the actual start of the packet 6 bytes into 27731762eaaSAaron Young * the buffer it sends over, so that the IP headers after the ethernet 27831762eaaSAaron Young * header are aligned properly. These 6 bytes are not in the descriptor 27931762eaaSAaron Young * length, they are simply implied. 
This offset is represented using 28031762eaaSAaron Young * the VNET_PACKET_SKIP macro. 28131762eaaSAaron Young */ 28231762eaaSAaron Young static struct sk_buff *alloc_and_align_skb(struct net_device *dev, 28331762eaaSAaron Young unsigned int len) 28431762eaaSAaron Young { 28531762eaaSAaron Young struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8); 28631762eaaSAaron Young unsigned long addr, off; 28731762eaaSAaron Young 28831762eaaSAaron Young if (unlikely(!skb)) 28931762eaaSAaron Young return NULL; 29031762eaaSAaron Young 29131762eaaSAaron Young addr = (unsigned long) skb->data; 29231762eaaSAaron Young off = ((addr + 7UL) & ~7UL) - addr; 29331762eaaSAaron Young if (off) 29431762eaaSAaron Young skb_reserve(skb, off); 29531762eaaSAaron Young 29631762eaaSAaron Young return skb; 29731762eaaSAaron Young } 29831762eaaSAaron Young 29931762eaaSAaron Young static inline void vnet_fullcsum(struct sk_buff *skb) 30031762eaaSAaron Young { 30131762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 30231762eaaSAaron Young int offset = skb_transport_offset(skb); 30331762eaaSAaron Young 30431762eaaSAaron Young if (skb->protocol != htons(ETH_P_IP)) 30531762eaaSAaron Young return; 30631762eaaSAaron Young if (iph->protocol != IPPROTO_TCP && 30731762eaaSAaron Young iph->protocol != IPPROTO_UDP) 30831762eaaSAaron Young return; 30931762eaaSAaron Young skb->ip_summed = CHECKSUM_NONE; 31031762eaaSAaron Young skb->csum_level = 1; 31131762eaaSAaron Young skb->csum = 0; 31231762eaaSAaron Young if (iph->protocol == IPPROTO_TCP) { 31331762eaaSAaron Young struct tcphdr *ptcp = tcp_hdr(skb); 31431762eaaSAaron Young 31531762eaaSAaron Young ptcp->check = 0; 31631762eaaSAaron Young skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 31731762eaaSAaron Young ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 31831762eaaSAaron Young skb->len - offset, IPPROTO_TCP, 31931762eaaSAaron Young skb->csum); 32031762eaaSAaron Young } else if (iph->protocol == IPPROTO_UDP) { 
32131762eaaSAaron Young struct udphdr *pudp = udp_hdr(skb); 32231762eaaSAaron Young 32331762eaaSAaron Young pudp->check = 0; 32431762eaaSAaron Young skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 32531762eaaSAaron Young pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 32631762eaaSAaron Young skb->len - offset, IPPROTO_UDP, 32731762eaaSAaron Young skb->csum); 32831762eaaSAaron Young } 32931762eaaSAaron Young } 33031762eaaSAaron Young 33131762eaaSAaron Young static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) 33231762eaaSAaron Young { 33331762eaaSAaron Young struct net_device *dev = port->vp->dev; 33431762eaaSAaron Young unsigned int len = desc->size; 33531762eaaSAaron Young unsigned int copy_len; 33631762eaaSAaron Young struct sk_buff *skb; 33731762eaaSAaron Young int maxlen; 33831762eaaSAaron Young int err; 33931762eaaSAaron Young 34031762eaaSAaron Young err = -EMSGSIZE; 34131762eaaSAaron Young if (port->tso && port->tsolen > port->rmtu) 34231762eaaSAaron Young maxlen = port->tsolen; 34331762eaaSAaron Young else 34431762eaaSAaron Young maxlen = port->rmtu; 34531762eaaSAaron Young if (unlikely(len < ETH_ZLEN || len > maxlen)) { 34631762eaaSAaron Young dev->stats.rx_length_errors++; 34731762eaaSAaron Young goto out_dropped; 34831762eaaSAaron Young } 34931762eaaSAaron Young 35031762eaaSAaron Young skb = alloc_and_align_skb(dev, len); 35131762eaaSAaron Young err = -ENOMEM; 35231762eaaSAaron Young if (unlikely(!skb)) { 35331762eaaSAaron Young dev->stats.rx_missed_errors++; 35431762eaaSAaron Young goto out_dropped; 35531762eaaSAaron Young } 35631762eaaSAaron Young 35731762eaaSAaron Young copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U; 35831762eaaSAaron Young skb_put(skb, copy_len); 35931762eaaSAaron Young err = ldc_copy(port->vio.lp, LDC_COPY_IN, 36031762eaaSAaron Young skb->data, copy_len, 0, 36131762eaaSAaron Young desc->cookies, desc->ncookies); 36231762eaaSAaron Young if (unlikely(err < 0)) { 36331762eaaSAaron Young 
dev->stats.rx_frame_errors++; 36431762eaaSAaron Young goto out_free_skb; 36531762eaaSAaron Young } 36631762eaaSAaron Young 36731762eaaSAaron Young skb_pull(skb, VNET_PACKET_SKIP); 36831762eaaSAaron Young skb_trim(skb, len); 36931762eaaSAaron Young skb->protocol = eth_type_trans(skb, dev); 37031762eaaSAaron Young 37131762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 8)) { 37231762eaaSAaron Young struct vio_net_dext *dext = vio_net_ext(desc); 37331762eaaSAaron Young 37431762eaaSAaron Young skb_reset_network_header(skb); 37531762eaaSAaron Young 37631762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { 37731762eaaSAaron Young if (skb->protocol == ETH_P_IP) { 37831762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 37931762eaaSAaron Young 38031762eaaSAaron Young iph->check = 0; 38131762eaaSAaron Young ip_send_check(iph); 38231762eaaSAaron Young } 38331762eaaSAaron Young } 38431762eaaSAaron Young if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && 38531762eaaSAaron Young skb->ip_summed == CHECKSUM_NONE) { 38631762eaaSAaron Young if (skb->protocol == htons(ETH_P_IP)) { 38731762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 38831762eaaSAaron Young int ihl = iph->ihl * 4; 38931762eaaSAaron Young 39031762eaaSAaron Young skb_reset_transport_header(skb); 39131762eaaSAaron Young skb_set_transport_header(skb, ihl); 39231762eaaSAaron Young vnet_fullcsum(skb); 39331762eaaSAaron Young } 39431762eaaSAaron Young } 39531762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { 39631762eaaSAaron Young skb->ip_summed = CHECKSUM_PARTIAL; 39731762eaaSAaron Young skb->csum_level = 0; 39831762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK) 39931762eaaSAaron Young skb->csum_level = 1; 40031762eaaSAaron Young } 40131762eaaSAaron Young } 40231762eaaSAaron Young 40331762eaaSAaron Young skb->ip_summed = port->switch_port ? 
CHECKSUM_NONE : CHECKSUM_PARTIAL; 40431762eaaSAaron Young 40531762eaaSAaron Young dev->stats.rx_packets++; 40631762eaaSAaron Young dev->stats.rx_bytes += len; 40731762eaaSAaron Young napi_gro_receive(&port->napi, skb); 40831762eaaSAaron Young return 0; 40931762eaaSAaron Young 41031762eaaSAaron Young out_free_skb: 41131762eaaSAaron Young kfree_skb(skb); 41231762eaaSAaron Young 41331762eaaSAaron Young out_dropped: 41431762eaaSAaron Young dev->stats.rx_dropped++; 41531762eaaSAaron Young return err; 41631762eaaSAaron Young } 41731762eaaSAaron Young 41831762eaaSAaron Young static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, 41931762eaaSAaron Young u32 start, u32 end, u8 vio_dring_state) 42031762eaaSAaron Young { 42131762eaaSAaron Young struct vio_dring_data hdr = { 42231762eaaSAaron Young .tag = { 42331762eaaSAaron Young .type = VIO_TYPE_DATA, 42431762eaaSAaron Young .stype = VIO_SUBTYPE_ACK, 42531762eaaSAaron Young .stype_env = VIO_DRING_DATA, 42631762eaaSAaron Young .sid = vio_send_sid(&port->vio), 42731762eaaSAaron Young }, 42831762eaaSAaron Young .dring_ident = dr->ident, 42931762eaaSAaron Young .start_idx = start, 43031762eaaSAaron Young .end_idx = end, 43131762eaaSAaron Young .state = vio_dring_state, 43231762eaaSAaron Young }; 43331762eaaSAaron Young int err, delay; 43431762eaaSAaron Young int retries = 0; 43531762eaaSAaron Young 43631762eaaSAaron Young hdr.seq = dr->snd_nxt; 43731762eaaSAaron Young delay = 1; 43831762eaaSAaron Young do { 43931762eaaSAaron Young err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); 44031762eaaSAaron Young if (err > 0) { 44131762eaaSAaron Young dr->snd_nxt++; 44231762eaaSAaron Young break; 44331762eaaSAaron Young } 44431762eaaSAaron Young udelay(delay); 44531762eaaSAaron Young if ((delay <<= 1) > 128) 44631762eaaSAaron Young delay = 128; 44731762eaaSAaron Young if (retries++ > VNET_MAX_RETRIES) { 44831762eaaSAaron Young pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n", 44931762eaaSAaron Young port->raddr[0], 
port->raddr[1], 45031762eaaSAaron Young port->raddr[2], port->raddr[3], 45131762eaaSAaron Young port->raddr[4], port->raddr[5]); 45231762eaaSAaron Young break; 45331762eaaSAaron Young } 45431762eaaSAaron Young } while (err == -EAGAIN); 45531762eaaSAaron Young 45631762eaaSAaron Young if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) { 45731762eaaSAaron Young port->stop_rx_idx = end; 45831762eaaSAaron Young port->stop_rx = true; 45931762eaaSAaron Young } else { 46031762eaaSAaron Young port->stop_rx_idx = 0; 46131762eaaSAaron Young port->stop_rx = false; 46231762eaaSAaron Young } 46331762eaaSAaron Young 46431762eaaSAaron Young return err; 46531762eaaSAaron Young } 46631762eaaSAaron Young 46731762eaaSAaron Young static struct vio_net_desc *get_rx_desc(struct vnet_port *port, 46831762eaaSAaron Young struct vio_dring_state *dr, 46931762eaaSAaron Young u32 index) 47031762eaaSAaron Young { 47131762eaaSAaron Young struct vio_net_desc *desc = port->vio.desc_buf; 47231762eaaSAaron Young int err; 47331762eaaSAaron Young 47431762eaaSAaron Young err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size, 47531762eaaSAaron Young (index * dr->entry_size), 47631762eaaSAaron Young dr->cookies, dr->ncookies); 47731762eaaSAaron Young if (err < 0) 47831762eaaSAaron Young return ERR_PTR(err); 47931762eaaSAaron Young 48031762eaaSAaron Young return desc; 48131762eaaSAaron Young } 48231762eaaSAaron Young 48331762eaaSAaron Young static int put_rx_desc(struct vnet_port *port, 48431762eaaSAaron Young struct vio_dring_state *dr, 48531762eaaSAaron Young struct vio_net_desc *desc, 48631762eaaSAaron Young u32 index) 48731762eaaSAaron Young { 48831762eaaSAaron Young int err; 48931762eaaSAaron Young 49031762eaaSAaron Young err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, 49131762eaaSAaron Young (index * dr->entry_size), 49231762eaaSAaron Young dr->cookies, dr->ncookies); 49331762eaaSAaron Young if (err < 0) 49431762eaaSAaron Young return err; 49531762eaaSAaron Young 
49631762eaaSAaron Young return 0; 49731762eaaSAaron Young } 49831762eaaSAaron Young 49931762eaaSAaron Young static int vnet_walk_rx_one(struct vnet_port *port, 50031762eaaSAaron Young struct vio_dring_state *dr, 50131762eaaSAaron Young u32 index, int *needs_ack) 50231762eaaSAaron Young { 50331762eaaSAaron Young struct vio_net_desc *desc = get_rx_desc(port, dr, index); 50431762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 50531762eaaSAaron Young int err; 50631762eaaSAaron Young 50731762eaaSAaron Young BUG_ON(desc == NULL); 50831762eaaSAaron Young if (IS_ERR(desc)) 50931762eaaSAaron Young return PTR_ERR(desc); 51031762eaaSAaron Young 51131762eaaSAaron Young if (desc->hdr.state != VIO_DESC_READY) 51231762eaaSAaron Young return 1; 51331762eaaSAaron Young 51431762eaaSAaron Young dma_rmb(); 51531762eaaSAaron Young 51631762eaaSAaron Young viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", 51731762eaaSAaron Young desc->hdr.state, desc->hdr.ack, 51831762eaaSAaron Young desc->size, desc->ncookies, 51931762eaaSAaron Young desc->cookies[0].cookie_addr, 52031762eaaSAaron Young desc->cookies[0].cookie_size); 52131762eaaSAaron Young 52231762eaaSAaron Young err = vnet_rx_one(port, desc); 52331762eaaSAaron Young if (err == -ECONNRESET) 52431762eaaSAaron Young return err; 52531762eaaSAaron Young trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid, 52631762eaaSAaron Young index, desc->hdr.ack); 52731762eaaSAaron Young desc->hdr.state = VIO_DESC_DONE; 52831762eaaSAaron Young err = put_rx_desc(port, dr, desc, index); 52931762eaaSAaron Young if (err < 0) 53031762eaaSAaron Young return err; 53131762eaaSAaron Young *needs_ack = desc->hdr.ack; 53231762eaaSAaron Young return 0; 53331762eaaSAaron Young } 53431762eaaSAaron Young 53531762eaaSAaron Young static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, 53631762eaaSAaron Young u32 start, u32 end, int *npkts, int budget) 53731762eaaSAaron Young { 53831762eaaSAaron Young struct 
vio_driver_state *vio = &port->vio; 53931762eaaSAaron Young int ack_start = -1, ack_end = -1; 54031762eaaSAaron Young bool send_ack = true; 54131762eaaSAaron Young 54231762eaaSAaron Young end = (end == (u32) -1) ? vio_dring_prev(dr, start) 54331762eaaSAaron Young : vio_dring_next(dr, end); 54431762eaaSAaron Young 54531762eaaSAaron Young viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); 54631762eaaSAaron Young 54731762eaaSAaron Young while (start != end) { 54831762eaaSAaron Young int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); 54931762eaaSAaron Young if (err == -ECONNRESET) 55031762eaaSAaron Young return err; 55131762eaaSAaron Young if (err != 0) 55231762eaaSAaron Young break; 55331762eaaSAaron Young (*npkts)++; 55431762eaaSAaron Young if (ack_start == -1) 55531762eaaSAaron Young ack_start = start; 55631762eaaSAaron Young ack_end = start; 55731762eaaSAaron Young start = vio_dring_next(dr, start); 55831762eaaSAaron Young if (ack && start != end) { 55931762eaaSAaron Young err = vnet_send_ack(port, dr, ack_start, ack_end, 56031762eaaSAaron Young VIO_DRING_ACTIVE); 56131762eaaSAaron Young if (err == -ECONNRESET) 56231762eaaSAaron Young return err; 56331762eaaSAaron Young ack_start = -1; 56431762eaaSAaron Young } 56531762eaaSAaron Young if ((*npkts) >= budget) { 56631762eaaSAaron Young send_ack = false; 56731762eaaSAaron Young break; 56831762eaaSAaron Young } 56931762eaaSAaron Young } 57031762eaaSAaron Young if (unlikely(ack_start == -1)) 57131762eaaSAaron Young ack_start = ack_end = vio_dring_prev(dr, start); 57231762eaaSAaron Young if (send_ack) { 57331762eaaSAaron Young port->napi_resume = false; 57431762eaaSAaron Young trace_vnet_tx_send_stopped_ack(port->vio._local_sid, 57531762eaaSAaron Young port->vio._peer_sid, 57631762eaaSAaron Young ack_end, *npkts); 57731762eaaSAaron Young return vnet_send_ack(port, dr, ack_start, ack_end, 57831762eaaSAaron Young VIO_DRING_STOPPED); 57931762eaaSAaron Young } else { 58031762eaaSAaron Young 
trace_vnet_tx_defer_stopped_ack(port->vio._local_sid, 58131762eaaSAaron Young port->vio._peer_sid, 58231762eaaSAaron Young ack_end, *npkts); 58331762eaaSAaron Young port->napi_resume = true; 58431762eaaSAaron Young port->napi_stop_idx = ack_end; 58531762eaaSAaron Young return 1; 58631762eaaSAaron Young } 58731762eaaSAaron Young } 58831762eaaSAaron Young 58931762eaaSAaron Young static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts, 59031762eaaSAaron Young int budget) 59131762eaaSAaron Young { 59231762eaaSAaron Young struct vio_dring_data *pkt = msgbuf; 59331762eaaSAaron Young struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; 59431762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 59531762eaaSAaron Young 59631762eaaSAaron Young viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n", 59731762eaaSAaron Young pkt->tag.stype_env, pkt->seq, dr->rcv_nxt); 59831762eaaSAaron Young 59931762eaaSAaron Young if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) 60031762eaaSAaron Young return 0; 60131762eaaSAaron Young if (unlikely(pkt->seq != dr->rcv_nxt)) { 60231762eaaSAaron Young pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n", 60331762eaaSAaron Young pkt->seq, dr->rcv_nxt); 60431762eaaSAaron Young return 0; 60531762eaaSAaron Young } 60631762eaaSAaron Young 60731762eaaSAaron Young if (!port->napi_resume) 60831762eaaSAaron Young dr->rcv_nxt++; 60931762eaaSAaron Young 61031762eaaSAaron Young /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ 61131762eaaSAaron Young 61231762eaaSAaron Young return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, 61331762eaaSAaron Young npkts, budget); 61431762eaaSAaron Young } 61531762eaaSAaron Young 61631762eaaSAaron Young static int idx_is_pending(struct vio_dring_state *dr, u32 end) 61731762eaaSAaron Young { 61831762eaaSAaron Young u32 idx = dr->cons; 61931762eaaSAaron Young int found = 0; 62031762eaaSAaron Young 62131762eaaSAaron Young while (idx != dr->prod) { 
		if (idx == end) {
			found = 1;
			break;
		}
		idx = vio_dring_next(dr, idx);
	}
	return found;
}

/* Process a VIO_SUBTYPE_ACK for our TX dring.  Advances dr->cons past
 * the acked index and, if the peer raced with vnet_start_xmit() (a
 * descriptor is READY but the "start" trigger was never sent), sends
 * the missed trigger on xmit's behalf.  Returns 1 when the caller
 * should consider waking a stopped TX queue, else 0.
 */
static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	struct vnet *vp;
	u32 end;
	struct vio_net_desc *desc;
	struct netdev_queue *txq;

	/* Only dring-data ACKs are meaningful here */
	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	vp = port->vp;
	dev = vp->dev;
	netif_tx_lock(dev);
	/* Stale/duplicate ACK for an index not between cons and prod */
	if (unlikely(!idx_is_pending(dr, end))) {
		netif_tx_unlock(dev);
		return 0;
	}

	/* sync for race conditions with vnet_start_xmit() and tell xmit it
	 * is time to send a trigger.
	 */
	trace_vnet_rx_stopped_ack(port->vio._local_sid,
				  port->vio._peer_sid, end);
	dr->cons = vio_dring_next(dr, end);
	desc = vio_dring_entry(dr, dr->cons);
	if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
		/* vnet_start_xmit() just populated this dring but missed
		 * sending the "start" LDC message to the consumer.
		 * Send a "start" trigger on its behalf.
		 */
		if (__vnet_tx_trigger(port, dr->cons) > 0)
			port->start_cons = false;
		else
			port->start_cons = true;
	} else {
		port->start_cons = true;
	}
	netif_tx_unlock(dev);

	txq = netdev_get_tx_queue(dev, port->q_index);
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}

/* NACK handler: currently a no-op placeholder (see XXX below). */
static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

/* Multicast control replies carry no payload we act on; anything other
 * than an ACK is logged as unexpected.
 */
static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
		       port->vp->dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}

/* Got back a STOPPED LDC message on port. If the queue is stopped,
 * wake it up so that we'll send out another START message at the
 * next TX.
 */
static void maybe_tx_wakeup(struct vnet_port *port)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
	__netif_tx_lock(txq, smp_processor_id());
	if (likely(netif_tx_queue_stopped(txq))) {
		struct vio_dring_state *dr;

		/* NOTE(review): dr is assigned but never read here —
		 * looks like leftover from a removed avail-threshold
		 * check; candidate for cleanup.
		 */
		dr = &port->vio.drings[VIO_DRIVER_TX_RING];
		netif_tx_wake_queue(txq);
	}
	__netif_tx_unlock(txq);
}

/* A port is usable once the VIO handshake has completed. */
static inline bool port_is_up(struct vnet_port *vnet)
{
	struct vio_driver_state *vio = &vnet->vio;

	return !!(vio->hs_state & VIO_HS_COMPLETE);
}

/* Core NAPI worker: consume pending LDC events on the port.
 * RESET/UP link events are handled first and short-circuit; otherwise
 * DATA_READY drives a read loop that dispatches DATA/CTRL messages.
 * port->napi_resume replays a synthetic DRING_DATA INFO so RX can
 * continue from napi_stop_idx after a budget-limited poll.
 * Returns the number of RX packets processed (<= budget).
 */
static int vnet_event_napi(struct vnet_port *port, int budget)
{
	struct vio_driver_state *vio = &port->vio;
	int tx_wakeup, err;
	int npkts = 0;
	int event = (port->rx_event & LDC_EVENT_RESET);

ldc_ctrl:
	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);

		if (event == LDC_EVENT_RESET) {
			vnet_port_reset(port);
			vio_port_up(vio);
		}
		port->rx_event = 0;
		return 0;
	}
	/* We may have multiple LDC events in rx_event. Unroll send_events() */
	event = (port->rx_event & LDC_EVENT_UP);
	port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP);
	if (event == LDC_EVENT_UP)
		goto ldc_ctrl;
	event = port->rx_event;
	if (!(event & LDC_EVENT_DATA_READY))
		return 0;

	/* we dont expect any other bits than RESET, UP, DATA_READY */
	BUG_ON(event != LDC_EVENT_DATA_READY);

	tx_wakeup = err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		if (port->napi_resume) {
			/* Fabricate the DRING_DATA INFO we were handling
			 * when the previous poll ran out of budget.
			 */
			struct vio_dring_data *pkt =
				(struct vio_dring_data *)&msgbuf;
			struct vio_dring_state *dr =
				&port->vio.drings[VIO_DRIVER_RX_RING];

			pkt->tag.type = VIO_TYPE_DATA;
			pkt->tag.stype = VIO_SUBTYPE_INFO;
			pkt->tag.stype_env = VIO_DRING_DATA;
			pkt->seq = dr->rcv_nxt;
			pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx);
			pkt->end_idx = -1;
			goto napi_resume;
		}
		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;
napi_resume:
		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				if (!port_is_up(port)) {
					/* failures like handshake_failure()
					 * may have cleaned up dring, but
					 * NAPI polling may bring us here.
					 */
					err = -ECONNRESET;
					break;
				}
				err = vnet_rx(port, &msgbuf, &npkts, budget);
				if (npkts >= budget)
					break;
				if (npkts == 0)
					break;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
			if (err)
				break;
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}
		if (err == -ECONNRESET)
			break;
	}
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		maybe_tx_wakeup(port);
	return npkts;
}

/* NAPI poll callback: process events and, if budget was not exhausted,
 * complete NAPI and re-enable the RX interrupt.
 */
int sunvnet_poll_common(struct napi_struct *napi, int budget)
{
	struct vnet_port *port = container_of(napi, struct vnet_port, napi);
	struct vio_driver_state *vio = &port->vio;
	int processed = vnet_event_napi(port, budget);

	if (processed < budget) {
		napi_complete(napi);
		port->rx_event &= ~LDC_EVENT_DATA_READY;
		vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
	}
	return processed;
}
EXPORT_SYMBOL_GPL(sunvnet_poll_common);

/* LDC event callback: record the event, mask the RX interrupt and defer
 * all work to NAPI context.
 */
void sunvnet_event_common(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;

	port->rx_event |= event;
	vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
	napi_schedule(&port->napi);

}
EXPORT_SYMBOL_GPL(sunvnet_event_common);

/* Send a DRING_DATA "start" trigger for descriptor @start, first
 * flushing any deferred DRING_STOPPED ack (port->stop_rx).  Retries
 * EAGAIN with exponential backoff (capped at 128us) up to
 * VNET_MAX_RETRIES times.  Returns vio_ldc_send()'s result.
 */
static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_INFO,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = start,
		.end_idx = (u32) -1,
	};
	int err, delay;
	int retries = 0;

	if (port->stop_rx) {
		trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
						  port->vio._peer_sid,
						  port->stop_rx_idx, -1);
		err = vnet_send_ack(port,
				    &port->vio.drings[VIO_DRIVER_RX_RING],
				    port->stop_rx_idx, -1,
				    VIO_DRING_STOPPED);
		if (err <= 0)
			return err;
	}

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);
	trace_vnet_tx_trigger(port->vio._local_sid,
			      port->vio._peer_sid, start, err);

	return err;
}

/* Find the egress port for @skb by destination MAC hash; fall back to
 * the first live switch port.  Uses RCU list iterators — the caller
 * must hold rcu_read_lock() (see sunvnet_start_xmit_common()).
 */
static struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	unsigned int hash = vnet_hashfn(skb->data);
	struct hlist_head *hp = &vp->port_hash[hash];
	struct vnet_port *port;

	hlist_for_each_entry_rcu(port, hp, hash) {
		if (!port_is_up(port))
			continue;
		if (ether_addr_equal(port->raddr, skb->data))
			return port;
	}
	list_for_each_entry_rcu(port, &vp->port_list, list) {
		if (!port->switch_port)
			continue;
		if (!port_is_up(port))
			continue;
		return port;
	}
	return NULL;
}

/* Walk the TX ring backwards from prod, collecting completed skbs into
 * a singly linked chain (via skb->next) for the caller to free outside
 * the tx lock.  READY descriptors are counted into *pending; the walk
 * stops at the first FREE descriptor with no attached skb.
 * Caller must hold the netif tx lock.
 */
static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
					  unsigned *pending)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *skb = NULL;
	int i, txi;

	*pending = 0;

	txi = dr->prod;
	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		--txi;
		if (txi < 0)
			txi = VNET_TX_RING_SIZE-1;

		d = vio_dring_entry(dr, txi);

		if (d->hdr.state == VIO_DESC_READY) {
			/* still owned by the peer */
			(*pending)++;
			continue;
		}
		if (port->tx_bufs[txi].skb) {
			if (d->hdr.state != VIO_DESC_DONE)
				pr_notice("invalid ring buffer state %d\n",
					  d->hdr.state);
			BUG_ON(port->tx_bufs[txi].skb->next);

			port->tx_bufs[txi].skb->next = skb;
			skb = port->tx_bufs[txi].skb;
			port->tx_bufs[txi].skb = NULL;

			ldc_unmap(port->vio.lp,
				  port->tx_bufs[txi].cookies,
				  port->tx_bufs[txi].ncookies);
		} else if (d->hdr.state == VIO_DESC_FREE)
			break;
		d->hdr.state = VIO_DESC_FREE;
	}
	return skb;
}

/* Free a chain of skbs linked through skb->next (as built by
 * vnet_clean_tx_ring()).
 */
static inline void vnet_free_skbs(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		skb->next = NULL;
		dev_kfree_skb(skb);
		skb = next;
	}
}

/* Periodic TX-reclaim timer: harvest completed descriptors and re-arm
 * while descriptors are still outstanding, otherwise stop the timer.
 */
void sunvnet_clean_timer_expire_common(unsigned long port0)
{
	struct vnet_port *port = (struct vnet_port *)port0;
	struct sk_buff *freeskbs;
	unsigned pending;

	netif_tx_lock(port->vp->dev);
	freeskbs = vnet_clean_tx_ring(port, &pending);
	netif_tx_unlock(port->vp->dev);

	vnet_free_skbs(freeskbs);

	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else
		del_timer(&port->clean_timer);
}
EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common);
101031762eaaSAaron Young 101131762eaaSAaron Young static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb, 101231762eaaSAaron Young struct ldc_trans_cookie *cookies, int ncookies, 101331762eaaSAaron Young unsigned int map_perm) 101431762eaaSAaron Young { 101531762eaaSAaron Young int i, nc, err, blen; 101631762eaaSAaron Young 101731762eaaSAaron Young /* header */ 101831762eaaSAaron Young blen = skb_headlen(skb); 101931762eaaSAaron Young if (blen < ETH_ZLEN) 102031762eaaSAaron Young blen = ETH_ZLEN; 102131762eaaSAaron Young blen += VNET_PACKET_SKIP; 102231762eaaSAaron Young blen += 8 - (blen & 7); 102331762eaaSAaron Young 102431762eaaSAaron Young err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies, 102531762eaaSAaron Young ncookies, map_perm); 102631762eaaSAaron Young if (err < 0) 102731762eaaSAaron Young return err; 102831762eaaSAaron Young nc = err; 102931762eaaSAaron Young 103031762eaaSAaron Young for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 103131762eaaSAaron Young skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 103231762eaaSAaron Young u8 *vaddr; 103331762eaaSAaron Young 103431762eaaSAaron Young if (nc < ncookies) { 103531762eaaSAaron Young vaddr = kmap_atomic(skb_frag_page(f)); 103631762eaaSAaron Young blen = skb_frag_size(f); 103731762eaaSAaron Young blen += 8 - (blen & 7); 103831762eaaSAaron Young err = ldc_map_single(lp, vaddr + f->page_offset, 103931762eaaSAaron Young blen, cookies + nc, ncookies - nc, 104031762eaaSAaron Young map_perm); 104131762eaaSAaron Young kunmap_atomic(vaddr); 104231762eaaSAaron Young } else { 104331762eaaSAaron Young err = -EMSGSIZE; 104431762eaaSAaron Young } 104531762eaaSAaron Young 104631762eaaSAaron Young if (err < 0) { 104731762eaaSAaron Young ldc_unmap(lp, cookies, nc); 104831762eaaSAaron Young return err; 104931762eaaSAaron Young } 105031762eaaSAaron Young nc += err; 105131762eaaSAaron Young } 105231762eaaSAaron Young return nc; 105331762eaaSAaron Young } 105431762eaaSAaron Young 
/* Ensure @skb is laid out the way the LDC mapping code needs: data
 * aligned 8 bytes minus VNET_PACKET_SKIP, headroom for the skip bytes,
 * tailroom for minimum-frame padding, each fragment 8-byte aligned and
 * at most @ncookies fragments.  If any condition fails, the packet is
 * copied into a freshly aligned skb; a CHECKSUM_PARTIAL checksum is
 * completed during the copy.  Consumes @skb on failure and returns
 * NULL; otherwise returns the (possibly replacement) skb.
 */
static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
{
	struct sk_buff *nskb;
	int i, len, pad, docopy;

	len = skb->len;
	pad = 0;
	if (len < ETH_ZLEN) {
		pad += ETH_ZLEN - skb->len;
		len += pad;
	}
	len += VNET_PACKET_SKIP;
	pad += 8 - (len & 7);

	/* make sure we have enough cookies and alignment in every frag */
	docopy = skb_shinfo(skb)->nr_frags >= ncookies;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		docopy |= f->page_offset & 7;
	}
	if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
	    skb_tailroom(skb) < pad ||
	    skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
		int start = 0, offset;
		__wsum csum;

		len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
		nskb = alloc_and_align_skb(skb->dev, len);
		if (nskb == NULL) {
			dev_kfree_skb(skb);
			return NULL;
		}
		skb_reserve(nskb, VNET_PACKET_SKIP);

		/* carry the header offsets over to the copy */
		nskb->protocol = skb->protocol;
		offset = skb_mac_header(skb) - skb->data;
		skb_set_mac_header(nskb, offset);
		offset = skb_network_header(skb) - skb->data;
		skb_set_network_header(nskb, offset);
		offset = skb_transport_header(skb) - skb->data;
		skb_set_transport_header(nskb, offset);

		offset = 0;
		nskb->csum_offset = skb->csum_offset;
		nskb->ip_summed = skb->ip_summed;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			start = skb_checksum_start_offset(skb);
		if (start) {
			/* complete the pending checksum while copying */
			struct iphdr *iph = ip_hdr(nskb);
			int offset = start + nskb->csum_offset;

			if (skb_copy_bits(skb, 0, nskb->data, start)) {
				dev_kfree_skb(nskb);
				dev_kfree_skb(skb);
				return NULL;
			}
			/* zero the checksum field in the source before
			 * summing, per the usual csum-fill pattern
			 */
			*(__sum16 *)(skb->data + offset) = 0;
			csum = skb_copy_and_csum_bits(skb, start,
						      nskb->data + start,
						      skb->len - start, 0);
			if (iph->protocol == IPPROTO_TCP ||
			    iph->protocol == IPPROTO_UDP) {
				csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - start,
							 iph->protocol, csum);
			}
			*(__sum16 *)(nskb->data + offset) = csum;

			nskb->ip_summed = CHECKSUM_NONE;
		} else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
			dev_kfree_skb(nskb);
			dev_kfree_skb(skb);
			return NULL;
		}
		(void)skb_put(nskb, skb->len);
		if (skb_is_gso(skb)) {
			skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
			skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
		}
		nskb->queue_mapping = skb->queue_mapping;
		dev_kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/* ndo_select_queue hook: route the skb to the TX queue owned by its
 * destination port, or queue 0 when no port matches.
 */
u16 sunvnet_select_queue_common(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv, select_queue_fallback_t fallback)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = __tx_port_find(vp, skb);

	if (port == NULL)
		return 0;
	return port->q_index;
}
EXPORT_SYMBOL_GPL(sunvnet_select_queue_common);

/* Software-segment a GSO skb to the peer's TSO limit (port->tsolen) and
 * transmit each resulting segment through sunvnet_start_xmit_common().
 * Stops the TX queue first if the ring cannot hold all segments.
 * Always consumes @skb; returns a NETDEV_TX_* status.
 */
static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
{
	struct net_device *dev = port->vp->dev;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *segs;
	int maclen, datalen;
	int status;
	int gso_size, gso_type, gso_segs;
	int hlen = skb_transport_header(skb) - skb_mac_header(skb);
	int proto = IPPROTO_IP;

	if (skb->protocol == htons(ETH_P_IP))
		proto = ip_hdr(skb)->protocol;
	else if (skb->protocol == htons(ETH_P_IPV6))
		proto = ipv6_hdr(skb)->nexthdr;

	if (proto == IPPROTO_TCP)
		hlen += tcp_hdr(skb)->doff * 4;
	else if (proto == IPPROTO_UDP)
		hlen += sizeof(struct udphdr);
	else {
		/* NOTE(review): skb->protocol is __be16 printed with %d —
		 * value appears byte-swapped on little-endian; consider
		 * ntohs() here.
		 */
		pr_err("vnet_handle_offloads GSO with unknown transport "
		       "protocol %d tproto %d\n", skb->protocol, proto);
		hlen = 128; /* XXX */
	}
	/* payload bytes per segment once headers are accounted for */
	datalen = port->tsolen - hlen;

	gso_size = skb_shinfo(skb)->gso_size;
	gso_type = skb_shinfo(skb)->gso_type;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (port->tso && gso_size < datalen)
		gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);

	if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, port->q_index);
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
			return NETDEV_TX_BUSY;
		netif_tx_wake_queue(txq);
	}

	maclen = skb_network_header(skb) - skb_mac_header(skb);
	skb_pull(skb, maclen);

	if (port->tso && gso_size < datalen) {
		if (skb_unclone(skb, GFP_ATOMIC))
			goto out_dropped;

		/* segment to TSO size */
		skb_shinfo(skb)->gso_size = datalen;
		skb_shinfo(skb)->gso_segs = gso_segs;
	}
	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto out_dropped;

	skb_push(skb, maclen);
	skb_reset_mac_header(skb);

	status = 0;
	while (segs) {
		struct sk_buff *curr = segs;

		segs = segs->next;
		curr->next = NULL;
		if (port->tso && curr->len > dev->mtu) {
			/* restore original GSO metadata on oversized segs */
			skb_shinfo(curr)->gso_size = gso_size;
			skb_shinfo(curr)->gso_type = gso_type;
			skb_shinfo(curr)->gso_segs =
				DIV_ROUND_UP(curr->len - hlen, gso_size);
		} else
			skb_shinfo(curr)->gso_size = 0;

		skb_push(curr, maclen);
		skb_reset_mac_header(curr);
		memcpy(skb_mac_header(curr), skb_mac_header(skb),
		       maclen);
		curr->csum_start = skb_transport_header(curr) - curr->head;
		if (ip_hdr(curr)->protocol == IPPROTO_TCP)
			curr->csum_offset = offsetof(struct tcphdr, check);
		else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
			curr->csum_offset = offsetof(struct udphdr, check);

		/* after the first failure, just free remaining segments */
		if (!(status & NETDEV_TX_MASK))
			status = sunvnet_start_xmit_common(curr, dev);
		if (status & NETDEV_TX_MASK)
			dev_kfree_skb_any(curr);
	}

	if (!(status & NETDEV_TX_MASK))
		dev_kfree_skb_any(skb);
	return status;
out_dropped:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = NULL;
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned int len;
	struct sk_buff *freeskbs = NULL;
	int i, err, txi;
	unsigned pending = 0;
	struct netdev_queue *txq;

	rcu_read_lock();
	port = __tx_port_find(vp, skb);
	if (unlikely(!port)) {
		rcu_read_unlock();
		goto out_dropped;
	}

	if (skb_is_gso(skb) && skb->len > port->tsolen) {
		err = vnet_handle_offloads(port, skb);
		rcu_read_unlock();
		return err;
	}
128031762eaaSAaron Young 128131762eaaSAaron Young if (!skb_is_gso(skb) && skb->len > port->rmtu) { 128231762eaaSAaron Young unsigned long localmtu = port->rmtu - ETH_HLEN; 128331762eaaSAaron Young 128431762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 3)) 128531762eaaSAaron Young localmtu -= VLAN_HLEN; 128631762eaaSAaron Young 128731762eaaSAaron Young if (skb->protocol == htons(ETH_P_IP)) { 128831762eaaSAaron Young struct flowi4 fl4; 128931762eaaSAaron Young struct rtable *rt = NULL; 129031762eaaSAaron Young 129131762eaaSAaron Young memset(&fl4, 0, sizeof(fl4)); 129231762eaaSAaron Young fl4.flowi4_oif = dev->ifindex; 129331762eaaSAaron Young fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); 129431762eaaSAaron Young fl4.daddr = ip_hdr(skb)->daddr; 129531762eaaSAaron Young fl4.saddr = ip_hdr(skb)->saddr; 129631762eaaSAaron Young 129731762eaaSAaron Young rt = ip_route_output_key(dev_net(dev), &fl4); 129831762eaaSAaron Young rcu_read_unlock(); 129931762eaaSAaron Young if (!IS_ERR(rt)) { 130031762eaaSAaron Young skb_dst_set(skb, &rt->dst); 130131762eaaSAaron Young icmp_send(skb, ICMP_DEST_UNREACH, 130231762eaaSAaron Young ICMP_FRAG_NEEDED, 130331762eaaSAaron Young htonl(localmtu)); 130431762eaaSAaron Young } 130531762eaaSAaron Young } 130631762eaaSAaron Young #if IS_ENABLED(CONFIG_IPV6) 130731762eaaSAaron Young else if (skb->protocol == htons(ETH_P_IPV6)) 130831762eaaSAaron Young icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); 130931762eaaSAaron Young #endif 131031762eaaSAaron Young goto out_dropped; 131131762eaaSAaron Young } 131231762eaaSAaron Young 131331762eaaSAaron Young skb = vnet_skb_shape(skb, 2); 131431762eaaSAaron Young 131531762eaaSAaron Young if (unlikely(!skb)) 131631762eaaSAaron Young goto out_dropped; 131731762eaaSAaron Young 131831762eaaSAaron Young if (skb->ip_summed == CHECKSUM_PARTIAL) 131931762eaaSAaron Young vnet_fullcsum(skb); 132031762eaaSAaron Young 132131762eaaSAaron Young dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 132231762eaaSAaron Young 
i = skb_get_queue_mapping(skb); 132331762eaaSAaron Young txq = netdev_get_tx_queue(dev, i); 132431762eaaSAaron Young if (unlikely(vnet_tx_dring_avail(dr) < 1)) { 132531762eaaSAaron Young if (!netif_tx_queue_stopped(txq)) { 132631762eaaSAaron Young netif_tx_stop_queue(txq); 132731762eaaSAaron Young 132831762eaaSAaron Young /* This is a hard error, log it. */ 132931762eaaSAaron Young netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); 133031762eaaSAaron Young dev->stats.tx_errors++; 133131762eaaSAaron Young } 133231762eaaSAaron Young rcu_read_unlock(); 133331762eaaSAaron Young return NETDEV_TX_BUSY; 133431762eaaSAaron Young } 133531762eaaSAaron Young 133631762eaaSAaron Young d = vio_dring_cur(dr); 133731762eaaSAaron Young 133831762eaaSAaron Young txi = dr->prod; 133931762eaaSAaron Young 134031762eaaSAaron Young freeskbs = vnet_clean_tx_ring(port, &pending); 134131762eaaSAaron Young 134231762eaaSAaron Young BUG_ON(port->tx_bufs[txi].skb); 134331762eaaSAaron Young 134431762eaaSAaron Young len = skb->len; 134531762eaaSAaron Young if (len < ETH_ZLEN) 134631762eaaSAaron Young len = ETH_ZLEN; 134731762eaaSAaron Young 134831762eaaSAaron Young err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2, 134931762eaaSAaron Young (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); 135031762eaaSAaron Young if (err < 0) { 135131762eaaSAaron Young netdev_info(dev, "tx buffer map error %d\n", err); 135231762eaaSAaron Young goto out_dropped; 135331762eaaSAaron Young } 135431762eaaSAaron Young 135531762eaaSAaron Young port->tx_bufs[txi].skb = skb; 135631762eaaSAaron Young skb = NULL; 135731762eaaSAaron Young port->tx_bufs[txi].ncookies = err; 135831762eaaSAaron Young 135931762eaaSAaron Young /* We don't rely on the ACKs to free the skb in vnet_start_xmit(), 136031762eaaSAaron Young * thus it is safe to not set VIO_ACK_ENABLE for each transmission: 136131762eaaSAaron Young * the protocol itself does not require it as long as the peer 136231762eaaSAaron Young * sends a 
VIO_SUBTYPE_ACK for VIO_DRING_STOPPED. 136331762eaaSAaron Young * 136431762eaaSAaron Young * An ACK for every packet in the ring is expensive as the 136531762eaaSAaron Young * sending of LDC messages is slow and affects performance. 136631762eaaSAaron Young */ 136731762eaaSAaron Young d->hdr.ack = VIO_ACK_DISABLE; 136831762eaaSAaron Young d->size = len; 136931762eaaSAaron Young d->ncookies = port->tx_bufs[txi].ncookies; 137031762eaaSAaron Young for (i = 0; i < d->ncookies; i++) 137131762eaaSAaron Young d->cookies[i] = port->tx_bufs[txi].cookies[i]; 137231762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 7)) { 137331762eaaSAaron Young struct vio_net_dext *dext = vio_net_ext(d); 137431762eaaSAaron Young 137531762eaaSAaron Young memset(dext, 0, sizeof(*dext)); 137631762eaaSAaron Young if (skb_is_gso(port->tx_bufs[txi].skb)) { 137731762eaaSAaron Young dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb) 137831762eaaSAaron Young ->gso_size; 137931762eaaSAaron Young dext->flags |= VNET_PKT_IPV4_LSO; 138031762eaaSAaron Young } 138131762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 8) && 138231762eaaSAaron Young !port->switch_port) { 138331762eaaSAaron Young dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK; 138431762eaaSAaron Young dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK; 138531762eaaSAaron Young } 138631762eaaSAaron Young } 138731762eaaSAaron Young 138831762eaaSAaron Young /* This has to be a non-SMP write barrier because we are writing 138931762eaaSAaron Young * to memory which is shared with the peer LDOM. 139031762eaaSAaron Young */ 139131762eaaSAaron Young dma_wmb(); 139231762eaaSAaron Young 139331762eaaSAaron Young d->hdr.state = VIO_DESC_READY; 139431762eaaSAaron Young 139531762eaaSAaron Young /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent 139631762eaaSAaron Young * to notify the consumer that some descriptors are READY. 
139731762eaaSAaron Young * After that "start" trigger, no additional triggers are needed until 139831762eaaSAaron Young * a DRING_STOPPED is received from the consumer. The dr->cons field 139931762eaaSAaron Young * (set up by vnet_ack()) has the value of the next dring index 140031762eaaSAaron Young * that has not yet been ack-ed. We send a "start" trigger here 140131762eaaSAaron Young * if, and only if, start_cons is true (reset it afterward). Conversely, 140231762eaaSAaron Young * vnet_ack() should check if the dring corresponding to cons 140331762eaaSAaron Young * is marked READY, but start_cons was false. 140431762eaaSAaron Young * If so, vnet_ack() should send out the missed "start" trigger. 140531762eaaSAaron Young * 140631762eaaSAaron Young * Note that the dma_wmb() above makes sure the cookies et al. are 140731762eaaSAaron Young * not globally visible before the VIO_DESC_READY, and that the 140831762eaaSAaron Young * stores are ordered correctly by the compiler. The consumer will 140931762eaaSAaron Young * not proceed until the VIO_DESC_READY is visible assuring that 141031762eaaSAaron Young * the consumer does not observe anything related to descriptors 141131762eaaSAaron Young * out of order. 
The HV trap from the LDC start trigger is the 141231762eaaSAaron Young * producer to consumer announcement that work is available to the 141331762eaaSAaron Young * consumer 141431762eaaSAaron Young */ 141531762eaaSAaron Young if (!port->start_cons) { /* previous trigger suffices */ 141631762eaaSAaron Young trace_vnet_skip_tx_trigger(port->vio._local_sid, 141731762eaaSAaron Young port->vio._peer_sid, dr->cons); 141831762eaaSAaron Young goto ldc_start_done; 141931762eaaSAaron Young } 142031762eaaSAaron Young 142131762eaaSAaron Young err = __vnet_tx_trigger(port, dr->cons); 142231762eaaSAaron Young if (unlikely(err < 0)) { 142331762eaaSAaron Young netdev_info(dev, "TX trigger error %d\n", err); 142431762eaaSAaron Young d->hdr.state = VIO_DESC_FREE; 142531762eaaSAaron Young skb = port->tx_bufs[txi].skb; 142631762eaaSAaron Young port->tx_bufs[txi].skb = NULL; 142731762eaaSAaron Young dev->stats.tx_carrier_errors++; 142831762eaaSAaron Young goto out_dropped; 142931762eaaSAaron Young } 143031762eaaSAaron Young 143131762eaaSAaron Young ldc_start_done: 143231762eaaSAaron Young port->start_cons = false; 143331762eaaSAaron Young 143431762eaaSAaron Young dev->stats.tx_packets++; 143531762eaaSAaron Young dev->stats.tx_bytes += port->tx_bufs[txi].skb->len; 143631762eaaSAaron Young 143731762eaaSAaron Young dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); 143831762eaaSAaron Young if (unlikely(vnet_tx_dring_avail(dr) < 1)) { 143931762eaaSAaron Young netif_tx_stop_queue(txq); 144031762eaaSAaron Young if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) 144131762eaaSAaron Young netif_tx_wake_queue(txq); 144231762eaaSAaron Young } 144331762eaaSAaron Young 144431762eaaSAaron Young (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); 144531762eaaSAaron Young rcu_read_unlock(); 144631762eaaSAaron Young 144731762eaaSAaron Young vnet_free_skbs(freeskbs); 144831762eaaSAaron Young 144931762eaaSAaron Young return NETDEV_TX_OK; 145031762eaaSAaron Young 145131762eaaSAaron 
Young out_dropped: 145231762eaaSAaron Young if (pending) 145331762eaaSAaron Young (void)mod_timer(&port->clean_timer, 145431762eaaSAaron Young jiffies + VNET_CLEAN_TIMEOUT); 145531762eaaSAaron Young else if (port) 145631762eaaSAaron Young del_timer(&port->clean_timer); 145731762eaaSAaron Young if (port) 145831762eaaSAaron Young rcu_read_unlock(); 145931762eaaSAaron Young if (skb) 146031762eaaSAaron Young dev_kfree_skb(skb); 146131762eaaSAaron Young vnet_free_skbs(freeskbs); 146231762eaaSAaron Young dev->stats.tx_dropped++; 146331762eaaSAaron Young return NETDEV_TX_OK; 146431762eaaSAaron Young } 146531762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common); 146631762eaaSAaron Young 146731762eaaSAaron Young void sunvnet_tx_timeout_common(struct net_device *dev) 146831762eaaSAaron Young { 146931762eaaSAaron Young /* XXX Implement me XXX */ 147031762eaaSAaron Young } 147131762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common); 147231762eaaSAaron Young 147331762eaaSAaron Young int sunvnet_open_common(struct net_device *dev) 147431762eaaSAaron Young { 147531762eaaSAaron Young netif_carrier_on(dev); 147631762eaaSAaron Young netif_tx_start_all_queues(dev); 147731762eaaSAaron Young 147831762eaaSAaron Young return 0; 147931762eaaSAaron Young } 148031762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_open_common); 148131762eaaSAaron Young 148231762eaaSAaron Young int sunvnet_close_common(struct net_device *dev) 148331762eaaSAaron Young { 148431762eaaSAaron Young netif_tx_stop_all_queues(dev); 148531762eaaSAaron Young netif_carrier_off(dev); 148631762eaaSAaron Young 148731762eaaSAaron Young return 0; 148831762eaaSAaron Young } 148931762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_close_common); 149031762eaaSAaron Young 149131762eaaSAaron Young static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) 149231762eaaSAaron Young { 149331762eaaSAaron Young struct vnet_mcast_entry *m; 149431762eaaSAaron Young 149531762eaaSAaron Young for (m = vp->mcast_list; 
m; m = m->next) { 149631762eaaSAaron Young if (ether_addr_equal(m->addr, addr)) 149731762eaaSAaron Young return m; 149831762eaaSAaron Young } 149931762eaaSAaron Young return NULL; 150031762eaaSAaron Young } 150131762eaaSAaron Young 150231762eaaSAaron Young static void __update_mc_list(struct vnet *vp, struct net_device *dev) 150331762eaaSAaron Young { 150431762eaaSAaron Young struct netdev_hw_addr *ha; 150531762eaaSAaron Young 150631762eaaSAaron Young netdev_for_each_mc_addr(ha, dev) { 150731762eaaSAaron Young struct vnet_mcast_entry *m; 150831762eaaSAaron Young 150931762eaaSAaron Young m = __vnet_mc_find(vp, ha->addr); 151031762eaaSAaron Young if (m) { 151131762eaaSAaron Young m->hit = 1; 151231762eaaSAaron Young continue; 151331762eaaSAaron Young } 151431762eaaSAaron Young 151531762eaaSAaron Young if (!m) { 151631762eaaSAaron Young m = kzalloc(sizeof(*m), GFP_ATOMIC); 151731762eaaSAaron Young if (!m) 151831762eaaSAaron Young continue; 151931762eaaSAaron Young memcpy(m->addr, ha->addr, ETH_ALEN); 152031762eaaSAaron Young m->hit = 1; 152131762eaaSAaron Young 152231762eaaSAaron Young m->next = vp->mcast_list; 152331762eaaSAaron Young vp->mcast_list = m; 152431762eaaSAaron Young } 152531762eaaSAaron Young } 152631762eaaSAaron Young } 152731762eaaSAaron Young 152831762eaaSAaron Young static void __send_mc_list(struct vnet *vp, struct vnet_port *port) 152931762eaaSAaron Young { 153031762eaaSAaron Young struct vio_net_mcast_info info; 153131762eaaSAaron Young struct vnet_mcast_entry *m, **pp; 153231762eaaSAaron Young int n_addrs; 153331762eaaSAaron Young 153431762eaaSAaron Young memset(&info, 0, sizeof(info)); 153531762eaaSAaron Young 153631762eaaSAaron Young info.tag.type = VIO_TYPE_CTRL; 153731762eaaSAaron Young info.tag.stype = VIO_SUBTYPE_INFO; 153831762eaaSAaron Young info.tag.stype_env = VNET_MCAST_INFO; 153931762eaaSAaron Young info.tag.sid = vio_send_sid(&port->vio); 154031762eaaSAaron Young info.set = 1; 154131762eaaSAaron Young 154231762eaaSAaron Young n_addrs 
= 0; 154331762eaaSAaron Young for (m = vp->mcast_list; m; m = m->next) { 154431762eaaSAaron Young if (m->sent) 154531762eaaSAaron Young continue; 154631762eaaSAaron Young m->sent = 1; 154731762eaaSAaron Young memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], 154831762eaaSAaron Young m->addr, ETH_ALEN); 154931762eaaSAaron Young if (++n_addrs == VNET_NUM_MCAST) { 155031762eaaSAaron Young info.count = n_addrs; 155131762eaaSAaron Young 155231762eaaSAaron Young (void) vio_ldc_send(&port->vio, &info, 155331762eaaSAaron Young sizeof(info)); 155431762eaaSAaron Young n_addrs = 0; 155531762eaaSAaron Young } 155631762eaaSAaron Young } 155731762eaaSAaron Young if (n_addrs) { 155831762eaaSAaron Young info.count = n_addrs; 155931762eaaSAaron Young (void) vio_ldc_send(&port->vio, &info, sizeof(info)); 156031762eaaSAaron Young } 156131762eaaSAaron Young 156231762eaaSAaron Young info.set = 0; 156331762eaaSAaron Young 156431762eaaSAaron Young n_addrs = 0; 156531762eaaSAaron Young pp = &vp->mcast_list; 156631762eaaSAaron Young while ((m = *pp) != NULL) { 156731762eaaSAaron Young if (m->hit) { 156831762eaaSAaron Young m->hit = 0; 156931762eaaSAaron Young pp = &m->next; 157031762eaaSAaron Young continue; 157131762eaaSAaron Young } 157231762eaaSAaron Young 157331762eaaSAaron Young memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], 157431762eaaSAaron Young m->addr, ETH_ALEN); 157531762eaaSAaron Young if (++n_addrs == VNET_NUM_MCAST) { 157631762eaaSAaron Young info.count = n_addrs; 157731762eaaSAaron Young (void) vio_ldc_send(&port->vio, &info, 157831762eaaSAaron Young sizeof(info)); 157931762eaaSAaron Young n_addrs = 0; 158031762eaaSAaron Young } 158131762eaaSAaron Young 158231762eaaSAaron Young *pp = m->next; 158331762eaaSAaron Young kfree(m); 158431762eaaSAaron Young } 158531762eaaSAaron Young if (n_addrs) { 158631762eaaSAaron Young info.count = n_addrs; 158731762eaaSAaron Young (void) vio_ldc_send(&port->vio, &info, sizeof(info)); 158831762eaaSAaron Young } 158931762eaaSAaron Young } 
159031762eaaSAaron Young 159131762eaaSAaron Young void sunvnet_set_rx_mode_common(struct net_device *dev) 159231762eaaSAaron Young { 159331762eaaSAaron Young struct vnet *vp = netdev_priv(dev); 159431762eaaSAaron Young struct vnet_port *port; 159531762eaaSAaron Young 159631762eaaSAaron Young rcu_read_lock(); 159731762eaaSAaron Young list_for_each_entry_rcu(port, &vp->port_list, list) { 159831762eaaSAaron Young 159931762eaaSAaron Young if (port->switch_port) { 160031762eaaSAaron Young __update_mc_list(vp, dev); 160131762eaaSAaron Young __send_mc_list(vp, port); 160231762eaaSAaron Young break; 160331762eaaSAaron Young } 160431762eaaSAaron Young } 160531762eaaSAaron Young rcu_read_unlock(); 160631762eaaSAaron Young } 160731762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common); 160831762eaaSAaron Young 160931762eaaSAaron Young int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu) 161031762eaaSAaron Young { 161131762eaaSAaron Young if (new_mtu < 68 || new_mtu > 65535) 161231762eaaSAaron Young return -EINVAL; 161331762eaaSAaron Young 161431762eaaSAaron Young dev->mtu = new_mtu; 161531762eaaSAaron Young return 0; 161631762eaaSAaron Young } 161731762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_change_mtu_common); 161831762eaaSAaron Young 161931762eaaSAaron Young int sunvnet_set_mac_addr_common(struct net_device *dev, void *p) 162031762eaaSAaron Young { 162131762eaaSAaron Young return -EINVAL; 162231762eaaSAaron Young } 162331762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common); 162431762eaaSAaron Young 162531762eaaSAaron Young void sunvnet_port_free_tx_bufs_common(struct vnet_port *port) 162631762eaaSAaron Young { 162731762eaaSAaron Young struct vio_dring_state *dr; 162831762eaaSAaron Young int i; 162931762eaaSAaron Young 163031762eaaSAaron Young dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 163131762eaaSAaron Young 163231762eaaSAaron Young if (dr->base == NULL) 163331762eaaSAaron Young return; 163431762eaaSAaron Young 163531762eaaSAaron 
Young for (i = 0; i < VNET_TX_RING_SIZE; i++) { 163631762eaaSAaron Young struct vio_net_desc *d; 163731762eaaSAaron Young void *skb = port->tx_bufs[i].skb; 163831762eaaSAaron Young 163931762eaaSAaron Young if (!skb) 164031762eaaSAaron Young continue; 164131762eaaSAaron Young 164231762eaaSAaron Young d = vio_dring_entry(dr, i); 164331762eaaSAaron Young 164431762eaaSAaron Young ldc_unmap(port->vio.lp, 164531762eaaSAaron Young port->tx_bufs[i].cookies, 164631762eaaSAaron Young port->tx_bufs[i].ncookies); 164731762eaaSAaron Young dev_kfree_skb(skb); 164831762eaaSAaron Young port->tx_bufs[i].skb = NULL; 164931762eaaSAaron Young d->hdr.state = VIO_DESC_FREE; 165031762eaaSAaron Young } 165131762eaaSAaron Young ldc_free_exp_dring(port->vio.lp, dr->base, 165231762eaaSAaron Young (dr->entry_size * dr->num_entries), 165331762eaaSAaron Young dr->cookies, dr->ncookies); 165431762eaaSAaron Young dr->base = NULL; 165531762eaaSAaron Young dr->entry_size = 0; 165631762eaaSAaron Young dr->num_entries = 0; 165731762eaaSAaron Young dr->pending = 0; 165831762eaaSAaron Young dr->ncookies = 0; 165931762eaaSAaron Young } 166031762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common); 166131762eaaSAaron Young 166231762eaaSAaron Young static void vnet_port_reset(struct vnet_port *port) 166331762eaaSAaron Young { 166431762eaaSAaron Young del_timer(&port->clean_timer); 166531762eaaSAaron Young sunvnet_port_free_tx_bufs_common(port); 166631762eaaSAaron Young port->rmtu = 0; 166731762eaaSAaron Young port->tso = true; 166831762eaaSAaron Young port->tsolen = 0; 166931762eaaSAaron Young } 167031762eaaSAaron Young 167131762eaaSAaron Young static int vnet_port_alloc_tx_ring(struct vnet_port *port) 167231762eaaSAaron Young { 167331762eaaSAaron Young struct vio_dring_state *dr; 167431762eaaSAaron Young unsigned long len, elen; 167531762eaaSAaron Young int i, err, ncookies; 167631762eaaSAaron Young void *dring; 167731762eaaSAaron Young 167831762eaaSAaron Young dr = 
&port->vio.drings[VIO_DRIVER_TX_RING]; 167931762eaaSAaron Young 168031762eaaSAaron Young elen = sizeof(struct vio_net_desc) + 168131762eaaSAaron Young sizeof(struct ldc_trans_cookie) * 2; 168231762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 7)) 168331762eaaSAaron Young elen += sizeof(struct vio_net_dext); 168431762eaaSAaron Young len = VNET_TX_RING_SIZE * elen; 168531762eaaSAaron Young 168631762eaaSAaron Young ncookies = VIO_MAX_RING_COOKIES; 168731762eaaSAaron Young dring = ldc_alloc_exp_dring(port->vio.lp, len, 168831762eaaSAaron Young dr->cookies, &ncookies, 168931762eaaSAaron Young (LDC_MAP_SHADOW | 169031762eaaSAaron Young LDC_MAP_DIRECT | 169131762eaaSAaron Young LDC_MAP_RW)); 169231762eaaSAaron Young if (IS_ERR(dring)) { 169331762eaaSAaron Young err = PTR_ERR(dring); 169431762eaaSAaron Young goto err_out; 169531762eaaSAaron Young } 169631762eaaSAaron Young 169731762eaaSAaron Young dr->base = dring; 169831762eaaSAaron Young dr->entry_size = elen; 169931762eaaSAaron Young dr->num_entries = VNET_TX_RING_SIZE; 170031762eaaSAaron Young dr->prod = dr->cons = 0; 170131762eaaSAaron Young port->start_cons = true; /* need an initial trigger */ 170231762eaaSAaron Young dr->pending = VNET_TX_RING_SIZE; 170331762eaaSAaron Young dr->ncookies = ncookies; 170431762eaaSAaron Young 170531762eaaSAaron Young for (i = 0; i < VNET_TX_RING_SIZE; ++i) { 170631762eaaSAaron Young struct vio_net_desc *d; 170731762eaaSAaron Young 170831762eaaSAaron Young d = vio_dring_entry(dr, i); 170931762eaaSAaron Young d->hdr.state = VIO_DESC_FREE; 171031762eaaSAaron Young } 171131762eaaSAaron Young return 0; 171231762eaaSAaron Young 171331762eaaSAaron Young err_out: 171431762eaaSAaron Young sunvnet_port_free_tx_bufs_common(port); 171531762eaaSAaron Young 171631762eaaSAaron Young return err; 171731762eaaSAaron Young } 171831762eaaSAaron Young 171931762eaaSAaron Young #ifdef CONFIG_NET_POLL_CONTROLLER 172031762eaaSAaron Young void sunvnet_poll_controller_common(struct net_device *dev) 
172131762eaaSAaron Young { 172231762eaaSAaron Young struct vnet *vp = netdev_priv(dev); 172331762eaaSAaron Young struct vnet_port *port; 172431762eaaSAaron Young unsigned long flags; 172531762eaaSAaron Young 172631762eaaSAaron Young spin_lock_irqsave(&vp->lock, flags); 172731762eaaSAaron Young if (!list_empty(&vp->port_list)) { 172831762eaaSAaron Young port = list_entry(vp->port_list.next, struct vnet_port, list); 172931762eaaSAaron Young napi_schedule(&port->napi); 173031762eaaSAaron Young } 173131762eaaSAaron Young spin_unlock_irqrestore(&vp->lock, flags); 173231762eaaSAaron Young } 173331762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common); 173431762eaaSAaron Young #endif 173531762eaaSAaron Young 173631762eaaSAaron Young void sunvnet_port_add_txq_common(struct vnet_port *port) 173731762eaaSAaron Young { 173831762eaaSAaron Young struct vnet *vp = port->vp; 173931762eaaSAaron Young int n; 174031762eaaSAaron Young 174131762eaaSAaron Young n = vp->nports++; 174231762eaaSAaron Young n = n & (VNET_MAX_TXQS - 1); 174331762eaaSAaron Young port->q_index = n; 174431762eaaSAaron Young netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index)); 174531762eaaSAaron Young } 174631762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common); 174731762eaaSAaron Young 174831762eaaSAaron Young void sunvnet_port_rm_txq_common(struct vnet_port *port) 174931762eaaSAaron Young { 175031762eaaSAaron Young port->vp->nports--; 175131762eaaSAaron Young netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index)); 175231762eaaSAaron Young } 175331762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common); 1754