1c861ef83SShannon Nelson // SPDX-License-Identifier: GPL-2.0 231762eaaSAaron Young /* sunvnet.c: Sun LDOM Virtual Network Driver. 331762eaaSAaron Young * 431762eaaSAaron Young * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> 5867fa150SShannon Nelson * Copyright (C) 2016-2017 Oracle. All rights reserved. 631762eaaSAaron Young */ 731762eaaSAaron Young 831762eaaSAaron Young #include <linux/module.h> 931762eaaSAaron Young #include <linux/kernel.h> 1031762eaaSAaron Young #include <linux/types.h> 1131762eaaSAaron Young #include <linux/slab.h> 1231762eaaSAaron Young #include <linux/delay.h> 1331762eaaSAaron Young #include <linux/init.h> 1431762eaaSAaron Young #include <linux/netdevice.h> 1531762eaaSAaron Young #include <linux/ethtool.h> 1631762eaaSAaron Young #include <linux/etherdevice.h> 1731762eaaSAaron Young #include <linux/mutex.h> 1831762eaaSAaron Young #include <linux/highmem.h> 1931762eaaSAaron Young #include <linux/if_vlan.h> 2031762eaaSAaron Young #define CREATE_TRACE_POINTS 2131762eaaSAaron Young #include <trace/events/sunvnet.h> 2231762eaaSAaron Young 2331762eaaSAaron Young #if IS_ENABLED(CONFIG_IPV6) 2431762eaaSAaron Young #include <linux/icmpv6.h> 2531762eaaSAaron Young #endif 2631762eaaSAaron Young 2731762eaaSAaron Young #include <net/ip.h> 2831762eaaSAaron Young #include <net/icmp.h> 2931762eaaSAaron Young #include <net/route.h> 3031762eaaSAaron Young 3131762eaaSAaron Young #include <asm/vio.h> 3231762eaaSAaron Young #include <asm/ldc.h> 3331762eaaSAaron Young 3431762eaaSAaron Young #include "sunvnet_common.h" 3531762eaaSAaron Young 3631762eaaSAaron Young /* Heuristic for the number of times to exponentially backoff and 3731762eaaSAaron Young * retry sending an LDC trigger when EAGAIN is encountered 3831762eaaSAaron Young */ 3931762eaaSAaron Young #define VNET_MAX_RETRIES 10 4031762eaaSAaron Young 412493b842SShannon Nelson MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); 422493b842SShannon Nelson MODULE_DESCRIPTION("Sun LDOM virtual network support library"); 432493b842SShannon Nelson MODULE_LICENSE("GPL"); 442493b842SShannon Nelson MODULE_VERSION("1.1"); 452493b842SShannon Nelson 4631762eaaSAaron Young static int __vnet_tx_trigger(struct vnet_port *port, u32 start); 4731762eaaSAaron Young 4831762eaaSAaron Young static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) 4931762eaaSAaron Young { 5031762eaaSAaron Young return vio_dring_avail(dr, VNET_TX_RING_SIZE); 5131762eaaSAaron Young } 5231762eaaSAaron Young 5331762eaaSAaron Young static int vnet_handle_unknown(struct vnet_port *port, void *arg) 5431762eaaSAaron Young { 5531762eaaSAaron Young struct vio_msg_tag *pkt = arg; 5631762eaaSAaron Young 5731762eaaSAaron Young pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n", 5831762eaaSAaron Young pkt->type, pkt->stype, pkt->stype_env, pkt->sid); 5931762eaaSAaron Young pr_err("Resetting connection\n"); 6031762eaaSAaron Young 6131762eaaSAaron Young ldc_disconnect(port->vio.lp); 6231762eaaSAaron Young 6331762eaaSAaron Young return -ECONNRESET; 6431762eaaSAaron Young } 6531762eaaSAaron Young 6631762eaaSAaron Young static int vnet_port_alloc_tx_ring(struct vnet_port *port); 6731762eaaSAaron Young 6831762eaaSAaron Young int sunvnet_send_attr_common(struct vio_driver_state *vio) 6931762eaaSAaron Young { 7031762eaaSAaron Young struct vnet_port *port = to_vnet_port(vio); 7167d0719fSAaron Young struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); 7231762eaaSAaron Young struct vio_net_attr_info pkt; 7331762eaaSAaron Young int framelen = ETH_FRAME_LEN; 7431762eaaSAaron Young int i, err; 7531762eaaSAaron Young 7631762eaaSAaron Young err = vnet_port_alloc_tx_ring(to_vnet_port(vio)); 7731762eaaSAaron Young if (err) 7831762eaaSAaron Young return err; 7931762eaaSAaron Young 8031762eaaSAaron Young memset(&pkt, 0, sizeof(pkt)); 8131762eaaSAaron Young pkt.tag.type = VIO_TYPE_CTRL; 8231762eaaSAaron Young 
pkt.tag.stype = VIO_SUBTYPE_INFO; 8331762eaaSAaron Young pkt.tag.stype_env = VIO_ATTR_INFO; 8431762eaaSAaron Young pkt.tag.sid = vio_send_sid(vio); 8531762eaaSAaron Young if (vio_version_before(vio, 1, 2)) 8631762eaaSAaron Young pkt.xfer_mode = VIO_DRING_MODE; 8731762eaaSAaron Young else 8831762eaaSAaron Young pkt.xfer_mode = VIO_NEW_DRING_MODE; 8931762eaaSAaron Young pkt.addr_type = VNET_ADDR_ETHERMAC; 9031762eaaSAaron Young pkt.ack_freq = 0; 9131762eaaSAaron Young for (i = 0; i < 6; i++) 9231762eaaSAaron Young pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); 9331762eaaSAaron Young if (vio_version_after(vio, 1, 3)) { 9431762eaaSAaron Young if (port->rmtu) { 9531762eaaSAaron Young port->rmtu = min(VNET_MAXPACKET, port->rmtu); 9631762eaaSAaron Young pkt.mtu = port->rmtu; 9731762eaaSAaron Young } else { 9831762eaaSAaron Young port->rmtu = VNET_MAXPACKET; 9931762eaaSAaron Young pkt.mtu = port->rmtu; 10031762eaaSAaron Young } 10131762eaaSAaron Young if (vio_version_after_eq(vio, 1, 6)) 10231762eaaSAaron Young pkt.options = VIO_TX_DRING; 10331762eaaSAaron Young } else if (vio_version_before(vio, 1, 3)) { 10431762eaaSAaron Young pkt.mtu = framelen; 10531762eaaSAaron Young } else { /* v1.3 */ 10631762eaaSAaron Young pkt.mtu = framelen + VLAN_HLEN; 10731762eaaSAaron Young } 10831762eaaSAaron Young 10931762eaaSAaron Young pkt.cflags = 0; 11031762eaaSAaron Young if (vio_version_after_eq(vio, 1, 7) && port->tso) { 11131762eaaSAaron Young pkt.cflags |= VNET_LSO_IPV4_CAPAB; 11231762eaaSAaron Young if (!port->tsolen) 11331762eaaSAaron Young port->tsolen = VNET_MAXTSO; 11431762eaaSAaron Young pkt.ipv4_lso_maxlen = port->tsolen; 11531762eaaSAaron Young } 11631762eaaSAaron Young 11731762eaaSAaron Young pkt.plnk_updt = PHYSLINK_UPDATE_NONE; 11831762eaaSAaron Young 11931762eaaSAaron Young viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " 12031762eaaSAaron Young "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " 12131762eaaSAaron Young "cflags[0x%04x] 
lso_max[%u]\n", 12231762eaaSAaron Young pkt.xfer_mode, pkt.addr_type, 12331762eaaSAaron Young (unsigned long long)pkt.addr, 12431762eaaSAaron Young pkt.ack_freq, pkt.plnk_updt, pkt.options, 12531762eaaSAaron Young (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen); 12631762eaaSAaron Young 12731762eaaSAaron Young return vio_ldc_send(vio, &pkt, sizeof(pkt)); 12831762eaaSAaron Young } 12931762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_send_attr_common); 13031762eaaSAaron Young 13131762eaaSAaron Young static int handle_attr_info(struct vio_driver_state *vio, 13231762eaaSAaron Young struct vio_net_attr_info *pkt) 13331762eaaSAaron Young { 13431762eaaSAaron Young struct vnet_port *port = to_vnet_port(vio); 13531762eaaSAaron Young u64 localmtu; 13631762eaaSAaron Young u8 xfer_mode; 13731762eaaSAaron Young 13831762eaaSAaron Young viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " 13931762eaaSAaron Young "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " 14031762eaaSAaron Young " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", 14131762eaaSAaron Young pkt->xfer_mode, pkt->addr_type, 14231762eaaSAaron Young (unsigned long long)pkt->addr, 14331762eaaSAaron Young pkt->ack_freq, pkt->plnk_updt, pkt->options, 14431762eaaSAaron Young (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, 14531762eaaSAaron Young pkt->ipv4_lso_maxlen); 14631762eaaSAaron Young 14731762eaaSAaron Young pkt->tag.sid = vio_send_sid(vio); 14831762eaaSAaron Young 14931762eaaSAaron Young xfer_mode = pkt->xfer_mode; 15031762eaaSAaron Young /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */ 15131762eaaSAaron Young if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE) 15231762eaaSAaron Young xfer_mode = VIO_NEW_DRING_MODE; 15331762eaaSAaron Young 15431762eaaSAaron Young /* MTU negotiation: 15531762eaaSAaron Young * < v1.3 - ETH_FRAME_LEN exactly 15631762eaaSAaron Young * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change 15731762eaaSAaron Young * pkt->mtu 
for ACK 15831762eaaSAaron Young * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly 15931762eaaSAaron Young */ 16031762eaaSAaron Young if (vio_version_before(vio, 1, 3)) { 16131762eaaSAaron Young localmtu = ETH_FRAME_LEN; 16231762eaaSAaron Young } else if (vio_version_after(vio, 1, 3)) { 16331762eaaSAaron Young localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET; 16431762eaaSAaron Young localmtu = min(pkt->mtu, localmtu); 16531762eaaSAaron Young pkt->mtu = localmtu; 16631762eaaSAaron Young } else { /* v1.3 */ 16731762eaaSAaron Young localmtu = ETH_FRAME_LEN + VLAN_HLEN; 16831762eaaSAaron Young } 16931762eaaSAaron Young port->rmtu = localmtu; 17031762eaaSAaron Young 17131762eaaSAaron Young /* LSO negotiation */ 17231762eaaSAaron Young if (vio_version_after_eq(vio, 1, 7)) 17331762eaaSAaron Young port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB); 17431762eaaSAaron Young else 17531762eaaSAaron Young port->tso = false; 17631762eaaSAaron Young if (port->tso) { 17731762eaaSAaron Young if (!port->tsolen) 17831762eaaSAaron Young port->tsolen = VNET_MAXTSO; 17931762eaaSAaron Young port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen); 18031762eaaSAaron Young if (port->tsolen < VNET_MINTSO) { 18131762eaaSAaron Young port->tso = false; 18231762eaaSAaron Young port->tsolen = 0; 18331762eaaSAaron Young pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; 18431762eaaSAaron Young } 18531762eaaSAaron Young pkt->ipv4_lso_maxlen = port->tsolen; 18631762eaaSAaron Young } else { 18731762eaaSAaron Young pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; 18831762eaaSAaron Young pkt->ipv4_lso_maxlen = 0; 189bc221a34SShannon Nelson port->tsolen = 0; 19031762eaaSAaron Young } 19131762eaaSAaron Young 19231762eaaSAaron Young /* for version >= 1.6, ACK packet mode we support */ 19331762eaaSAaron Young if (vio_version_after_eq(vio, 1, 6)) { 19431762eaaSAaron Young pkt->xfer_mode = VIO_NEW_DRING_MODE; 19531762eaaSAaron Young pkt->options = VIO_TX_DRING; 19631762eaaSAaron Young } 19731762eaaSAaron Young 19831762eaaSAaron Young if 
(!(xfer_mode | VIO_NEW_DRING_MODE) || 19931762eaaSAaron Young pkt->addr_type != VNET_ADDR_ETHERMAC || 20031762eaaSAaron Young pkt->mtu != localmtu) { 20131762eaaSAaron Young viodbg(HS, "SEND NET ATTR NACK\n"); 20231762eaaSAaron Young 20331762eaaSAaron Young pkt->tag.stype = VIO_SUBTYPE_NACK; 20431762eaaSAaron Young 20531762eaaSAaron Young (void)vio_ldc_send(vio, pkt, sizeof(*pkt)); 20631762eaaSAaron Young 20731762eaaSAaron Young return -ECONNRESET; 208dc153f85SAaron Young } 209dc153f85SAaron Young 21031762eaaSAaron Young viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] " 21131762eaaSAaron Young "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] " 21231762eaaSAaron Young "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", 21331762eaaSAaron Young pkt->xfer_mode, pkt->addr_type, 21431762eaaSAaron Young (unsigned long long)pkt->addr, 21531762eaaSAaron Young pkt->ack_freq, pkt->plnk_updt, pkt->options, 21631762eaaSAaron Young (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, 21731762eaaSAaron Young pkt->ipv4_lso_maxlen); 21831762eaaSAaron Young 21931762eaaSAaron Young pkt->tag.stype = VIO_SUBTYPE_ACK; 22031762eaaSAaron Young 22131762eaaSAaron Young return vio_ldc_send(vio, pkt, sizeof(*pkt)); 22231762eaaSAaron Young } 22331762eaaSAaron Young 22431762eaaSAaron Young static int handle_attr_ack(struct vio_driver_state *vio, 22531762eaaSAaron Young struct vio_net_attr_info *pkt) 22631762eaaSAaron Young { 22731762eaaSAaron Young viodbg(HS, "GOT NET ATTR ACK\n"); 22831762eaaSAaron Young 22931762eaaSAaron Young return 0; 23031762eaaSAaron Young } 23131762eaaSAaron Young 23231762eaaSAaron Young static int handle_attr_nack(struct vio_driver_state *vio, 23331762eaaSAaron Young struct vio_net_attr_info *pkt) 23431762eaaSAaron Young { 23531762eaaSAaron Young viodbg(HS, "GOT NET ATTR NACK\n"); 23631762eaaSAaron Young 23731762eaaSAaron Young return -ECONNRESET; 23831762eaaSAaron Young } 23931762eaaSAaron Young 24031762eaaSAaron Young int 
sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg) 24131762eaaSAaron Young { 24231762eaaSAaron Young struct vio_net_attr_info *pkt = arg; 24331762eaaSAaron Young 24431762eaaSAaron Young switch (pkt->tag.stype) { 24531762eaaSAaron Young case VIO_SUBTYPE_INFO: 24631762eaaSAaron Young return handle_attr_info(vio, pkt); 24731762eaaSAaron Young 24831762eaaSAaron Young case VIO_SUBTYPE_ACK: 24931762eaaSAaron Young return handle_attr_ack(vio, pkt); 25031762eaaSAaron Young 25131762eaaSAaron Young case VIO_SUBTYPE_NACK: 25231762eaaSAaron Young return handle_attr_nack(vio, pkt); 25331762eaaSAaron Young 25431762eaaSAaron Young default: 25531762eaaSAaron Young return -ECONNRESET; 25631762eaaSAaron Young } 25731762eaaSAaron Young } 25831762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common); 25931762eaaSAaron Young 26031762eaaSAaron Young void sunvnet_handshake_complete_common(struct vio_driver_state *vio) 26131762eaaSAaron Young { 26231762eaaSAaron Young struct vio_dring_state *dr; 26331762eaaSAaron Young 26431762eaaSAaron Young dr = &vio->drings[VIO_DRIVER_RX_RING]; 265dc153f85SAaron Young dr->rcv_nxt = 1; 266dc153f85SAaron Young dr->snd_nxt = 1; 26731762eaaSAaron Young 26831762eaaSAaron Young dr = &vio->drings[VIO_DRIVER_TX_RING]; 269dc153f85SAaron Young dr->rcv_nxt = 1; 270dc153f85SAaron Young dr->snd_nxt = 1; 27131762eaaSAaron Young } 27231762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common); 27331762eaaSAaron Young 27431762eaaSAaron Young /* The hypervisor interface that implements copying to/from imported 27531762eaaSAaron Young * memory from another domain requires that copies are done to 8-byte 27631762eaaSAaron Young * aligned buffers, and that the lengths of such copies are also 8-byte 27731762eaaSAaron Young * multiples. 
27831762eaaSAaron Young * 27931762eaaSAaron Young * So we align skb->data to an 8-byte multiple and pad-out the data 28031762eaaSAaron Young * area so we can round the copy length up to the next multiple of 28131762eaaSAaron Young * 8 for the copy. 28231762eaaSAaron Young * 28331762eaaSAaron Young * The transmitter puts the actual start of the packet 6 bytes into 28431762eaaSAaron Young * the buffer it sends over, so that the IP headers after the ethernet 28531762eaaSAaron Young * header are aligned properly. These 6 bytes are not in the descriptor 28631762eaaSAaron Young * length, they are simply implied. This offset is represented using 28731762eaaSAaron Young * the VNET_PACKET_SKIP macro. 28831762eaaSAaron Young */ 28931762eaaSAaron Young static struct sk_buff *alloc_and_align_skb(struct net_device *dev, 29031762eaaSAaron Young unsigned int len) 29131762eaaSAaron Young { 292dc153f85SAaron Young struct sk_buff *skb; 29331762eaaSAaron Young unsigned long addr, off; 29431762eaaSAaron Young 295dc153f85SAaron Young skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8); 29631762eaaSAaron Young if (unlikely(!skb)) 29731762eaaSAaron Young return NULL; 29831762eaaSAaron Young 29931762eaaSAaron Young addr = (unsigned long)skb->data; 30031762eaaSAaron Young off = ((addr + 7UL) & ~7UL) - addr; 30131762eaaSAaron Young if (off) 30231762eaaSAaron Young skb_reserve(skb, off); 30331762eaaSAaron Young 30431762eaaSAaron Young return skb; 30531762eaaSAaron Young } 30631762eaaSAaron Young 30798524e04SShannon Nelson static inline void vnet_fullcsum_ipv4(struct sk_buff *skb) 30831762eaaSAaron Young { 30931762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 31031762eaaSAaron Young int offset = skb_transport_offset(skb); 31131762eaaSAaron Young 31231762eaaSAaron Young if (skb->protocol != htons(ETH_P_IP)) 31331762eaaSAaron Young return; 31431762eaaSAaron Young if (iph->protocol != IPPROTO_TCP && 31531762eaaSAaron Young iph->protocol != IPPROTO_UDP) 31631762eaaSAaron Young return; 
31731762eaaSAaron Young skb->ip_summed = CHECKSUM_NONE; 31831762eaaSAaron Young skb->csum_level = 1; 31931762eaaSAaron Young skb->csum = 0; 32031762eaaSAaron Young if (iph->protocol == IPPROTO_TCP) { 32131762eaaSAaron Young struct tcphdr *ptcp = tcp_hdr(skb); 32231762eaaSAaron Young 32331762eaaSAaron Young ptcp->check = 0; 32431762eaaSAaron Young skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 32531762eaaSAaron Young ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 32631762eaaSAaron Young skb->len - offset, IPPROTO_TCP, 32731762eaaSAaron Young skb->csum); 32831762eaaSAaron Young } else if (iph->protocol == IPPROTO_UDP) { 32931762eaaSAaron Young struct udphdr *pudp = udp_hdr(skb); 33031762eaaSAaron Young 33131762eaaSAaron Young pudp->check = 0; 33231762eaaSAaron Young skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 33331762eaaSAaron Young pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 33431762eaaSAaron Young skb->len - offset, IPPROTO_UDP, 33531762eaaSAaron Young skb->csum); 33631762eaaSAaron Young } 33731762eaaSAaron Young } 33831762eaaSAaron Young 33998524e04SShannon Nelson #if IS_ENABLED(CONFIG_IPV6) 34098524e04SShannon Nelson static inline void vnet_fullcsum_ipv6(struct sk_buff *skb) 34198524e04SShannon Nelson { 34298524e04SShannon Nelson struct ipv6hdr *ip6h = ipv6_hdr(skb); 34398524e04SShannon Nelson int offset = skb_transport_offset(skb); 34498524e04SShannon Nelson 34598524e04SShannon Nelson if (skb->protocol != htons(ETH_P_IPV6)) 34698524e04SShannon Nelson return; 34798524e04SShannon Nelson if (ip6h->nexthdr != IPPROTO_TCP && 34898524e04SShannon Nelson ip6h->nexthdr != IPPROTO_UDP) 34998524e04SShannon Nelson return; 35098524e04SShannon Nelson skb->ip_summed = CHECKSUM_NONE; 35198524e04SShannon Nelson skb->csum_level = 1; 35298524e04SShannon Nelson skb->csum = 0; 35398524e04SShannon Nelson if (ip6h->nexthdr == IPPROTO_TCP) { 35498524e04SShannon Nelson struct tcphdr *ptcp = tcp_hdr(skb); 35598524e04SShannon Nelson 
35698524e04SShannon Nelson ptcp->check = 0; 35798524e04SShannon Nelson skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 35898524e04SShannon Nelson ptcp->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 35998524e04SShannon Nelson skb->len - offset, IPPROTO_TCP, 36098524e04SShannon Nelson skb->csum); 36198524e04SShannon Nelson } else if (ip6h->nexthdr == IPPROTO_UDP) { 36298524e04SShannon Nelson struct udphdr *pudp = udp_hdr(skb); 36398524e04SShannon Nelson 36498524e04SShannon Nelson pudp->check = 0; 36598524e04SShannon Nelson skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); 36698524e04SShannon Nelson pudp->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 36798524e04SShannon Nelson skb->len - offset, IPPROTO_UDP, 36898524e04SShannon Nelson skb->csum); 36998524e04SShannon Nelson } 37098524e04SShannon Nelson } 37198524e04SShannon Nelson #endif 37298524e04SShannon Nelson 37331762eaaSAaron Young static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) 37431762eaaSAaron Young { 37567d0719fSAaron Young struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); 37631762eaaSAaron Young unsigned int len = desc->size; 37731762eaaSAaron Young unsigned int copy_len; 37831762eaaSAaron Young struct sk_buff *skb; 37931762eaaSAaron Young int maxlen; 38031762eaaSAaron Young int err; 38131762eaaSAaron Young 38231762eaaSAaron Young err = -EMSGSIZE; 38331762eaaSAaron Young if (port->tso && port->tsolen > port->rmtu) 38431762eaaSAaron Young maxlen = port->tsolen; 38531762eaaSAaron Young else 38631762eaaSAaron Young maxlen = port->rmtu; 38731762eaaSAaron Young if (unlikely(len < ETH_ZLEN || len > maxlen)) { 38831762eaaSAaron Young dev->stats.rx_length_errors++; 38931762eaaSAaron Young goto out_dropped; 39031762eaaSAaron Young } 39131762eaaSAaron Young 39231762eaaSAaron Young skb = alloc_and_align_skb(dev, len); 39331762eaaSAaron Young err = -ENOMEM; 39431762eaaSAaron Young if (unlikely(!skb)) { 39531762eaaSAaron Young 
dev->stats.rx_missed_errors++; 39631762eaaSAaron Young goto out_dropped; 39731762eaaSAaron Young } 39831762eaaSAaron Young 39931762eaaSAaron Young copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U; 40031762eaaSAaron Young skb_put(skb, copy_len); 40131762eaaSAaron Young err = ldc_copy(port->vio.lp, LDC_COPY_IN, 40231762eaaSAaron Young skb->data, copy_len, 0, 40331762eaaSAaron Young desc->cookies, desc->ncookies); 40431762eaaSAaron Young if (unlikely(err < 0)) { 40531762eaaSAaron Young dev->stats.rx_frame_errors++; 40631762eaaSAaron Young goto out_free_skb; 40731762eaaSAaron Young } 40831762eaaSAaron Young 40931762eaaSAaron Young skb_pull(skb, VNET_PACKET_SKIP); 41031762eaaSAaron Young skb_trim(skb, len); 41131762eaaSAaron Young skb->protocol = eth_type_trans(skb, dev); 41231762eaaSAaron Young 41331762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 8)) { 41431762eaaSAaron Young struct vio_net_dext *dext = vio_net_ext(desc); 41531762eaaSAaron Young 41631762eaaSAaron Young skb_reset_network_header(skb); 41731762eaaSAaron Young 41831762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { 41931762eaaSAaron Young if (skb->protocol == ETH_P_IP) { 42031762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 42131762eaaSAaron Young 42231762eaaSAaron Young iph->check = 0; 42331762eaaSAaron Young ip_send_check(iph); 42431762eaaSAaron Young } 42531762eaaSAaron Young } 42631762eaaSAaron Young if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && 42731762eaaSAaron Young skb->ip_summed == CHECKSUM_NONE) { 42831762eaaSAaron Young if (skb->protocol == htons(ETH_P_IP)) { 42931762eaaSAaron Young struct iphdr *iph = ip_hdr(skb); 43031762eaaSAaron Young int ihl = iph->ihl * 4; 43131762eaaSAaron Young 43231762eaaSAaron Young skb_set_transport_header(skb, ihl); 43398524e04SShannon Nelson vnet_fullcsum_ipv4(skb); 43498524e04SShannon Nelson #if IS_ENABLED(CONFIG_IPV6) 43598524e04SShannon Nelson } else if (skb->protocol == htons(ETH_P_IPV6)) { 43698524e04SShannon Nelson 
skb_set_transport_header(skb, 43798524e04SShannon Nelson sizeof(struct ipv6hdr)); 43898524e04SShannon Nelson vnet_fullcsum_ipv6(skb); 43998524e04SShannon Nelson #endif 44031762eaaSAaron Young } 44131762eaaSAaron Young } 44231762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { 44331762eaaSAaron Young skb->ip_summed = CHECKSUM_PARTIAL; 44431762eaaSAaron Young skb->csum_level = 0; 44531762eaaSAaron Young if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK) 44631762eaaSAaron Young skb->csum_level = 1; 44731762eaaSAaron Young } 44831762eaaSAaron Young } 44931762eaaSAaron Young 45031762eaaSAaron Young skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL; 45131762eaaSAaron Young 452b12a96f5SShannon Nelson if (unlikely(is_multicast_ether_addr(eth_hdr(skb)->h_dest))) 453b12a96f5SShannon Nelson dev->stats.multicast++; 45431762eaaSAaron Young dev->stats.rx_packets++; 45531762eaaSAaron Young dev->stats.rx_bytes += len; 4560f512c84SShannon Nelson port->stats.rx_packets++; 4570f512c84SShannon Nelson port->stats.rx_bytes += len; 45831762eaaSAaron Young napi_gro_receive(&port->napi, skb); 45931762eaaSAaron Young return 0; 46031762eaaSAaron Young 46131762eaaSAaron Young out_free_skb: 46231762eaaSAaron Young kfree_skb(skb); 46331762eaaSAaron Young 46431762eaaSAaron Young out_dropped: 46531762eaaSAaron Young dev->stats.rx_dropped++; 46631762eaaSAaron Young return err; 46731762eaaSAaron Young } 46831762eaaSAaron Young 46931762eaaSAaron Young static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, 47031762eaaSAaron Young u32 start, u32 end, u8 vio_dring_state) 47131762eaaSAaron Young { 47231762eaaSAaron Young struct vio_dring_data hdr = { 47331762eaaSAaron Young .tag = { 47431762eaaSAaron Young .type = VIO_TYPE_DATA, 47531762eaaSAaron Young .stype = VIO_SUBTYPE_ACK, 47631762eaaSAaron Young .stype_env = VIO_DRING_DATA, 47731762eaaSAaron Young .sid = vio_send_sid(&port->vio), 47831762eaaSAaron Young }, 47931762eaaSAaron Young .dring_ident = 
dr->ident, 48031762eaaSAaron Young .start_idx = start, 48131762eaaSAaron Young .end_idx = end, 48231762eaaSAaron Young .state = vio_dring_state, 48331762eaaSAaron Young }; 48431762eaaSAaron Young int err, delay; 48531762eaaSAaron Young int retries = 0; 48631762eaaSAaron Young 48731762eaaSAaron Young hdr.seq = dr->snd_nxt; 48831762eaaSAaron Young delay = 1; 48931762eaaSAaron Young do { 49031762eaaSAaron Young err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); 49131762eaaSAaron Young if (err > 0) { 49231762eaaSAaron Young dr->snd_nxt++; 49331762eaaSAaron Young break; 49431762eaaSAaron Young } 49531762eaaSAaron Young udelay(delay); 49631762eaaSAaron Young if ((delay <<= 1) > 128) 49731762eaaSAaron Young delay = 128; 49831762eaaSAaron Young if (retries++ > VNET_MAX_RETRIES) { 49931762eaaSAaron Young pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n", 50031762eaaSAaron Young port->raddr[0], port->raddr[1], 50131762eaaSAaron Young port->raddr[2], port->raddr[3], 50231762eaaSAaron Young port->raddr[4], port->raddr[5]); 50331762eaaSAaron Young break; 50431762eaaSAaron Young } 50531762eaaSAaron Young } while (err == -EAGAIN); 50631762eaaSAaron Young 50731762eaaSAaron Young if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) { 50831762eaaSAaron Young port->stop_rx_idx = end; 50931762eaaSAaron Young port->stop_rx = true; 51031762eaaSAaron Young } else { 51131762eaaSAaron Young port->stop_rx_idx = 0; 51231762eaaSAaron Young port->stop_rx = false; 51331762eaaSAaron Young } 51431762eaaSAaron Young 51531762eaaSAaron Young return err; 51631762eaaSAaron Young } 51731762eaaSAaron Young 51831762eaaSAaron Young static struct vio_net_desc *get_rx_desc(struct vnet_port *port, 51931762eaaSAaron Young struct vio_dring_state *dr, 52031762eaaSAaron Young u32 index) 52131762eaaSAaron Young { 52231762eaaSAaron Young struct vio_net_desc *desc = port->vio.desc_buf; 52331762eaaSAaron Young int err; 52431762eaaSAaron Young 52531762eaaSAaron Young err = ldc_get_dring_entry(port->vio.lp, desc, 
dr->entry_size, 52631762eaaSAaron Young (index * dr->entry_size), 52731762eaaSAaron Young dr->cookies, dr->ncookies); 52831762eaaSAaron Young if (err < 0) 52931762eaaSAaron Young return ERR_PTR(err); 53031762eaaSAaron Young 53131762eaaSAaron Young return desc; 53231762eaaSAaron Young } 53331762eaaSAaron Young 53431762eaaSAaron Young static int put_rx_desc(struct vnet_port *port, 53531762eaaSAaron Young struct vio_dring_state *dr, 53631762eaaSAaron Young struct vio_net_desc *desc, 53731762eaaSAaron Young u32 index) 53831762eaaSAaron Young { 53931762eaaSAaron Young int err; 54031762eaaSAaron Young 54131762eaaSAaron Young err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, 54231762eaaSAaron Young (index * dr->entry_size), 54331762eaaSAaron Young dr->cookies, dr->ncookies); 54431762eaaSAaron Young if (err < 0) 54531762eaaSAaron Young return err; 54631762eaaSAaron Young 54731762eaaSAaron Young return 0; 54831762eaaSAaron Young } 54931762eaaSAaron Young 55031762eaaSAaron Young static int vnet_walk_rx_one(struct vnet_port *port, 55131762eaaSAaron Young struct vio_dring_state *dr, 55231762eaaSAaron Young u32 index, int *needs_ack) 55331762eaaSAaron Young { 55431762eaaSAaron Young struct vio_net_desc *desc = get_rx_desc(port, dr, index); 55531762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 55631762eaaSAaron Young int err; 55731762eaaSAaron Young 558dc153f85SAaron Young BUG_ON(!desc); 55931762eaaSAaron Young if (IS_ERR(desc)) 56031762eaaSAaron Young return PTR_ERR(desc); 56131762eaaSAaron Young 56231762eaaSAaron Young if (desc->hdr.state != VIO_DESC_READY) 56331762eaaSAaron Young return 1; 56431762eaaSAaron Young 56531762eaaSAaron Young dma_rmb(); 56631762eaaSAaron Young 56731762eaaSAaron Young viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", 56831762eaaSAaron Young desc->hdr.state, desc->hdr.ack, 56931762eaaSAaron Young desc->size, desc->ncookies, 57031762eaaSAaron Young desc->cookies[0].cookie_addr, 57131762eaaSAaron Young 
desc->cookies[0].cookie_size); 57231762eaaSAaron Young 57331762eaaSAaron Young err = vnet_rx_one(port, desc); 57431762eaaSAaron Young if (err == -ECONNRESET) 57531762eaaSAaron Young return err; 57631762eaaSAaron Young trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid, 57731762eaaSAaron Young index, desc->hdr.ack); 57831762eaaSAaron Young desc->hdr.state = VIO_DESC_DONE; 57931762eaaSAaron Young err = put_rx_desc(port, dr, desc, index); 58031762eaaSAaron Young if (err < 0) 58131762eaaSAaron Young return err; 58231762eaaSAaron Young *needs_ack = desc->hdr.ack; 58331762eaaSAaron Young return 0; 58431762eaaSAaron Young } 58531762eaaSAaron Young 58631762eaaSAaron Young static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, 58731762eaaSAaron Young u32 start, u32 end, int *npkts, int budget) 58831762eaaSAaron Young { 58931762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 59031762eaaSAaron Young int ack_start = -1, ack_end = -1; 59131762eaaSAaron Young bool send_ack = true; 59231762eaaSAaron Young 59331762eaaSAaron Young end = (end == (u32)-1) ? 
vio_dring_prev(dr, start) 59431762eaaSAaron Young : vio_dring_next(dr, end); 59531762eaaSAaron Young 59631762eaaSAaron Young viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); 59731762eaaSAaron Young 59831762eaaSAaron Young while (start != end) { 59931762eaaSAaron Young int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); 600dc153f85SAaron Young 60131762eaaSAaron Young if (err == -ECONNRESET) 60231762eaaSAaron Young return err; 60331762eaaSAaron Young if (err != 0) 60431762eaaSAaron Young break; 60531762eaaSAaron Young (*npkts)++; 60631762eaaSAaron Young if (ack_start == -1) 60731762eaaSAaron Young ack_start = start; 60831762eaaSAaron Young ack_end = start; 60931762eaaSAaron Young start = vio_dring_next(dr, start); 61031762eaaSAaron Young if (ack && start != end) { 61131762eaaSAaron Young err = vnet_send_ack(port, dr, ack_start, ack_end, 61231762eaaSAaron Young VIO_DRING_ACTIVE); 61331762eaaSAaron Young if (err == -ECONNRESET) 61431762eaaSAaron Young return err; 61531762eaaSAaron Young ack_start = -1; 61631762eaaSAaron Young } 61731762eaaSAaron Young if ((*npkts) >= budget) { 61831762eaaSAaron Young send_ack = false; 61931762eaaSAaron Young break; 62031762eaaSAaron Young } 62131762eaaSAaron Young } 622dc153f85SAaron Young if (unlikely(ack_start == -1)) { 623dc153f85SAaron Young ack_end = vio_dring_prev(dr, start); 624dc153f85SAaron Young ack_start = ack_end; 625dc153f85SAaron Young } 62631762eaaSAaron Young if (send_ack) { 62731762eaaSAaron Young port->napi_resume = false; 62831762eaaSAaron Young trace_vnet_tx_send_stopped_ack(port->vio._local_sid, 62931762eaaSAaron Young port->vio._peer_sid, 63031762eaaSAaron Young ack_end, *npkts); 63131762eaaSAaron Young return vnet_send_ack(port, dr, ack_start, ack_end, 63231762eaaSAaron Young VIO_DRING_STOPPED); 63331762eaaSAaron Young } else { 63431762eaaSAaron Young trace_vnet_tx_defer_stopped_ack(port->vio._local_sid, 63531762eaaSAaron Young port->vio._peer_sid, 63631762eaaSAaron Young ack_end, 
*npkts); 63731762eaaSAaron Young port->napi_resume = true; 63831762eaaSAaron Young port->napi_stop_idx = ack_end; 63931762eaaSAaron Young return 1; 64031762eaaSAaron Young } 64131762eaaSAaron Young } 64231762eaaSAaron Young 64331762eaaSAaron Young static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts, 64431762eaaSAaron Young int budget) 64531762eaaSAaron Young { 64631762eaaSAaron Young struct vio_dring_data *pkt = msgbuf; 64731762eaaSAaron Young struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; 64831762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 64931762eaaSAaron Young 65031762eaaSAaron Young viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n", 65131762eaaSAaron Young pkt->tag.stype_env, pkt->seq, dr->rcv_nxt); 65231762eaaSAaron Young 65331762eaaSAaron Young if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) 65431762eaaSAaron Young return 0; 65531762eaaSAaron Young if (unlikely(pkt->seq != dr->rcv_nxt)) { 65631762eaaSAaron Young pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n", 65731762eaaSAaron Young pkt->seq, dr->rcv_nxt); 65831762eaaSAaron Young return 0; 65931762eaaSAaron Young } 66031762eaaSAaron Young 66131762eaaSAaron Young if (!port->napi_resume) 66231762eaaSAaron Young dr->rcv_nxt++; 66331762eaaSAaron Young 66431762eaaSAaron Young /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ 66531762eaaSAaron Young 66631762eaaSAaron Young return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, 66731762eaaSAaron Young npkts, budget); 66831762eaaSAaron Young } 66931762eaaSAaron Young 67031762eaaSAaron Young static int idx_is_pending(struct vio_dring_state *dr, u32 end) 67131762eaaSAaron Young { 67231762eaaSAaron Young u32 idx = dr->cons; 67331762eaaSAaron Young int found = 0; 67431762eaaSAaron Young 67531762eaaSAaron Young while (idx != dr->prod) { 67631762eaaSAaron Young if (idx == end) { 67731762eaaSAaron Young found = 1; 67831762eaaSAaron Young break; 67931762eaaSAaron Young } 
68031762eaaSAaron Young idx = vio_dring_next(dr, idx); 68131762eaaSAaron Young } 68231762eaaSAaron Young return found; 68331762eaaSAaron Young } 68431762eaaSAaron Young 68531762eaaSAaron Young static int vnet_ack(struct vnet_port *port, void *msgbuf) 68631762eaaSAaron Young { 68731762eaaSAaron Young struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 68831762eaaSAaron Young struct vio_dring_data *pkt = msgbuf; 68931762eaaSAaron Young struct net_device *dev; 69031762eaaSAaron Young u32 end; 69131762eaaSAaron Young struct vio_net_desc *desc; 69231762eaaSAaron Young struct netdev_queue *txq; 69331762eaaSAaron Young 69431762eaaSAaron Young if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) 69531762eaaSAaron Young return 0; 69631762eaaSAaron Young 69731762eaaSAaron Young end = pkt->end_idx; 69867d0719fSAaron Young dev = VNET_PORT_TO_NET_DEVICE(port); 69931762eaaSAaron Young netif_tx_lock(dev); 70031762eaaSAaron Young if (unlikely(!idx_is_pending(dr, end))) { 70131762eaaSAaron Young netif_tx_unlock(dev); 70231762eaaSAaron Young return 0; 70331762eaaSAaron Young } 70431762eaaSAaron Young 70531762eaaSAaron Young /* sync for race conditions with vnet_start_xmit() and tell xmit it 70631762eaaSAaron Young * is time to send a trigger. 70731762eaaSAaron Young */ 70831762eaaSAaron Young trace_vnet_rx_stopped_ack(port->vio._local_sid, 70931762eaaSAaron Young port->vio._peer_sid, end); 71031762eaaSAaron Young dr->cons = vio_dring_next(dr, end); 71131762eaaSAaron Young desc = vio_dring_entry(dr, dr->cons); 71231762eaaSAaron Young if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) { 71331762eaaSAaron Young /* vnet_start_xmit() just populated this dring but missed 71431762eaaSAaron Young * sending the "start" LDC message to the consumer. 71531762eaaSAaron Young * Send a "start" trigger on its behalf. 
71631762eaaSAaron Young */ 71731762eaaSAaron Young if (__vnet_tx_trigger(port, dr->cons) > 0) 71831762eaaSAaron Young port->start_cons = false; 71931762eaaSAaron Young else 72031762eaaSAaron Young port->start_cons = true; 72131762eaaSAaron Young } else { 72231762eaaSAaron Young port->start_cons = true; 72331762eaaSAaron Young } 72431762eaaSAaron Young netif_tx_unlock(dev); 72531762eaaSAaron Young 72631762eaaSAaron Young txq = netdev_get_tx_queue(dev, port->q_index); 72731762eaaSAaron Young if (unlikely(netif_tx_queue_stopped(txq) && 72831762eaaSAaron Young vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) 72931762eaaSAaron Young return 1; 73031762eaaSAaron Young 73131762eaaSAaron Young return 0; 73231762eaaSAaron Young } 73331762eaaSAaron Young 73431762eaaSAaron Young static int vnet_nack(struct vnet_port *port, void *msgbuf) 73531762eaaSAaron Young { 73631762eaaSAaron Young /* XXX just reset or similar XXX */ 73731762eaaSAaron Young return 0; 73831762eaaSAaron Young } 73931762eaaSAaron Young 74031762eaaSAaron Young static int handle_mcast(struct vnet_port *port, void *msgbuf) 74131762eaaSAaron Young { 74231762eaaSAaron Young struct vio_net_mcast_info *pkt = msgbuf; 74367d0719fSAaron Young struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); 74431762eaaSAaron Young 74531762eaaSAaron Young if (pkt->tag.stype != VIO_SUBTYPE_ACK) 74631762eaaSAaron Young pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n", 74767d0719fSAaron Young dev->name, 74831762eaaSAaron Young pkt->tag.type, 74931762eaaSAaron Young pkt->tag.stype, 75031762eaaSAaron Young pkt->tag.stype_env, 75131762eaaSAaron Young pkt->tag.sid); 75231762eaaSAaron Young 75331762eaaSAaron Young return 0; 75431762eaaSAaron Young } 75531762eaaSAaron Young 7568778b276SAaron Young /* If the queue is stopped, wake it up so that we'll 7578778b276SAaron Young * send out another START message at the next TX. 
75831762eaaSAaron Young */ 75931762eaaSAaron Young static void maybe_tx_wakeup(struct vnet_port *port) 76031762eaaSAaron Young { 76131762eaaSAaron Young struct netdev_queue *txq; 76231762eaaSAaron Young 76367d0719fSAaron Young txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), 76467d0719fSAaron Young port->q_index); 76531762eaaSAaron Young __netif_tx_lock(txq, smp_processor_id()); 766d4aa89ccSSowmini Varadhan if (likely(netif_tx_queue_stopped(txq))) 76731762eaaSAaron Young netif_tx_wake_queue(txq); 76831762eaaSAaron Young __netif_tx_unlock(txq); 76931762eaaSAaron Young } 77031762eaaSAaron Young 77167d0719fSAaron Young bool sunvnet_port_is_up_common(struct vnet_port *vnet) 77231762eaaSAaron Young { 77331762eaaSAaron Young struct vio_driver_state *vio = &vnet->vio; 77431762eaaSAaron Young 77531762eaaSAaron Young return !!(vio->hs_state & VIO_HS_COMPLETE); 77631762eaaSAaron Young } 77767d0719fSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common); 77831762eaaSAaron Young 77931762eaaSAaron Young static int vnet_event_napi(struct vnet_port *port, int budget) 78031762eaaSAaron Young { 7818778b276SAaron Young struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); 78231762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 78331762eaaSAaron Young int tx_wakeup, err; 78431762eaaSAaron Young int npkts = 0; 78531762eaaSAaron Young 786bf091f3fSShannon Nelson /* we don't expect any other bits */ 787bf091f3fSShannon Nelson BUG_ON(port->rx_event & ~(LDC_EVENT_DATA_READY | 788bf091f3fSShannon Nelson LDC_EVENT_RESET | 789bf091f3fSShannon Nelson LDC_EVENT_UP)); 79031762eaaSAaron Young 791bf091f3fSShannon Nelson /* RESET takes precedent over any other event */ 792bf091f3fSShannon Nelson if (port->rx_event & LDC_EVENT_RESET) { 793867fa150SShannon Nelson /* a link went down */ 794867fa150SShannon Nelson 795867fa150SShannon Nelson if (port->vsw == 1) { 796867fa150SShannon Nelson netif_tx_stop_all_queues(dev); 797867fa150SShannon Nelson netif_carrier_off(dev); 
798867fa150SShannon Nelson } 799867fa150SShannon Nelson 800bf091f3fSShannon Nelson vio_link_state_change(vio, LDC_EVENT_RESET); 80131762eaaSAaron Young vnet_port_reset(port); 80231762eaaSAaron Young vio_port_up(vio); 8038778b276SAaron Young 8048778b276SAaron Young /* If the device is running but its tx queue was 8058778b276SAaron Young * stopped (due to flow control), restart it. 8068778b276SAaron Young * This is necessary since vnet_port_reset() 8078778b276SAaron Young * clears the tx drings and thus we may never get 8088778b276SAaron Young * back a VIO_TYPE_DATA ACK packet - which is 8098778b276SAaron Young * the normal mechanism to restart the tx queue. 8108778b276SAaron Young */ 8118778b276SAaron Young if (netif_running(dev)) 8128778b276SAaron Young maybe_tx_wakeup(port); 813bf091f3fSShannon Nelson 81431762eaaSAaron Young port->rx_event = 0; 8150f512c84SShannon Nelson port->stats.event_reset++; 81631762eaaSAaron Young return 0; 81731762eaaSAaron Young } 81831762eaaSAaron Young 819bf091f3fSShannon Nelson if (port->rx_event & LDC_EVENT_UP) { 820867fa150SShannon Nelson /* a link came up */ 821867fa150SShannon Nelson 822867fa150SShannon Nelson if (port->vsw == 1) { 823867fa150SShannon Nelson netif_carrier_on(port->dev); 824867fa150SShannon Nelson netif_tx_start_all_queues(port->dev); 825867fa150SShannon Nelson } 826867fa150SShannon Nelson 827bf091f3fSShannon Nelson vio_link_state_change(vio, LDC_EVENT_UP); 828bf091f3fSShannon Nelson port->rx_event = 0; 8290f512c84SShannon Nelson port->stats.event_up++; 830bf091f3fSShannon Nelson return 0; 831bf091f3fSShannon Nelson } 83231762eaaSAaron Young 833dc153f85SAaron Young err = 0; 834dc153f85SAaron Young tx_wakeup = 0; 83531762eaaSAaron Young while (1) { 83631762eaaSAaron Young union { 83731762eaaSAaron Young struct vio_msg_tag tag; 83831762eaaSAaron Young u64 raw[8]; 83931762eaaSAaron Young } msgbuf; 84031762eaaSAaron Young 84131762eaaSAaron Young if (port->napi_resume) { 84231762eaaSAaron Young struct vio_dring_data *pkt 
= 84331762eaaSAaron Young (struct vio_dring_data *)&msgbuf; 84431762eaaSAaron Young struct vio_dring_state *dr = 84531762eaaSAaron Young &port->vio.drings[VIO_DRIVER_RX_RING]; 84631762eaaSAaron Young 84731762eaaSAaron Young pkt->tag.type = VIO_TYPE_DATA; 84831762eaaSAaron Young pkt->tag.stype = VIO_SUBTYPE_INFO; 84931762eaaSAaron Young pkt->tag.stype_env = VIO_DRING_DATA; 85031762eaaSAaron Young pkt->seq = dr->rcv_nxt; 851dc153f85SAaron Young pkt->start_idx = vio_dring_next(dr, 852dc153f85SAaron Young port->napi_stop_idx); 85331762eaaSAaron Young pkt->end_idx = -1; 854bf091f3fSShannon Nelson } else { 85531762eaaSAaron Young err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); 85631762eaaSAaron Young if (unlikely(err < 0)) { 85731762eaaSAaron Young if (err == -ECONNRESET) 85831762eaaSAaron Young vio_conn_reset(vio); 85931762eaaSAaron Young break; 86031762eaaSAaron Young } 86131762eaaSAaron Young if (err == 0) 86231762eaaSAaron Young break; 86331762eaaSAaron Young viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", 86431762eaaSAaron Young msgbuf.tag.type, 86531762eaaSAaron Young msgbuf.tag.stype, 86631762eaaSAaron Young msgbuf.tag.stype_env, 86731762eaaSAaron Young msgbuf.tag.sid); 86831762eaaSAaron Young err = vio_validate_sid(vio, &msgbuf.tag); 86931762eaaSAaron Young if (err < 0) 87031762eaaSAaron Young break; 871bf091f3fSShannon Nelson } 872bf091f3fSShannon Nelson 87331762eaaSAaron Young if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { 87431762eaaSAaron Young if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { 87567d0719fSAaron Young if (!sunvnet_port_is_up_common(port)) { 87631762eaaSAaron Young /* failures like handshake_failure() 87731762eaaSAaron Young * may have cleaned up dring, but 87831762eaaSAaron Young * NAPI polling may bring us here. 
87931762eaaSAaron Young */ 88031762eaaSAaron Young err = -ECONNRESET; 88131762eaaSAaron Young break; 88231762eaaSAaron Young } 88331762eaaSAaron Young err = vnet_rx(port, &msgbuf, &npkts, budget); 88431762eaaSAaron Young if (npkts >= budget) 88531762eaaSAaron Young break; 88631762eaaSAaron Young if (npkts == 0) 88731762eaaSAaron Young break; 88831762eaaSAaron Young } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { 88931762eaaSAaron Young err = vnet_ack(port, &msgbuf); 89031762eaaSAaron Young if (err > 0) 89131762eaaSAaron Young tx_wakeup |= err; 89231762eaaSAaron Young } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) { 89331762eaaSAaron Young err = vnet_nack(port, &msgbuf); 89431762eaaSAaron Young } 89531762eaaSAaron Young } else if (msgbuf.tag.type == VIO_TYPE_CTRL) { 89631762eaaSAaron Young if (msgbuf.tag.stype_env == VNET_MCAST_INFO) 89731762eaaSAaron Young err = handle_mcast(port, &msgbuf); 89831762eaaSAaron Young else 89931762eaaSAaron Young err = vio_control_pkt_engine(vio, &msgbuf); 90031762eaaSAaron Young if (err) 90131762eaaSAaron Young break; 90231762eaaSAaron Young } else { 90331762eaaSAaron Young err = vnet_handle_unknown(port, &msgbuf); 90431762eaaSAaron Young } 90531762eaaSAaron Young if (err == -ECONNRESET) 90631762eaaSAaron Young break; 90731762eaaSAaron Young } 90831762eaaSAaron Young if (unlikely(tx_wakeup && err != -ECONNRESET)) 90931762eaaSAaron Young maybe_tx_wakeup(port); 91031762eaaSAaron Young return npkts; 91131762eaaSAaron Young } 91231762eaaSAaron Young 91331762eaaSAaron Young int sunvnet_poll_common(struct napi_struct *napi, int budget) 91431762eaaSAaron Young { 91531762eaaSAaron Young struct vnet_port *port = container_of(napi, struct vnet_port, napi); 91631762eaaSAaron Young struct vio_driver_state *vio = &port->vio; 91731762eaaSAaron Young int processed = vnet_event_napi(port, budget); 91831762eaaSAaron Young 91931762eaaSAaron Young if (processed < budget) { 9206ad20165SEric Dumazet napi_complete_done(napi, processed); 
		port->rx_event &= ~LDC_EVENT_DATA_READY;
		vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
	}
	return processed;
}
EXPORT_SYMBOL_GPL(sunvnet_poll_common);

/* LDC event callback shared by sunvnet and ldmvsw: record the event bits,
 * mask the RX interrupt and defer all processing to NAPI context.
 */
void sunvnet_event_common(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;

	port->rx_event |= event;
	vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
	napi_schedule(&port->napi);
}
EXPORT_SYMBOL_GPL(sunvnet_event_common);

/* Send a DRING_DATA "start" trigger for TX descriptors beginning at @start.
 * Any pending STOPPED ack owed to the peer is sent first.  On -EAGAIN the
 * send is retried with exponential backoff (capped at 128us, at most
 * VNET_MAX_RETRIES attempts).  Returns the vio_ldc_send() result; > 0 means
 * the trigger went out and dr->snd_nxt was advanced.
 */
static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_INFO,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = start,
		.end_idx = (u32)-1,
	};
	int err, delay;
	int retries = 0;

	if (port->stop_rx) {
		trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
						  port->vio._peer_sid,
						  port->stop_rx_idx, -1);
		err = vnet_send_ack(port,
				    &port->vio.drings[VIO_DRIVER_RX_RING],
				    port->stop_rx_idx, -1,
				    VIO_DRING_STOPPED);
		if (err <= 0)
			return err;
	}

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);
	trace_vnet_tx_trigger(port->vio._local_sid,
			      port->vio._peer_sid, start, err);

	return err;
}

/* Walk the TX ring backwards from the producer index, collect completed
 * skbs into a singly linked list (via skb->next) for the caller to free,
 * mark their descriptors FREE and unmap their LDC cookies.  READY
 * descriptors are counted in *@pending; the walk stops early at the first
 * FREE descriptor with no attached skb.  Caller must hold netif_tx_lock.
 */
static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
					  unsigned *pending)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *skb = NULL;
	int i, txi;

	*pending = 0;

	txi = dr->prod;
	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		--txi;
		if (txi < 0)
			txi = VNET_TX_RING_SIZE - 1;

		d = vio_dring_entry(dr, txi);

		if (d->hdr.state == VIO_DESC_READY) {
			/* still owned by the peer - not done yet */
			(*pending)++;
			continue;
		}
		if (port->tx_bufs[txi].skb) {
			if (d->hdr.state != VIO_DESC_DONE)
				pr_notice("invalid ring buffer state %d\n",
					  d->hdr.state);
			BUG_ON(port->tx_bufs[txi].skb->next);

			port->tx_bufs[txi].skb->next = skb;
			skb = port->tx_bufs[txi].skb;
			port->tx_bufs[txi].skb = NULL;

			ldc_unmap(port->vio.lp,
				  port->tx_bufs[txi].cookies,
				  port->tx_bufs[txi].ncookies);
		} else if (d->hdr.state == VIO_DESC_FREE) {
			break;
		}
		d->hdr.state = VIO_DESC_FREE;
	}
	return skb;
}

/* Free a list of skbs chained through skb->next (as built by
 * vnet_clean_tx_ring()).
 */
static inline void vnet_free_skbs(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		skb->next = NULL;
		dev_kfree_skb(skb);
		skb = next;
	}
}

/* Periodic TX-ring reaper.  Frees completed skbs and re-arms itself while
 * descriptors are still pending; otherwise the timer is stopped.
 */
void sunvnet_clean_timer_expire_common(struct timer_list *t)
{
	struct vnet_port *port = from_timer(port, t, clean_timer);
	struct sk_buff *freeskbs;
	unsigned pending;

	netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port));
	freeskbs = vnet_clean_tx_ring(port, &pending);
	netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port));

	vnet_free_skbs(freeskbs);

	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else
		del_timer(&port->clean_timer);
}
EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common);

/* Map an skb (linear head plus page frags) into at most @ncookies LDC
 * cookies.  The head is padded to at least ETH_ZLEN, prefixed with
 * VNET_PACKET_SKIP bytes and rounded up to an 8-byte multiple; each frag
 * length is rounded up likewise.  Returns the number of cookies used, or a
 * negative error (all cookies mapped so far are unmapped on failure).
 */
static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
			       struct ldc_trans_cookie *cookies, int ncookies,
			       unsigned int map_perm)
{
	int i, nc, err, blen;

	/* header */
	blen = skb_headlen(skb);
	if (blen < ETH_ZLEN)
		blen = ETH_ZLEN;
	blen += VNET_PACKET_SKIP;
	blen += 8 - (blen & 7);

	err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies,
			     ncookies, map_perm);
	if (err < 0)
		return err;
	nc = err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		if (nc < ncookies) {
			vaddr = kmap_atomic(skb_frag_page(f));
			blen = skb_frag_size(f);
			blen += 8 - (blen & 7);
			err = ldc_map_single(lp, vaddr + f->page_offset,
					     blen, cookies + nc, ncookies - nc,
					     map_perm);
			kunmap_atomic(vaddr);
		} else {
			err = -EMSGSIZE;
		}

		if (err < 0) {
			ldc_unmap(lp, cookies, nc);
			return err;
		}
		nc += err;
	}
	return nc;
}

/* Reshape an skb so it can be mapped with @ncookies LDC cookies: correct
 * data alignment (VNET_PACKET_SKIP offset, 8-byte-aligned frags), enough
 * head/tailroom, and few enough frags.  If the skb already qualifies it is
 * returned unchanged; otherwise its contents are copied into a freshly
 * aligned skb (computing the full checksum for CHECKSUM_PARTIAL packets on
 * the way) and the original is freed.  Returns NULL on allocation/copy
 * failure, with the original skb freed.
 */
static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
{
	struct sk_buff *nskb;
	int i, len, pad, docopy;

	len = skb->len;
	pad = 0;
	if (len < ETH_ZLEN) {
		pad += ETH_ZLEN - skb->len;
		len += pad;
	}
	len += VNET_PACKET_SKIP;
	pad += 8 - (len & 7);

	/* make sure we have enough cookies and alignment in every frag */
	docopy = skb_shinfo(skb)->nr_frags >= ncookies;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		docopy |= f->page_offset & 7;
	}
	if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
	    skb_tailroom(skb) < pad ||
	    skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
		int start = 0, offset;
		__wsum csum;

		len = skb->len > ETH_ZLEN ?
skb->len : ETH_ZLEN;
		nskb = alloc_and_align_skb(skb->dev, len);
		if (!nskb) {
			dev_kfree_skb(skb);
			return NULL;
		}
		skb_reserve(nskb, VNET_PACKET_SKIP);

		/* carry over protocol and header offsets to the copy */
		nskb->protocol = skb->protocol;
		offset = skb_mac_header(skb) - skb->data;
		skb_set_mac_header(nskb, offset);
		offset = skb_network_header(skb) - skb->data;
		skb_set_network_header(nskb, offset);
		offset = skb_transport_header(skb) - skb->data;
		skb_set_transport_header(nskb, offset);

		offset = 0;
		nskb->csum_offset = skb->csum_offset;
		nskb->ip_summed = skb->ip_summed;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			start = skb_checksum_start_offset(skb);
		if (start) {
			int offset = start + nskb->csum_offset;

			/* copy the headers, no csum here */
			if (skb_copy_bits(skb, 0, nskb->data, start)) {
				dev_kfree_skb(nskb);
				dev_kfree_skb(skb);
				return NULL;
			}

			/* copy the rest, with csum calculation */
			*(__sum16 *)(skb->data + offset) = 0;
			csum = skb_copy_and_csum_bits(skb, start,
						      nskb->data + start,
						      skb->len - start, 0);

			/* add in the header checksums */
			if (skb->protocol == htons(ETH_P_IP)) {
				struct iphdr *iph = ip_hdr(nskb);

				if (iph->protocol == IPPROTO_TCP ||
				    iph->protocol == IPPROTO_UDP) {
					csum = csum_tcpudp_magic(iph->saddr,
								 iph->daddr,
								 skb->len - start,
								 iph->protocol,
								 csum);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				struct ipv6hdr *ip6h = ipv6_hdr(nskb);

				if (ip6h->nexthdr == IPPROTO_TCP ||
				    ip6h->nexthdr == IPPROTO_UDP) {
					csum = csum_ipv6_magic(&ip6h->saddr,
							       &ip6h->daddr,
							       skb->len - start,
							       ip6h->nexthdr,
							       csum);
				}
			}

			/* save the final result */
			*(__sum16 *)(nskb->data + offset) = csum;

			/* checksum is now complete - no further offload */
			nskb->ip_summed = CHECKSUM_NONE;
		} else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
			dev_kfree_skb(nskb);
			dev_kfree_skb(skb);
			return NULL;
		}
		(void)skb_put(nskb, skb->len);
		if (skb_is_gso(skb)) {
			skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
			skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
		}
		nskb->queue_mapping = skb->queue_mapping;
		dev_kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/* Software-segment a GSO skb to fit the peer's TSO limit (port->tsolen)
 * and transmit each segment through sunvnet_start_xmit_common().  Stops the
 * TX queue and returns NETDEV_TX_BUSY when the ring cannot hold all
 * segments.  @vnet_tx_port maps an skb to the destination port.
 */
static netdev_tx_t
vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
		     struct vnet_port *(*vnet_tx_port)
		     (struct sk_buff *, struct net_device *))
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *segs;
	int maclen, datalen;
	int status;
	int gso_size, gso_type, gso_segs;
	int hlen = skb_transport_header(skb) - skb_mac_header(skb);
	int proto = IPPROTO_IP;

	if (skb->protocol == htons(ETH_P_IP))
		proto = ip_hdr(skb)->protocol;
	else if (skb->protocol == htons(ETH_P_IPV6))
		proto = ipv6_hdr(skb)->nexthdr;

	if (proto == IPPROTO_TCP) {
		hlen += tcp_hdr(skb)->doff * 4;
	} else if (proto == IPPROTO_UDP) {
		hlen += sizeof(struct udphdr);
	} else {
		pr_err("vnet_handle_offloads GSO with unknown transport "
		       "protocol %d tproto %d\n", skb->protocol, proto);
		hlen = 128; /* XXX */
	}
	/* payload bytes per segment the peer can accept */
	datalen = port->tsolen - hlen;

	gso_size = skb_shinfo(skb)->gso_size;
	gso_type = skb_shinfo(skb)->gso_type;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (port->tso && gso_size < datalen)
		gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);

	if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, port->q_index);
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
			return NETDEV_TX_BUSY;
		netif_tx_wake_queue(txq);
	}

	maclen = skb_network_header(skb) - skb_mac_header(skb);
	skb_pull(skb, maclen);

	if (port->tso && gso_size < datalen) {
		if (skb_unclone(skb, GFP_ATOMIC))
			goto out_dropped;

		/* segment to TSO size */
		skb_shinfo(skb)->gso_size = datalen;
		skb_shinfo(skb)->gso_segs = gso_segs;
	}
	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto out_dropped;

	skb_push(skb, maclen);
	skb_reset_mac_header(skb);

	status = 0;
	while (segs) {
		struct sk_buff *curr = segs;

		segs = segs->next;
		curr->next = NULL;
		if (port->tso && curr->len > dev->mtu) {
			skb_shinfo(curr)->gso_size = gso_size;
			skb_shinfo(curr)->gso_type = gso_type;
/* NOTE(review): the lines below are the tail of vnet_handle_offloads();
 * its header lies above this chunk.  For each software-segmented skb we
 * restore the MAC header, recompute the checksum offsets, and hand the
 * segment to sunvnet_start_xmit_common().
 */
		skb_shinfo(curr)->gso_segs =
			DIV_ROUND_UP(curr->len - hlen, gso_size);
		} else {
			skb_shinfo(curr)->gso_size = 0;
		}

		skb_push(curr, maclen);
		skb_reset_mac_header(curr);
		memcpy(skb_mac_header(curr), skb_mac_header(skb),
		       maclen);
		curr->csum_start = skb_transport_header(curr) - curr->head;
		if (ip_hdr(curr)->protocol == IPPROTO_TCP)
			curr->csum_offset = offsetof(struct tcphdr, check);
		else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
			curr->csum_offset = offsetof(struct udphdr, check);

		/* Once one segment fails to transmit, drop (not queue) the
		 * remaining segments but keep walking the list so they are
		 * all freed.
		 */
		if (!(status & NETDEV_TX_MASK))
			status = sunvnet_start_xmit_common(curr, dev,
							   vnet_tx_port);
		if (status & NETDEV_TX_MASK)
			dev_kfree_skb_any(curr);
	}

	if (!(status & NETDEV_TX_MASK))
		dev_kfree_skb_any(skb);
	return status;
out_dropped:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* sunvnet_start_xmit_common() - ndo_start_xmit handler shared by the
 * sunvnet and ldmvsw front ends.
 * @skb: packet to transmit
 * @dev: transmitting net device
 * @vnet_tx_port: caller-supplied mapper from (skb, dev) to the vnet_port
 *                (LDC channel) the packet must be queued on
 *
 * Places @skb in the per-port TX descriptor ring shared with the peer
 * LDOM and, when needed, sends a single LDC "start" trigger to announce
 * new READY descriptors.  Runs under rcu_read_lock() so the port cannot
 * be freed while in use.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
netdev_tx_t
sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
			  struct vnet_port *(*vnet_tx_port)
			  (struct sk_buff *, struct net_device *))
{
	struct vnet_port *port = NULL;
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned int len;
	struct sk_buff *freeskbs = NULL;
	int i, err, txi;
	unsigned pending = 0;
	struct netdev_queue *txq;

	rcu_read_lock();
	port = vnet_tx_port(skb, dev);
	if (unlikely(!port))
		goto out_dropped;

	/* GSO frames longer than the peer's advertised TSO limit are
	 * segmented in software by vnet_handle_offloads() (which re-enters
	 * this function per segment and drops the RCU lock itself).
	 */
	if (skb_is_gso(skb) && skb->len > port->tsolen) {
		err = vnet_handle_offloads(port, skb, vnet_tx_port);
		rcu_read_unlock();
		return err;
	}

	/* Non-GSO frame exceeding the remote MTU: report PMTU back to the
	 * sender (ICMP frag-needed / ICMPv6 packet-too-big) and drop.
	 */
	if (!skb_is_gso(skb) && skb->len > port->rmtu) {
		unsigned long localmtu = port->rmtu - ETH_HLEN;

		if (vio_version_after_eq(&port->vio, 1, 3))
			localmtu -= VLAN_HLEN;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct flowi4 fl4;
			struct rtable *rt = NULL;

			memset(&fl4, 0, sizeof(fl4));
			fl4.flowi4_oif = dev->ifindex;
			fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
			fl4.daddr = ip_hdr(skb)->daddr;
			fl4.saddr = ip_hdr(skb)->saddr;

			rt = ip_route_output_key(dev_net(dev), &fl4);
			if (!IS_ERR(rt)) {
				skb_dst_set(skb, &rt->dst);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_FRAG_NEEDED,
					  htonl(localmtu));
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
#endif
		goto out_dropped;
	}

	/* Reshape the skb so its data fits in at most 2 LDC cookies;
	 * may return a new skb (or NULL on allocation failure).
	 */
	skb = vnet_skb_shape(skb, 2);

	if (unlikely(!skb))
		goto out_dropped;

	/* The peer cannot finish a partial checksum, so complete it here. */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->protocol == htons(ETH_P_IP))
			vnet_fullcsum_ipv4(skb);
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			vnet_fullcsum_ipv6(skb);
#endif
	}

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	i = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, i);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
			dev->stats.tx_errors++;
		}
		rcu_read_unlock();
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	txi = dr->prod;

	/* Opportunistically reap already-consumed descriptors; the freed
	 * skbs are released after we drop the RCU lock.
	 */
	freeskbs = vnet_clean_tx_ring(port, &pending);

	BUG_ON(port->tx_bufs[txi].skb);

	/* Pad runt frames up to the minimum Ethernet length. */
	len = skb->len;
	if (len < ETH_ZLEN)
		len = ETH_ZLEN;

	err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
			   (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
	if (err < 0) {
		netdev_info(dev, "tx buffer map error %d\n", err);
		goto out_dropped;
	}

	/* The ring slot now owns the skb; on success it is freed later by
	 * vnet_clean_tx_ring(), not here.
	 */
	port->tx_bufs[txi].skb = skb;
	skb = NULL;
	port->tx_bufs[txi].ncookies = err;

	/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
	 * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
	 * the protocol itself does not require it as long as the peer
	 * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
	 *
	 * An ACK for every packet in the ring is expensive as the
	 * sending of LDC messages is slow and affects performance.
	 */
	d->hdr.ack = VIO_ACK_DISABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[txi].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[txi].cookies[i];
	if (vio_version_after_eq(&port->vio, 1, 7)) {
		struct vio_net_dext *dext = vio_net_ext(d);

		memset(dext, 0, sizeof(*dext));
		if (skb_is_gso(port->tx_bufs[txi].skb)) {
			dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
						->gso_size;
			dext->flags |= VNET_PKT_IPV4_LSO;
		}
		if (vio_version_after_eq(&port->vio, 1, 8) &&
		    !port->switch_port) {
			dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
			dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
		}
	}

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	dma_wmb();

	d->hdr.state = VIO_DESC_READY;

	/* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
	 * to notify the consumer that some descriptors are READY.
	 * After that "start" trigger, no additional triggers are needed until
	 * a DRING_STOPPED is received from the consumer. The dr->cons field
	 * (set up by vnet_ack()) has the value of the next dring index
	 * that has not yet been ack-ed. We send a "start" trigger here
	 * if, and only if, start_cons is true (reset it afterward). Conversely,
	 * vnet_ack() should check if the dring corresponding to cons
	 * is marked READY, but start_cons was false.
	 * If so, vnet_ack() should send out the missed "start" trigger.
	 *
	 * Note that the dma_wmb() above makes sure the cookies et al. are
	 * not globally visible before the VIO_DESC_READY, and that the
	 * stores are ordered correctly by the compiler. The consumer will
	 * not proceed until the VIO_DESC_READY is visible assuring that
	 * the consumer does not observe anything related to descriptors
	 * out of order. The HV trap from the LDC start trigger is the
	 * producer to consumer announcement that work is available to the
	 * consumer
	 */
	if (!port->start_cons) { /* previous trigger suffices */
		trace_vnet_skip_tx_trigger(port->vio._local_sid,
					   port->vio._peer_sid, dr->cons);
		goto ldc_start_done;
	}

	err = __vnet_tx_trigger(port, dr->cons);
	if (unlikely(err < 0)) {
		netdev_info(dev, "TX trigger error %d\n", err);
		/* Undo slot ownership so the skb is freed on the drop path. */
		d->hdr.state = VIO_DESC_FREE;
		skb = port->tx_bufs[txi].skb;
		port->tx_bufs[txi].skb = NULL;
		dev->stats.tx_carrier_errors++;
		goto out_dropped;
	}

ldc_start_done:
	port->start_cons = false;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
	port->stats.tx_packets++;
	port->stats.tx_bytes += port->tx_bufs[txi].skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		netif_tx_stop_queue(txq);
		/* Re-check after the barrier: an ACK may have freed slots
		 * between the avail test and the queue stop.
		 */
		smp_rmb();
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_tx_wake_queue(txq);
	}

	(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
	rcu_read_unlock();

	vnet_free_skbs(freeskbs);

	return NETDEV_TX_OK;

out_dropped:
	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else if (port)
		del_timer(&port->clean_timer);
	rcu_read_unlock();
	if (skb)
		dev_kfree_skb(skb);
	vnet_free_skbs(freeskbs);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common);

/* Shared ndo_tx_timeout stub; recovery is intentionally not implemented. */
void sunvnet_tx_timeout_common(struct net_device *dev)
{
	/* XXX Implement me XXX */
}
EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common);
154831762eaaSAaron Young 154931762eaaSAaron Young int sunvnet_open_common(struct net_device *dev) 155031762eaaSAaron Young { 155131762eaaSAaron Young netif_carrier_on(dev); 155231762eaaSAaron Young netif_tx_start_all_queues(dev); 155331762eaaSAaron Young 155431762eaaSAaron Young return 0; 155531762eaaSAaron Young } 155631762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_open_common); 155731762eaaSAaron Young 155831762eaaSAaron Young int sunvnet_close_common(struct net_device *dev) 155931762eaaSAaron Young { 156031762eaaSAaron Young netif_tx_stop_all_queues(dev); 156131762eaaSAaron Young netif_carrier_off(dev); 156231762eaaSAaron Young 156331762eaaSAaron Young return 0; 156431762eaaSAaron Young } 156531762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_close_common); 156631762eaaSAaron Young 156731762eaaSAaron Young static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) 156831762eaaSAaron Young { 156931762eaaSAaron Young struct vnet_mcast_entry *m; 157031762eaaSAaron Young 157131762eaaSAaron Young for (m = vp->mcast_list; m; m = m->next) { 157231762eaaSAaron Young if (ether_addr_equal(m->addr, addr)) 157331762eaaSAaron Young return m; 157431762eaaSAaron Young } 157531762eaaSAaron Young return NULL; 157631762eaaSAaron Young } 157731762eaaSAaron Young 157831762eaaSAaron Young static void __update_mc_list(struct vnet *vp, struct net_device *dev) 157931762eaaSAaron Young { 158031762eaaSAaron Young struct netdev_hw_addr *ha; 158131762eaaSAaron Young 158231762eaaSAaron Young netdev_for_each_mc_addr(ha, dev) { 158331762eaaSAaron Young struct vnet_mcast_entry *m; 158431762eaaSAaron Young 158531762eaaSAaron Young m = __vnet_mc_find(vp, ha->addr); 158631762eaaSAaron Young if (m) { 158731762eaaSAaron Young m->hit = 1; 158831762eaaSAaron Young continue; 158931762eaaSAaron Young } 159031762eaaSAaron Young 159131762eaaSAaron Young if (!m) { 159231762eaaSAaron Young m = kzalloc(sizeof(*m), GFP_ATOMIC); 159331762eaaSAaron Young if (!m) 159431762eaaSAaron Young continue; 
159531762eaaSAaron Young memcpy(m->addr, ha->addr, ETH_ALEN); 159631762eaaSAaron Young m->hit = 1; 159731762eaaSAaron Young 159831762eaaSAaron Young m->next = vp->mcast_list; 159931762eaaSAaron Young vp->mcast_list = m; 160031762eaaSAaron Young } 160131762eaaSAaron Young } 160231762eaaSAaron Young } 160331762eaaSAaron Young 160431762eaaSAaron Young static void __send_mc_list(struct vnet *vp, struct vnet_port *port) 160531762eaaSAaron Young { 160631762eaaSAaron Young struct vio_net_mcast_info info; 160731762eaaSAaron Young struct vnet_mcast_entry *m, **pp; 160831762eaaSAaron Young int n_addrs; 160931762eaaSAaron Young 161031762eaaSAaron Young memset(&info, 0, sizeof(info)); 161131762eaaSAaron Young 161231762eaaSAaron Young info.tag.type = VIO_TYPE_CTRL; 161331762eaaSAaron Young info.tag.stype = VIO_SUBTYPE_INFO; 161431762eaaSAaron Young info.tag.stype_env = VNET_MCAST_INFO; 161531762eaaSAaron Young info.tag.sid = vio_send_sid(&port->vio); 161631762eaaSAaron Young info.set = 1; 161731762eaaSAaron Young 161831762eaaSAaron Young n_addrs = 0; 161931762eaaSAaron Young for (m = vp->mcast_list; m; m = m->next) { 162031762eaaSAaron Young if (m->sent) 162131762eaaSAaron Young continue; 162231762eaaSAaron Young m->sent = 1; 162331762eaaSAaron Young memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], 162431762eaaSAaron Young m->addr, ETH_ALEN); 162531762eaaSAaron Young if (++n_addrs == VNET_NUM_MCAST) { 162631762eaaSAaron Young info.count = n_addrs; 162731762eaaSAaron Young 162831762eaaSAaron Young (void)vio_ldc_send(&port->vio, &info, 162931762eaaSAaron Young sizeof(info)); 163031762eaaSAaron Young n_addrs = 0; 163131762eaaSAaron Young } 163231762eaaSAaron Young } 163331762eaaSAaron Young if (n_addrs) { 163431762eaaSAaron Young info.count = n_addrs; 163531762eaaSAaron Young (void)vio_ldc_send(&port->vio, &info, sizeof(info)); 163631762eaaSAaron Young } 163731762eaaSAaron Young 163831762eaaSAaron Young info.set = 0; 163931762eaaSAaron Young 164031762eaaSAaron Young n_addrs = 0; 
164131762eaaSAaron Young pp = &vp->mcast_list; 164231762eaaSAaron Young while ((m = *pp) != NULL) { 164331762eaaSAaron Young if (m->hit) { 164431762eaaSAaron Young m->hit = 0; 164531762eaaSAaron Young pp = &m->next; 164631762eaaSAaron Young continue; 164731762eaaSAaron Young } 164831762eaaSAaron Young 164931762eaaSAaron Young memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], 165031762eaaSAaron Young m->addr, ETH_ALEN); 165131762eaaSAaron Young if (++n_addrs == VNET_NUM_MCAST) { 165231762eaaSAaron Young info.count = n_addrs; 165331762eaaSAaron Young (void)vio_ldc_send(&port->vio, &info, 165431762eaaSAaron Young sizeof(info)); 165531762eaaSAaron Young n_addrs = 0; 165631762eaaSAaron Young } 165731762eaaSAaron Young 165831762eaaSAaron Young *pp = m->next; 165931762eaaSAaron Young kfree(m); 166031762eaaSAaron Young } 166131762eaaSAaron Young if (n_addrs) { 166231762eaaSAaron Young info.count = n_addrs; 166331762eaaSAaron Young (void)vio_ldc_send(&port->vio, &info, sizeof(info)); 166431762eaaSAaron Young } 166531762eaaSAaron Young } 166631762eaaSAaron Young 166767d0719fSAaron Young void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp) 166831762eaaSAaron Young { 166931762eaaSAaron Young struct vnet_port *port; 167031762eaaSAaron Young 167131762eaaSAaron Young rcu_read_lock(); 167231762eaaSAaron Young list_for_each_entry_rcu(port, &vp->port_list, list) { 167331762eaaSAaron Young if (port->switch_port) { 167431762eaaSAaron Young __update_mc_list(vp, dev); 167531762eaaSAaron Young __send_mc_list(vp, port); 167631762eaaSAaron Young break; 167731762eaaSAaron Young } 167831762eaaSAaron Young } 167931762eaaSAaron Young rcu_read_unlock(); 168031762eaaSAaron Young } 168131762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common); 168231762eaaSAaron Young 168331762eaaSAaron Young int sunvnet_set_mac_addr_common(struct net_device *dev, void *p) 168431762eaaSAaron Young { 168531762eaaSAaron Young return -EINVAL; 168631762eaaSAaron Young } 168731762eaaSAaron 
Young EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common); 168831762eaaSAaron Young 168931762eaaSAaron Young void sunvnet_port_free_tx_bufs_common(struct vnet_port *port) 169031762eaaSAaron Young { 169131762eaaSAaron Young struct vio_dring_state *dr; 169231762eaaSAaron Young int i; 169331762eaaSAaron Young 169431762eaaSAaron Young dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 169531762eaaSAaron Young 1696dc153f85SAaron Young if (!dr->base) 169731762eaaSAaron Young return; 169831762eaaSAaron Young 169931762eaaSAaron Young for (i = 0; i < VNET_TX_RING_SIZE; i++) { 170031762eaaSAaron Young struct vio_net_desc *d; 170131762eaaSAaron Young void *skb = port->tx_bufs[i].skb; 170231762eaaSAaron Young 170331762eaaSAaron Young if (!skb) 170431762eaaSAaron Young continue; 170531762eaaSAaron Young 170631762eaaSAaron Young d = vio_dring_entry(dr, i); 170731762eaaSAaron Young 170831762eaaSAaron Young ldc_unmap(port->vio.lp, 170931762eaaSAaron Young port->tx_bufs[i].cookies, 171031762eaaSAaron Young port->tx_bufs[i].ncookies); 171131762eaaSAaron Young dev_kfree_skb(skb); 171231762eaaSAaron Young port->tx_bufs[i].skb = NULL; 171331762eaaSAaron Young d->hdr.state = VIO_DESC_FREE; 171431762eaaSAaron Young } 171531762eaaSAaron Young ldc_free_exp_dring(port->vio.lp, dr->base, 171631762eaaSAaron Young (dr->entry_size * dr->num_entries), 171731762eaaSAaron Young dr->cookies, dr->ncookies); 171831762eaaSAaron Young dr->base = NULL; 171931762eaaSAaron Young dr->entry_size = 0; 172031762eaaSAaron Young dr->num_entries = 0; 172131762eaaSAaron Young dr->pending = 0; 172231762eaaSAaron Young dr->ncookies = 0; 172331762eaaSAaron Young } 172431762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common); 172531762eaaSAaron Young 1726867fa150SShannon Nelson void vnet_port_reset(struct vnet_port *port) 172731762eaaSAaron Young { 172831762eaaSAaron Young del_timer(&port->clean_timer); 172931762eaaSAaron Young sunvnet_port_free_tx_bufs_common(port); 173031762eaaSAaron Young port->rmtu = 0; 
1731bc221a34SShannon Nelson port->tso = (port->vsw == 0); /* no tso in vsw, misbehaves in bridge */ 173231762eaaSAaron Young port->tsolen = 0; 173331762eaaSAaron Young } 1734867fa150SShannon Nelson EXPORT_SYMBOL_GPL(vnet_port_reset); 173531762eaaSAaron Young 173631762eaaSAaron Young static int vnet_port_alloc_tx_ring(struct vnet_port *port) 173731762eaaSAaron Young { 173831762eaaSAaron Young struct vio_dring_state *dr; 173931762eaaSAaron Young unsigned long len, elen; 174031762eaaSAaron Young int i, err, ncookies; 174131762eaaSAaron Young void *dring; 174231762eaaSAaron Young 174331762eaaSAaron Young dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 174431762eaaSAaron Young 174531762eaaSAaron Young elen = sizeof(struct vio_net_desc) + 174631762eaaSAaron Young sizeof(struct ldc_trans_cookie) * 2; 174731762eaaSAaron Young if (vio_version_after_eq(&port->vio, 1, 7)) 174831762eaaSAaron Young elen += sizeof(struct vio_net_dext); 174931762eaaSAaron Young len = VNET_TX_RING_SIZE * elen; 175031762eaaSAaron Young 175131762eaaSAaron Young ncookies = VIO_MAX_RING_COOKIES; 175231762eaaSAaron Young dring = ldc_alloc_exp_dring(port->vio.lp, len, 175331762eaaSAaron Young dr->cookies, &ncookies, 175431762eaaSAaron Young (LDC_MAP_SHADOW | 175531762eaaSAaron Young LDC_MAP_DIRECT | 175631762eaaSAaron Young LDC_MAP_RW)); 175731762eaaSAaron Young if (IS_ERR(dring)) { 175831762eaaSAaron Young err = PTR_ERR(dring); 175931762eaaSAaron Young goto err_out; 176031762eaaSAaron Young } 176131762eaaSAaron Young 176231762eaaSAaron Young dr->base = dring; 176331762eaaSAaron Young dr->entry_size = elen; 176431762eaaSAaron Young dr->num_entries = VNET_TX_RING_SIZE; 1765dc153f85SAaron Young dr->prod = 0; 1766dc153f85SAaron Young dr->cons = 0; 176731762eaaSAaron Young port->start_cons = true; /* need an initial trigger */ 176831762eaaSAaron Young dr->pending = VNET_TX_RING_SIZE; 176931762eaaSAaron Young dr->ncookies = ncookies; 177031762eaaSAaron Young 177131762eaaSAaron Young for (i = 0; i < 
VNET_TX_RING_SIZE; ++i) { 177231762eaaSAaron Young struct vio_net_desc *d; 177331762eaaSAaron Young 177431762eaaSAaron Young d = vio_dring_entry(dr, i); 177531762eaaSAaron Young d->hdr.state = VIO_DESC_FREE; 177631762eaaSAaron Young } 177731762eaaSAaron Young return 0; 177831762eaaSAaron Young 177931762eaaSAaron Young err_out: 178031762eaaSAaron Young sunvnet_port_free_tx_bufs_common(port); 178131762eaaSAaron Young 178231762eaaSAaron Young return err; 178331762eaaSAaron Young } 178431762eaaSAaron Young 178531762eaaSAaron Young #ifdef CONFIG_NET_POLL_CONTROLLER 178667d0719fSAaron Young void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp) 178731762eaaSAaron Young { 178831762eaaSAaron Young struct vnet_port *port; 178931762eaaSAaron Young unsigned long flags; 179031762eaaSAaron Young 179131762eaaSAaron Young spin_lock_irqsave(&vp->lock, flags); 179231762eaaSAaron Young if (!list_empty(&vp->port_list)) { 179331762eaaSAaron Young port = list_entry(vp->port_list.next, struct vnet_port, list); 179431762eaaSAaron Young napi_schedule(&port->napi); 179531762eaaSAaron Young } 179631762eaaSAaron Young spin_unlock_irqrestore(&vp->lock, flags); 179731762eaaSAaron Young } 179831762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common); 179931762eaaSAaron Young #endif 180031762eaaSAaron Young 180131762eaaSAaron Young void sunvnet_port_add_txq_common(struct vnet_port *port) 180231762eaaSAaron Young { 180331762eaaSAaron Young struct vnet *vp = port->vp; 1804e1f1e5f7SShannon Nelson int smallest = 0; 1805e1f1e5f7SShannon Nelson int i; 180631762eaaSAaron Young 1807e1f1e5f7SShannon Nelson /* find the first least-used q 1808e1f1e5f7SShannon Nelson * When there are more ldoms than q's, we start to 1809e1f1e5f7SShannon Nelson * double up on ports per queue. 
1810e1f1e5f7SShannon Nelson */ 1811e1f1e5f7SShannon Nelson for (i = 0; i < VNET_MAX_TXQS; i++) { 1812e1f1e5f7SShannon Nelson if (vp->q_used[i] == 0) { 1813e1f1e5f7SShannon Nelson smallest = i; 1814e1f1e5f7SShannon Nelson break; 1815e1f1e5f7SShannon Nelson } 1816e1f1e5f7SShannon Nelson if (vp->q_used[i] < vp->q_used[smallest]) 1817e1f1e5f7SShannon Nelson smallest = i; 1818e1f1e5f7SShannon Nelson } 1819e1f1e5f7SShannon Nelson 1820e1f1e5f7SShannon Nelson vp->nports++; 1821e1f1e5f7SShannon Nelson vp->q_used[smallest]++; 1822e1f1e5f7SShannon Nelson port->q_index = smallest; 182331762eaaSAaron Young } 182431762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common); 182531762eaaSAaron Young 182631762eaaSAaron Young void sunvnet_port_rm_txq_common(struct vnet_port *port) 182731762eaaSAaron Young { 182831762eaaSAaron Young port->vp->nports--; 1829e1f1e5f7SShannon Nelson port->vp->q_used[port->q_index]--; 1830e1f1e5f7SShannon Nelson port->q_index = 0; 183131762eaaSAaron Young } 183231762eaaSAaron Young EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common); 1833