1e689cf4aSJeff Kirsher /* sunvnet.c: Sun LDOM Virtual Network Driver. 2e689cf4aSJeff Kirsher * 3e689cf4aSJeff Kirsher * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> 4e689cf4aSJeff Kirsher */ 5e689cf4aSJeff Kirsher 6e689cf4aSJeff Kirsher #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7e689cf4aSJeff Kirsher 8e689cf4aSJeff Kirsher #include <linux/module.h> 9e689cf4aSJeff Kirsher #include <linux/kernel.h> 10e689cf4aSJeff Kirsher #include <linux/types.h> 11e689cf4aSJeff Kirsher #include <linux/slab.h> 12e689cf4aSJeff Kirsher #include <linux/delay.h> 13e689cf4aSJeff Kirsher #include <linux/init.h> 14e689cf4aSJeff Kirsher #include <linux/netdevice.h> 15e689cf4aSJeff Kirsher #include <linux/ethtool.h> 16e689cf4aSJeff Kirsher #include <linux/etherdevice.h> 17e689cf4aSJeff Kirsher #include <linux/mutex.h> 18e689cf4aSJeff Kirsher 19e689cf4aSJeff Kirsher #include <asm/vio.h> 20e689cf4aSJeff Kirsher #include <asm/ldc.h> 21e689cf4aSJeff Kirsher 22e689cf4aSJeff Kirsher #include "sunvnet.h" 23e689cf4aSJeff Kirsher 24e689cf4aSJeff Kirsher #define DRV_MODULE_NAME "sunvnet" 25e689cf4aSJeff Kirsher #define DRV_MODULE_VERSION "1.0" 26e689cf4aSJeff Kirsher #define DRV_MODULE_RELDATE "June 25, 2007" 27e689cf4aSJeff Kirsher 28f73d12bdSBill Pemberton static char version[] = 29e689cf4aSJeff Kirsher DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 30e689cf4aSJeff Kirsher MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); 31e689cf4aSJeff Kirsher MODULE_DESCRIPTION("Sun LDOM virtual network driver"); 32e689cf4aSJeff Kirsher MODULE_LICENSE("GPL"); 33e689cf4aSJeff Kirsher MODULE_VERSION(DRV_MODULE_VERSION); 34e689cf4aSJeff Kirsher 35adddc32dSSowmini Varadhan /* Heuristic for the number of times to exponentially backoff and 36adddc32dSSowmini Varadhan * retry sending an LDC trigger when EAGAIN is encountered 37adddc32dSSowmini Varadhan */ 38adddc32dSSowmini Varadhan #define VNET_MAX_RETRIES 10 39adddc32dSSowmini Varadhan 40e689cf4aSJeff Kirsher /* Ordered from largest major to lowest */ 41e689cf4aSJeff Kirsher static struct vio_version vnet_versions[] = { 42e689cf4aSJeff Kirsher { .major = 1, .minor = 0 }, 43e689cf4aSJeff Kirsher }; 44e689cf4aSJeff Kirsher 45e689cf4aSJeff Kirsher static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) 46e689cf4aSJeff Kirsher { 47e689cf4aSJeff Kirsher return vio_dring_avail(dr, VNET_TX_RING_SIZE); 48e689cf4aSJeff Kirsher } 49e689cf4aSJeff Kirsher 50e689cf4aSJeff Kirsher static int vnet_handle_unknown(struct vnet_port *port, void *arg) 51e689cf4aSJeff Kirsher { 52e689cf4aSJeff Kirsher struct vio_msg_tag *pkt = arg; 53e689cf4aSJeff Kirsher 54e689cf4aSJeff Kirsher pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n", 55e689cf4aSJeff Kirsher pkt->type, pkt->stype, pkt->stype_env, pkt->sid); 56e689cf4aSJeff Kirsher pr_err("Resetting connection\n"); 57e689cf4aSJeff Kirsher 58e689cf4aSJeff Kirsher ldc_disconnect(port->vio.lp); 59e689cf4aSJeff Kirsher 60e689cf4aSJeff Kirsher return -ECONNRESET; 61e689cf4aSJeff Kirsher } 62e689cf4aSJeff Kirsher 63e689cf4aSJeff Kirsher static int vnet_send_attr(struct vio_driver_state *vio) 64e689cf4aSJeff Kirsher { 65e689cf4aSJeff Kirsher struct vnet_port *port = to_vnet_port(vio); 66e689cf4aSJeff Kirsher struct net_device *dev = port->vp->dev; 67e689cf4aSJeff Kirsher struct vio_net_attr_info pkt; 68e689cf4aSJeff Kirsher int i; 69e689cf4aSJeff Kirsher 70e689cf4aSJeff Kirsher memset(&pkt, 
0, sizeof(pkt)); 71e689cf4aSJeff Kirsher pkt.tag.type = VIO_TYPE_CTRL; 72e689cf4aSJeff Kirsher pkt.tag.stype = VIO_SUBTYPE_INFO; 73e689cf4aSJeff Kirsher pkt.tag.stype_env = VIO_ATTR_INFO; 74e689cf4aSJeff Kirsher pkt.tag.sid = vio_send_sid(vio); 75e689cf4aSJeff Kirsher pkt.xfer_mode = VIO_DRING_MODE; 76e689cf4aSJeff Kirsher pkt.addr_type = VNET_ADDR_ETHERMAC; 77e689cf4aSJeff Kirsher pkt.ack_freq = 0; 78e689cf4aSJeff Kirsher for (i = 0; i < 6; i++) 79e689cf4aSJeff Kirsher pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); 80e689cf4aSJeff Kirsher pkt.mtu = ETH_FRAME_LEN; 81e689cf4aSJeff Kirsher 82e689cf4aSJeff Kirsher viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " 83e689cf4aSJeff Kirsher "ackfreq[%u] mtu[%llu]\n", 84e689cf4aSJeff Kirsher pkt.xfer_mode, pkt.addr_type, 85e689cf4aSJeff Kirsher (unsigned long long) pkt.addr, 86e689cf4aSJeff Kirsher pkt.ack_freq, 87e689cf4aSJeff Kirsher (unsigned long long) pkt.mtu); 88e689cf4aSJeff Kirsher 89e689cf4aSJeff Kirsher return vio_ldc_send(vio, &pkt, sizeof(pkt)); 90e689cf4aSJeff Kirsher } 91e689cf4aSJeff Kirsher 92e689cf4aSJeff Kirsher static int handle_attr_info(struct vio_driver_state *vio, 93e689cf4aSJeff Kirsher struct vio_net_attr_info *pkt) 94e689cf4aSJeff Kirsher { 95e689cf4aSJeff Kirsher viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] " 96e689cf4aSJeff Kirsher "ackfreq[%u] mtu[%llu]\n", 97e689cf4aSJeff Kirsher pkt->xfer_mode, pkt->addr_type, 98e689cf4aSJeff Kirsher (unsigned long long) pkt->addr, 99e689cf4aSJeff Kirsher pkt->ack_freq, 100e689cf4aSJeff Kirsher (unsigned long long) pkt->mtu); 101e689cf4aSJeff Kirsher 102e689cf4aSJeff Kirsher pkt->tag.sid = vio_send_sid(vio); 103e689cf4aSJeff Kirsher 104e689cf4aSJeff Kirsher if (pkt->xfer_mode != VIO_DRING_MODE || 105e689cf4aSJeff Kirsher pkt->addr_type != VNET_ADDR_ETHERMAC || 106e689cf4aSJeff Kirsher pkt->mtu != ETH_FRAME_LEN) { 107e689cf4aSJeff Kirsher viodbg(HS, "SEND NET ATTR NACK\n"); 108e689cf4aSJeff Kirsher 109e689cf4aSJeff Kirsher 
pkt->tag.stype = VIO_SUBTYPE_NACK; 110e689cf4aSJeff Kirsher 111e689cf4aSJeff Kirsher (void) vio_ldc_send(vio, pkt, sizeof(*pkt)); 112e689cf4aSJeff Kirsher 113e689cf4aSJeff Kirsher return -ECONNRESET; 114e689cf4aSJeff Kirsher } else { 115e689cf4aSJeff Kirsher viodbg(HS, "SEND NET ATTR ACK\n"); 116e689cf4aSJeff Kirsher 117e689cf4aSJeff Kirsher pkt->tag.stype = VIO_SUBTYPE_ACK; 118e689cf4aSJeff Kirsher 119e689cf4aSJeff Kirsher return vio_ldc_send(vio, pkt, sizeof(*pkt)); 120e689cf4aSJeff Kirsher } 121e689cf4aSJeff Kirsher 122e689cf4aSJeff Kirsher } 123e689cf4aSJeff Kirsher 124e689cf4aSJeff Kirsher static int handle_attr_ack(struct vio_driver_state *vio, 125e689cf4aSJeff Kirsher struct vio_net_attr_info *pkt) 126e689cf4aSJeff Kirsher { 127e689cf4aSJeff Kirsher viodbg(HS, "GOT NET ATTR ACK\n"); 128e689cf4aSJeff Kirsher 129e689cf4aSJeff Kirsher return 0; 130e689cf4aSJeff Kirsher } 131e689cf4aSJeff Kirsher 132e689cf4aSJeff Kirsher static int handle_attr_nack(struct vio_driver_state *vio, 133e689cf4aSJeff Kirsher struct vio_net_attr_info *pkt) 134e689cf4aSJeff Kirsher { 135e689cf4aSJeff Kirsher viodbg(HS, "GOT NET ATTR NACK\n"); 136e689cf4aSJeff Kirsher 137e689cf4aSJeff Kirsher return -ECONNRESET; 138e689cf4aSJeff Kirsher } 139e689cf4aSJeff Kirsher 140e689cf4aSJeff Kirsher static int vnet_handle_attr(struct vio_driver_state *vio, void *arg) 141e689cf4aSJeff Kirsher { 142e689cf4aSJeff Kirsher struct vio_net_attr_info *pkt = arg; 143e689cf4aSJeff Kirsher 144e689cf4aSJeff Kirsher switch (pkt->tag.stype) { 145e689cf4aSJeff Kirsher case VIO_SUBTYPE_INFO: 146e689cf4aSJeff Kirsher return handle_attr_info(vio, pkt); 147e689cf4aSJeff Kirsher 148e689cf4aSJeff Kirsher case VIO_SUBTYPE_ACK: 149e689cf4aSJeff Kirsher return handle_attr_ack(vio, pkt); 150e689cf4aSJeff Kirsher 151e689cf4aSJeff Kirsher case VIO_SUBTYPE_NACK: 152e689cf4aSJeff Kirsher return handle_attr_nack(vio, pkt); 153e689cf4aSJeff Kirsher 154e689cf4aSJeff Kirsher default: 155e689cf4aSJeff Kirsher return -ECONNRESET; 
156e689cf4aSJeff Kirsher } 157e689cf4aSJeff Kirsher } 158e689cf4aSJeff Kirsher 159e689cf4aSJeff Kirsher static void vnet_handshake_complete(struct vio_driver_state *vio) 160e689cf4aSJeff Kirsher { 161e689cf4aSJeff Kirsher struct vio_dring_state *dr; 162e689cf4aSJeff Kirsher 163e689cf4aSJeff Kirsher dr = &vio->drings[VIO_DRIVER_RX_RING]; 164e689cf4aSJeff Kirsher dr->snd_nxt = dr->rcv_nxt = 1; 165e689cf4aSJeff Kirsher 166e689cf4aSJeff Kirsher dr = &vio->drings[VIO_DRIVER_TX_RING]; 167e689cf4aSJeff Kirsher dr->snd_nxt = dr->rcv_nxt = 1; 168e689cf4aSJeff Kirsher } 169e689cf4aSJeff Kirsher 170e689cf4aSJeff Kirsher /* The hypervisor interface that implements copying to/from imported 171e689cf4aSJeff Kirsher * memory from another domain requires that copies are done to 8-byte 172e689cf4aSJeff Kirsher * aligned buffers, and that the lengths of such copies are also 8-byte 173e689cf4aSJeff Kirsher * multiples. 174e689cf4aSJeff Kirsher * 175e689cf4aSJeff Kirsher * So we align skb->data to an 8-byte multiple and pad-out the data 176e689cf4aSJeff Kirsher * area so we can round the copy length up to the next multiple of 177e689cf4aSJeff Kirsher * 8 for the copy. 178e689cf4aSJeff Kirsher * 179e689cf4aSJeff Kirsher * The transmitter puts the actual start of the packet 6 bytes into 180e689cf4aSJeff Kirsher * the buffer it sends over, so that the IP headers after the ethernet 181e689cf4aSJeff Kirsher * header are aligned properly. These 6 bytes are not in the descriptor 182e689cf4aSJeff Kirsher * length, they are simply implied. This offset is represented using 183e689cf4aSJeff Kirsher * the VNET_PACKET_SKIP macro. 
184e689cf4aSJeff Kirsher */ 185e689cf4aSJeff Kirsher static struct sk_buff *alloc_and_align_skb(struct net_device *dev, 186e689cf4aSJeff Kirsher unsigned int len) 187e689cf4aSJeff Kirsher { 188e689cf4aSJeff Kirsher struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8); 189e689cf4aSJeff Kirsher unsigned long addr, off; 190e689cf4aSJeff Kirsher 191e689cf4aSJeff Kirsher if (unlikely(!skb)) 192e689cf4aSJeff Kirsher return NULL; 193e689cf4aSJeff Kirsher 194e689cf4aSJeff Kirsher addr = (unsigned long) skb->data; 195e689cf4aSJeff Kirsher off = ((addr + 7UL) & ~7UL) - addr; 196e689cf4aSJeff Kirsher if (off) 197e689cf4aSJeff Kirsher skb_reserve(skb, off); 198e689cf4aSJeff Kirsher 199e689cf4aSJeff Kirsher return skb; 200e689cf4aSJeff Kirsher } 201e689cf4aSJeff Kirsher 202e689cf4aSJeff Kirsher static int vnet_rx_one(struct vnet_port *port, unsigned int len, 203e689cf4aSJeff Kirsher struct ldc_trans_cookie *cookies, int ncookies) 204e689cf4aSJeff Kirsher { 205e689cf4aSJeff Kirsher struct net_device *dev = port->vp->dev; 206e689cf4aSJeff Kirsher unsigned int copy_len; 207e689cf4aSJeff Kirsher struct sk_buff *skb; 208e689cf4aSJeff Kirsher int err; 209e689cf4aSJeff Kirsher 210e689cf4aSJeff Kirsher err = -EMSGSIZE; 211e689cf4aSJeff Kirsher if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) { 212e689cf4aSJeff Kirsher dev->stats.rx_length_errors++; 213e689cf4aSJeff Kirsher goto out_dropped; 214e689cf4aSJeff Kirsher } 215e689cf4aSJeff Kirsher 216e689cf4aSJeff Kirsher skb = alloc_and_align_skb(dev, len); 217e689cf4aSJeff Kirsher err = -ENOMEM; 218e689cf4aSJeff Kirsher if (unlikely(!skb)) { 219e689cf4aSJeff Kirsher dev->stats.rx_missed_errors++; 220e689cf4aSJeff Kirsher goto out_dropped; 221e689cf4aSJeff Kirsher } 222e689cf4aSJeff Kirsher 223e689cf4aSJeff Kirsher copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U; 224e689cf4aSJeff Kirsher skb_put(skb, copy_len); 225e689cf4aSJeff Kirsher err = ldc_copy(port->vio.lp, LDC_COPY_IN, 226e689cf4aSJeff Kirsher skb->data, 
copy_len, 0, 227e689cf4aSJeff Kirsher cookies, ncookies); 228e689cf4aSJeff Kirsher if (unlikely(err < 0)) { 229e689cf4aSJeff Kirsher dev->stats.rx_frame_errors++; 230e689cf4aSJeff Kirsher goto out_free_skb; 231e689cf4aSJeff Kirsher } 232e689cf4aSJeff Kirsher 233e689cf4aSJeff Kirsher skb_pull(skb, VNET_PACKET_SKIP); 234e689cf4aSJeff Kirsher skb_trim(skb, len); 235e689cf4aSJeff Kirsher skb->protocol = eth_type_trans(skb, dev); 236e689cf4aSJeff Kirsher 237e689cf4aSJeff Kirsher dev->stats.rx_packets++; 238e689cf4aSJeff Kirsher dev->stats.rx_bytes += len; 239e689cf4aSJeff Kirsher 240e689cf4aSJeff Kirsher netif_rx(skb); 241e689cf4aSJeff Kirsher 242e689cf4aSJeff Kirsher return 0; 243e689cf4aSJeff Kirsher 244e689cf4aSJeff Kirsher out_free_skb: 245e689cf4aSJeff Kirsher kfree_skb(skb); 246e689cf4aSJeff Kirsher 247e689cf4aSJeff Kirsher out_dropped: 248e689cf4aSJeff Kirsher dev->stats.rx_dropped++; 249e689cf4aSJeff Kirsher return err; 250e689cf4aSJeff Kirsher } 251e689cf4aSJeff Kirsher 252e689cf4aSJeff Kirsher static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, 253e689cf4aSJeff Kirsher u32 start, u32 end, u8 vio_dring_state) 254e689cf4aSJeff Kirsher { 255e689cf4aSJeff Kirsher struct vio_dring_data hdr = { 256e689cf4aSJeff Kirsher .tag = { 257e689cf4aSJeff Kirsher .type = VIO_TYPE_DATA, 258e689cf4aSJeff Kirsher .stype = VIO_SUBTYPE_ACK, 259e689cf4aSJeff Kirsher .stype_env = VIO_DRING_DATA, 260e689cf4aSJeff Kirsher .sid = vio_send_sid(&port->vio), 261e689cf4aSJeff Kirsher }, 262e689cf4aSJeff Kirsher .dring_ident = dr->ident, 263e689cf4aSJeff Kirsher .start_idx = start, 264e689cf4aSJeff Kirsher .end_idx = end, 265e689cf4aSJeff Kirsher .state = vio_dring_state, 266e689cf4aSJeff Kirsher }; 267e689cf4aSJeff Kirsher int err, delay; 268adddc32dSSowmini Varadhan int retries = 0; 269e689cf4aSJeff Kirsher 270e689cf4aSJeff Kirsher hdr.seq = dr->snd_nxt; 271e689cf4aSJeff Kirsher delay = 1; 272e689cf4aSJeff Kirsher do { 273e689cf4aSJeff Kirsher err = 
vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); 274e689cf4aSJeff Kirsher if (err > 0) { 275e689cf4aSJeff Kirsher dr->snd_nxt++; 276e689cf4aSJeff Kirsher break; 277e689cf4aSJeff Kirsher } 278e689cf4aSJeff Kirsher udelay(delay); 279e689cf4aSJeff Kirsher if ((delay <<= 1) > 128) 280e689cf4aSJeff Kirsher delay = 128; 281adddc32dSSowmini Varadhan if (retries++ > VNET_MAX_RETRIES) { 282adddc32dSSowmini Varadhan pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n", 283adddc32dSSowmini Varadhan port->raddr[0], port->raddr[1], 284adddc32dSSowmini Varadhan port->raddr[2], port->raddr[3], 285adddc32dSSowmini Varadhan port->raddr[4], port->raddr[5]); 286adddc32dSSowmini Varadhan err = -ECONNRESET; 287adddc32dSSowmini Varadhan } 288e689cf4aSJeff Kirsher } while (err == -EAGAIN); 289e689cf4aSJeff Kirsher 290e689cf4aSJeff Kirsher return err; 291e689cf4aSJeff Kirsher } 292e689cf4aSJeff Kirsher 293e689cf4aSJeff Kirsher static u32 next_idx(u32 idx, struct vio_dring_state *dr) 294e689cf4aSJeff Kirsher { 295e689cf4aSJeff Kirsher if (++idx == dr->num_entries) 296e689cf4aSJeff Kirsher idx = 0; 297e689cf4aSJeff Kirsher return idx; 298e689cf4aSJeff Kirsher } 299e689cf4aSJeff Kirsher 300e689cf4aSJeff Kirsher static u32 prev_idx(u32 idx, struct vio_dring_state *dr) 301e689cf4aSJeff Kirsher { 302e689cf4aSJeff Kirsher if (idx == 0) 303e689cf4aSJeff Kirsher idx = dr->num_entries - 1; 304e689cf4aSJeff Kirsher else 305e689cf4aSJeff Kirsher idx--; 306e689cf4aSJeff Kirsher 307e689cf4aSJeff Kirsher return idx; 308e689cf4aSJeff Kirsher } 309e689cf4aSJeff Kirsher 310e689cf4aSJeff Kirsher static struct vio_net_desc *get_rx_desc(struct vnet_port *port, 311e689cf4aSJeff Kirsher struct vio_dring_state *dr, 312e689cf4aSJeff Kirsher u32 index) 313e689cf4aSJeff Kirsher { 314e689cf4aSJeff Kirsher struct vio_net_desc *desc = port->vio.desc_buf; 315e689cf4aSJeff Kirsher int err; 316e689cf4aSJeff Kirsher 317e689cf4aSJeff Kirsher err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size, 318e689cf4aSJeff Kirsher (index * 
dr->entry_size), 319e689cf4aSJeff Kirsher dr->cookies, dr->ncookies); 320e689cf4aSJeff Kirsher if (err < 0) 321e689cf4aSJeff Kirsher return ERR_PTR(err); 322e689cf4aSJeff Kirsher 323e689cf4aSJeff Kirsher return desc; 324e689cf4aSJeff Kirsher } 325e689cf4aSJeff Kirsher 326e689cf4aSJeff Kirsher static int put_rx_desc(struct vnet_port *port, 327e689cf4aSJeff Kirsher struct vio_dring_state *dr, 328e689cf4aSJeff Kirsher struct vio_net_desc *desc, 329e689cf4aSJeff Kirsher u32 index) 330e689cf4aSJeff Kirsher { 331e689cf4aSJeff Kirsher int err; 332e689cf4aSJeff Kirsher 333e689cf4aSJeff Kirsher err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, 334e689cf4aSJeff Kirsher (index * dr->entry_size), 335e689cf4aSJeff Kirsher dr->cookies, dr->ncookies); 336e689cf4aSJeff Kirsher if (err < 0) 337e689cf4aSJeff Kirsher return err; 338e689cf4aSJeff Kirsher 339e689cf4aSJeff Kirsher return 0; 340e689cf4aSJeff Kirsher } 341e689cf4aSJeff Kirsher 342e689cf4aSJeff Kirsher static int vnet_walk_rx_one(struct vnet_port *port, 343e689cf4aSJeff Kirsher struct vio_dring_state *dr, 344e689cf4aSJeff Kirsher u32 index, int *needs_ack) 345e689cf4aSJeff Kirsher { 346e689cf4aSJeff Kirsher struct vio_net_desc *desc = get_rx_desc(port, dr, index); 347e689cf4aSJeff Kirsher struct vio_driver_state *vio = &port->vio; 348e689cf4aSJeff Kirsher int err; 349e689cf4aSJeff Kirsher 350e689cf4aSJeff Kirsher if (IS_ERR(desc)) 351e689cf4aSJeff Kirsher return PTR_ERR(desc); 352e689cf4aSJeff Kirsher 353e689cf4aSJeff Kirsher viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", 354e689cf4aSJeff Kirsher desc->hdr.state, desc->hdr.ack, 355e689cf4aSJeff Kirsher desc->size, desc->ncookies, 356e689cf4aSJeff Kirsher desc->cookies[0].cookie_addr, 357e689cf4aSJeff Kirsher desc->cookies[0].cookie_size); 358e689cf4aSJeff Kirsher 359e689cf4aSJeff Kirsher if (desc->hdr.state != VIO_DESC_READY) 360e689cf4aSJeff Kirsher return 1; 361e689cf4aSJeff Kirsher err = vnet_rx_one(port, desc->size, desc->cookies, 
desc->ncookies); 362e689cf4aSJeff Kirsher if (err == -ECONNRESET) 363e689cf4aSJeff Kirsher return err; 364e689cf4aSJeff Kirsher desc->hdr.state = VIO_DESC_DONE; 365e689cf4aSJeff Kirsher err = put_rx_desc(port, dr, desc, index); 366e689cf4aSJeff Kirsher if (err < 0) 367e689cf4aSJeff Kirsher return err; 368e689cf4aSJeff Kirsher *needs_ack = desc->hdr.ack; 369e689cf4aSJeff Kirsher return 0; 370e689cf4aSJeff Kirsher } 371e689cf4aSJeff Kirsher 372e689cf4aSJeff Kirsher static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, 373e689cf4aSJeff Kirsher u32 start, u32 end) 374e689cf4aSJeff Kirsher { 375e689cf4aSJeff Kirsher struct vio_driver_state *vio = &port->vio; 376e689cf4aSJeff Kirsher int ack_start = -1, ack_end = -1; 377e689cf4aSJeff Kirsher 378e689cf4aSJeff Kirsher end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr); 379e689cf4aSJeff Kirsher 380e689cf4aSJeff Kirsher viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); 381e689cf4aSJeff Kirsher 382e689cf4aSJeff Kirsher while (start != end) { 383e689cf4aSJeff Kirsher int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); 384e689cf4aSJeff Kirsher if (err == -ECONNRESET) 385e689cf4aSJeff Kirsher return err; 386e689cf4aSJeff Kirsher if (err != 0) 387e689cf4aSJeff Kirsher break; 388e689cf4aSJeff Kirsher if (ack_start == -1) 389e689cf4aSJeff Kirsher ack_start = start; 390e689cf4aSJeff Kirsher ack_end = start; 391e689cf4aSJeff Kirsher start = next_idx(start, dr); 392e689cf4aSJeff Kirsher if (ack && start != end) { 393e689cf4aSJeff Kirsher err = vnet_send_ack(port, dr, ack_start, ack_end, 394e689cf4aSJeff Kirsher VIO_DRING_ACTIVE); 395e689cf4aSJeff Kirsher if (err == -ECONNRESET) 396e689cf4aSJeff Kirsher return err; 397e689cf4aSJeff Kirsher ack_start = -1; 398e689cf4aSJeff Kirsher } 399e689cf4aSJeff Kirsher } 400e689cf4aSJeff Kirsher if (unlikely(ack_start == -1)) 401e689cf4aSJeff Kirsher ack_start = ack_end = prev_idx(start, dr); 402e689cf4aSJeff Kirsher return 
vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED); 403e689cf4aSJeff Kirsher } 404e689cf4aSJeff Kirsher 405e689cf4aSJeff Kirsher static int vnet_rx(struct vnet_port *port, void *msgbuf) 406e689cf4aSJeff Kirsher { 407e689cf4aSJeff Kirsher struct vio_dring_data *pkt = msgbuf; 408e689cf4aSJeff Kirsher struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; 409e689cf4aSJeff Kirsher struct vio_driver_state *vio = &port->vio; 410e689cf4aSJeff Kirsher 411e689cf4aSJeff Kirsher viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n", 412e689cf4aSJeff Kirsher pkt->tag.stype_env, pkt->seq, dr->rcv_nxt); 413e689cf4aSJeff Kirsher 414e689cf4aSJeff Kirsher if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) 415e689cf4aSJeff Kirsher return 0; 416e689cf4aSJeff Kirsher if (unlikely(pkt->seq != dr->rcv_nxt)) { 417e689cf4aSJeff Kirsher pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n", 418e689cf4aSJeff Kirsher pkt->seq, dr->rcv_nxt); 419e689cf4aSJeff Kirsher return 0; 420e689cf4aSJeff Kirsher } 421e689cf4aSJeff Kirsher 422e689cf4aSJeff Kirsher dr->rcv_nxt++; 423e689cf4aSJeff Kirsher 424e689cf4aSJeff Kirsher /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ 425e689cf4aSJeff Kirsher 426e689cf4aSJeff Kirsher return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx); 427e689cf4aSJeff Kirsher } 428e689cf4aSJeff Kirsher 429e689cf4aSJeff Kirsher static int idx_is_pending(struct vio_dring_state *dr, u32 end) 430e689cf4aSJeff Kirsher { 431e689cf4aSJeff Kirsher u32 idx = dr->cons; 432e689cf4aSJeff Kirsher int found = 0; 433e689cf4aSJeff Kirsher 434e689cf4aSJeff Kirsher while (idx != dr->prod) { 435e689cf4aSJeff Kirsher if (idx == end) { 436e689cf4aSJeff Kirsher found = 1; 437e689cf4aSJeff Kirsher break; 438e689cf4aSJeff Kirsher } 439e689cf4aSJeff Kirsher idx = next_idx(idx, dr); 440e689cf4aSJeff Kirsher } 441e689cf4aSJeff Kirsher return found; 442e689cf4aSJeff Kirsher } 443e689cf4aSJeff Kirsher 444e689cf4aSJeff Kirsher static int 
vnet_ack(struct vnet_port *port, void *msgbuf) 445e689cf4aSJeff Kirsher { 446e689cf4aSJeff Kirsher struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 447e689cf4aSJeff Kirsher struct vio_dring_data *pkt = msgbuf; 448e689cf4aSJeff Kirsher struct net_device *dev; 449e689cf4aSJeff Kirsher struct vnet *vp; 450e689cf4aSJeff Kirsher u32 end; 451e689cf4aSJeff Kirsher 452e689cf4aSJeff Kirsher if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) 453e689cf4aSJeff Kirsher return 0; 454e689cf4aSJeff Kirsher 455e689cf4aSJeff Kirsher end = pkt->end_idx; 456e689cf4aSJeff Kirsher if (unlikely(!idx_is_pending(dr, end))) 457e689cf4aSJeff Kirsher return 0; 458e689cf4aSJeff Kirsher 459e689cf4aSJeff Kirsher dr->cons = next_idx(end, dr); 460e689cf4aSJeff Kirsher 461e689cf4aSJeff Kirsher vp = port->vp; 462e689cf4aSJeff Kirsher dev = vp->dev; 463e689cf4aSJeff Kirsher if (unlikely(netif_queue_stopped(dev) && 464e689cf4aSJeff Kirsher vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) 465e689cf4aSJeff Kirsher return 1; 466e689cf4aSJeff Kirsher 467e689cf4aSJeff Kirsher return 0; 468e689cf4aSJeff Kirsher } 469e689cf4aSJeff Kirsher 470e689cf4aSJeff Kirsher static int vnet_nack(struct vnet_port *port, void *msgbuf) 471e689cf4aSJeff Kirsher { 472e689cf4aSJeff Kirsher /* XXX just reset or similar XXX */ 473e689cf4aSJeff Kirsher return 0; 474e689cf4aSJeff Kirsher } 475e689cf4aSJeff Kirsher 476e689cf4aSJeff Kirsher static int handle_mcast(struct vnet_port *port, void *msgbuf) 477e689cf4aSJeff Kirsher { 478e689cf4aSJeff Kirsher struct vio_net_mcast_info *pkt = msgbuf; 479e689cf4aSJeff Kirsher 480e689cf4aSJeff Kirsher if (pkt->tag.stype != VIO_SUBTYPE_ACK) 481e689cf4aSJeff Kirsher pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n", 482e689cf4aSJeff Kirsher port->vp->dev->name, 483e689cf4aSJeff Kirsher pkt->tag.type, 484e689cf4aSJeff Kirsher pkt->tag.stype, 485e689cf4aSJeff Kirsher pkt->tag.stype_env, 486e689cf4aSJeff Kirsher pkt->tag.sid); 487e689cf4aSJeff Kirsher 
488e689cf4aSJeff Kirsher return 0; 489e689cf4aSJeff Kirsher } 490e689cf4aSJeff Kirsher 4911d311ad2SSowmini Varadhan static void maybe_tx_wakeup(unsigned long param) 492e689cf4aSJeff Kirsher { 4931d311ad2SSowmini Varadhan struct vnet *vp = (struct vnet *)param; 494e689cf4aSJeff Kirsher struct net_device *dev = vp->dev; 495e689cf4aSJeff Kirsher 496e689cf4aSJeff Kirsher netif_tx_lock(dev); 497e689cf4aSJeff Kirsher if (likely(netif_queue_stopped(dev))) { 498e689cf4aSJeff Kirsher struct vnet_port *port; 499e689cf4aSJeff Kirsher int wake = 1; 500e689cf4aSJeff Kirsher 501e689cf4aSJeff Kirsher list_for_each_entry(port, &vp->port_list, list) { 502e689cf4aSJeff Kirsher struct vio_dring_state *dr; 503e689cf4aSJeff Kirsher 504e689cf4aSJeff Kirsher dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 505e689cf4aSJeff Kirsher if (vnet_tx_dring_avail(dr) < 506e689cf4aSJeff Kirsher VNET_TX_WAKEUP_THRESH(dr)) { 507e689cf4aSJeff Kirsher wake = 0; 508e689cf4aSJeff Kirsher break; 509e689cf4aSJeff Kirsher } 510e689cf4aSJeff Kirsher } 511e689cf4aSJeff Kirsher if (wake) 512e689cf4aSJeff Kirsher netif_wake_queue(dev); 513e689cf4aSJeff Kirsher } 514e689cf4aSJeff Kirsher netif_tx_unlock(dev); 515e689cf4aSJeff Kirsher } 516e689cf4aSJeff Kirsher 517e689cf4aSJeff Kirsher static void vnet_event(void *arg, int event) 518e689cf4aSJeff Kirsher { 519e689cf4aSJeff Kirsher struct vnet_port *port = arg; 520e689cf4aSJeff Kirsher struct vio_driver_state *vio = &port->vio; 521e689cf4aSJeff Kirsher unsigned long flags; 522e689cf4aSJeff Kirsher int tx_wakeup, err; 523e689cf4aSJeff Kirsher 524e689cf4aSJeff Kirsher spin_lock_irqsave(&vio->lock, flags); 525e689cf4aSJeff Kirsher 526e689cf4aSJeff Kirsher if (unlikely(event == LDC_EVENT_RESET || 527e689cf4aSJeff Kirsher event == LDC_EVENT_UP)) { 528e689cf4aSJeff Kirsher vio_link_state_change(vio, event); 529e689cf4aSJeff Kirsher spin_unlock_irqrestore(&vio->lock, flags); 530e689cf4aSJeff Kirsher 531e689cf4aSJeff Kirsher if (event == LDC_EVENT_RESET) 532e689cf4aSJeff 
Kirsher vio_port_up(vio); 533e689cf4aSJeff Kirsher return; 534e689cf4aSJeff Kirsher } 535e689cf4aSJeff Kirsher 536e689cf4aSJeff Kirsher if (unlikely(event != LDC_EVENT_DATA_READY)) { 537e689cf4aSJeff Kirsher pr_warning("Unexpected LDC event %d\n", event); 538e689cf4aSJeff Kirsher spin_unlock_irqrestore(&vio->lock, flags); 539e689cf4aSJeff Kirsher return; 540e689cf4aSJeff Kirsher } 541e689cf4aSJeff Kirsher 542e689cf4aSJeff Kirsher tx_wakeup = err = 0; 543e689cf4aSJeff Kirsher while (1) { 544e689cf4aSJeff Kirsher union { 545e689cf4aSJeff Kirsher struct vio_msg_tag tag; 546e689cf4aSJeff Kirsher u64 raw[8]; 547e689cf4aSJeff Kirsher } msgbuf; 548e689cf4aSJeff Kirsher 549e689cf4aSJeff Kirsher err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); 550e689cf4aSJeff Kirsher if (unlikely(err < 0)) { 551e689cf4aSJeff Kirsher if (err == -ECONNRESET) 552e689cf4aSJeff Kirsher vio_conn_reset(vio); 553e689cf4aSJeff Kirsher break; 554e689cf4aSJeff Kirsher } 555e689cf4aSJeff Kirsher if (err == 0) 556e689cf4aSJeff Kirsher break; 557e689cf4aSJeff Kirsher viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", 558e689cf4aSJeff Kirsher msgbuf.tag.type, 559e689cf4aSJeff Kirsher msgbuf.tag.stype, 560e689cf4aSJeff Kirsher msgbuf.tag.stype_env, 561e689cf4aSJeff Kirsher msgbuf.tag.sid); 562e689cf4aSJeff Kirsher err = vio_validate_sid(vio, &msgbuf.tag); 563e689cf4aSJeff Kirsher if (err < 0) 564e689cf4aSJeff Kirsher break; 565e689cf4aSJeff Kirsher 566e689cf4aSJeff Kirsher if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { 567e689cf4aSJeff Kirsher if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { 568e689cf4aSJeff Kirsher err = vnet_rx(port, &msgbuf); 569e689cf4aSJeff Kirsher } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { 570e689cf4aSJeff Kirsher err = vnet_ack(port, &msgbuf); 571e689cf4aSJeff Kirsher if (err > 0) 572e689cf4aSJeff Kirsher tx_wakeup |= err; 573e689cf4aSJeff Kirsher } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) { 574e689cf4aSJeff Kirsher err = vnet_nack(port, &msgbuf); 575e689cf4aSJeff Kirsher } 
576e689cf4aSJeff Kirsher } else if (msgbuf.tag.type == VIO_TYPE_CTRL) { 577e689cf4aSJeff Kirsher if (msgbuf.tag.stype_env == VNET_MCAST_INFO) 578e689cf4aSJeff Kirsher err = handle_mcast(port, &msgbuf); 579e689cf4aSJeff Kirsher else 580e689cf4aSJeff Kirsher err = vio_control_pkt_engine(vio, &msgbuf); 581e689cf4aSJeff Kirsher if (err) 582e689cf4aSJeff Kirsher break; 583e689cf4aSJeff Kirsher } else { 584e689cf4aSJeff Kirsher err = vnet_handle_unknown(port, &msgbuf); 585e689cf4aSJeff Kirsher } 586e689cf4aSJeff Kirsher if (err == -ECONNRESET) 587e689cf4aSJeff Kirsher break; 588e689cf4aSJeff Kirsher } 589e689cf4aSJeff Kirsher spin_unlock(&vio->lock); 5901d311ad2SSowmini Varadhan /* Kick off a tasklet to wake the queue. We cannot call 5911d311ad2SSowmini Varadhan * maybe_tx_wakeup directly here because we could deadlock on 5921d311ad2SSowmini Varadhan * netif_tx_lock() with dev_watchdog() 5931d311ad2SSowmini Varadhan */ 594e689cf4aSJeff Kirsher if (unlikely(tx_wakeup && err != -ECONNRESET)) 5951d311ad2SSowmini Varadhan tasklet_schedule(&port->vp->vnet_tx_wakeup); 5961d311ad2SSowmini Varadhan 597e689cf4aSJeff Kirsher local_irq_restore(flags); 598e689cf4aSJeff Kirsher } 599e689cf4aSJeff Kirsher 600e689cf4aSJeff Kirsher static int __vnet_tx_trigger(struct vnet_port *port) 601e689cf4aSJeff Kirsher { 602e689cf4aSJeff Kirsher struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 603e689cf4aSJeff Kirsher struct vio_dring_data hdr = { 604e689cf4aSJeff Kirsher .tag = { 605e689cf4aSJeff Kirsher .type = VIO_TYPE_DATA, 606e689cf4aSJeff Kirsher .stype = VIO_SUBTYPE_INFO, 607e689cf4aSJeff Kirsher .stype_env = VIO_DRING_DATA, 608e689cf4aSJeff Kirsher .sid = vio_send_sid(&port->vio), 609e689cf4aSJeff Kirsher }, 610e689cf4aSJeff Kirsher .dring_ident = dr->ident, 611e689cf4aSJeff Kirsher .start_idx = dr->prod, 612e689cf4aSJeff Kirsher .end_idx = (u32) -1, 613e689cf4aSJeff Kirsher }; 614e689cf4aSJeff Kirsher int err, delay; 615adddc32dSSowmini Varadhan int retries = 0; 
616e689cf4aSJeff Kirsher 617e689cf4aSJeff Kirsher hdr.seq = dr->snd_nxt; 618e689cf4aSJeff Kirsher delay = 1; 619e689cf4aSJeff Kirsher do { 620e689cf4aSJeff Kirsher err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); 621e689cf4aSJeff Kirsher if (err > 0) { 622e689cf4aSJeff Kirsher dr->snd_nxt++; 623e689cf4aSJeff Kirsher break; 624e689cf4aSJeff Kirsher } 625e689cf4aSJeff Kirsher udelay(delay); 626e689cf4aSJeff Kirsher if ((delay <<= 1) > 128) 627e689cf4aSJeff Kirsher delay = 128; 628adddc32dSSowmini Varadhan if (retries++ > VNET_MAX_RETRIES) 629adddc32dSSowmini Varadhan break; 630e689cf4aSJeff Kirsher } while (err == -EAGAIN); 631e689cf4aSJeff Kirsher 632e689cf4aSJeff Kirsher return err; 633e689cf4aSJeff Kirsher } 634e689cf4aSJeff Kirsher 6358266f5fcSDavid L Stevens static inline bool port_is_up(struct vnet_port *vnet) 6368266f5fcSDavid L Stevens { 6378266f5fcSDavid L Stevens struct vio_driver_state *vio = &vnet->vio; 6388266f5fcSDavid L Stevens 6398266f5fcSDavid L Stevens return !!(vio->hs_state & VIO_HS_COMPLETE); 6408266f5fcSDavid L Stevens } 6418266f5fcSDavid L Stevens 642e689cf4aSJeff Kirsher struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) 643e689cf4aSJeff Kirsher { 644e689cf4aSJeff Kirsher unsigned int hash = vnet_hashfn(skb->data); 645e689cf4aSJeff Kirsher struct hlist_head *hp = &vp->port_hash[hash]; 646e689cf4aSJeff Kirsher struct vnet_port *port; 647e689cf4aSJeff Kirsher 648b67bfe0dSSasha Levin hlist_for_each_entry(port, hp, hash) { 6498266f5fcSDavid L Stevens if (!port_is_up(port)) 6508266f5fcSDavid L Stevens continue; 6512e42e474SJoe Perches if (ether_addr_equal(port->raddr, skb->data)) 652e689cf4aSJeff Kirsher return port; 653e689cf4aSJeff Kirsher } 6548266f5fcSDavid L Stevens list_for_each_entry(port, &vp->port_list, list) { 6558266f5fcSDavid L Stevens if (!port->switch_port) 6568266f5fcSDavid L Stevens continue; 6578266f5fcSDavid L Stevens if (!port_is_up(port)) 6588266f5fcSDavid L Stevens continue; 659e689cf4aSJeff Kirsher return 
/* Locked wrapper around __tx_port_find(). */
struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	struct vnet_port *ret;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	ret = __tx_port_find(vp, skb);
	spin_unlock_irqrestore(&vp->lock, flags);

	return ret;
}

/* Transmit one skb: copy it into the pre-mapped TX buffer for the
 * current producer slot, publish the descriptor, and kick the peer
 * LDOM via __vnet_tx_trigger().  Runs under port->vio.lock.
 *
 * Returns NETDEV_TX_OK (the skb is always consumed, even on drop) or
 * NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = tx_port_find(vp, skb);
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned long flags;
	unsigned int len;
	void *tx_buf;
	int i, err;

	if (unlikely(!port))
		goto out_dropped;

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
		/* The queue should have been stopped before the ring
		 * filled up; reaching here with it awake is a bug.
		 */
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
			dev->stats.tx_errors++;
		}
		spin_unlock_irqrestore(&port->vio.lock, flags);
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	tx_buf = port->tx_bufs[dr->prod].buf;
	skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len);

	/* Pad short frames up to the Ethernet minimum with zeroes. */
	len = skb->len;
	if (len < ETH_ZLEN) {
		len = ETH_ZLEN;
		memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len);
	}

	/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
	 * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
	 * the protocol itself does not require it as long as the peer
	 * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
	 *
	 * An ACK for every packet in the ring is expensive as the
	 * sending of LDC messages is slow and affects performance.
	 */
	d->hdr.ack = VIO_ACK_DISABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[dr->prod].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[dr->prod].cookies[i];

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();

	d->hdr.state = VIO_DESC_READY;

	err = __vnet_tx_trigger(port);
	if (unlikely(err < 0)) {
		netdev_info(dev, "TX trigger error %d\n", err);
		/* Reclaim the descriptor we just published. */
		d->hdr.state = VIO_DESC_FREE;
		dev->stats.tx_carrier_errors++;
		goto out_dropped_unlock;
	}

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* Advance the producer (ring size is a power of two). */
	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
		netif_stop_queue(dev);
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_wake_queue(dev);
	}

	spin_unlock_irqrestore(&port->vio.lock, flags);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;

out_dropped_unlock:
	spin_unlock_irqrestore(&port->vio.lock, flags);

out_dropped:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void vnet_tx_timeout(struct net_device *dev)
{
	/* XXX Implement me XXX */
}
777e689cf4aSJeff Kirsher netif_carrier_on(dev); 778e689cf4aSJeff Kirsher netif_start_queue(dev); 779e689cf4aSJeff Kirsher 780e689cf4aSJeff Kirsher return 0; 781e689cf4aSJeff Kirsher } 782e689cf4aSJeff Kirsher 783e689cf4aSJeff Kirsher static int vnet_close(struct net_device *dev) 784e689cf4aSJeff Kirsher { 785e689cf4aSJeff Kirsher netif_stop_queue(dev); 786e689cf4aSJeff Kirsher netif_carrier_off(dev); 787e689cf4aSJeff Kirsher 788e689cf4aSJeff Kirsher return 0; 789e689cf4aSJeff Kirsher } 790e689cf4aSJeff Kirsher 791e689cf4aSJeff Kirsher static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) 792e689cf4aSJeff Kirsher { 793e689cf4aSJeff Kirsher struct vnet_mcast_entry *m; 794e689cf4aSJeff Kirsher 795e689cf4aSJeff Kirsher for (m = vp->mcast_list; m; m = m->next) { 79600fa4ce9Sdingtianhong if (ether_addr_equal(m->addr, addr)) 797e689cf4aSJeff Kirsher return m; 798e689cf4aSJeff Kirsher } 799e689cf4aSJeff Kirsher return NULL; 800e689cf4aSJeff Kirsher } 801e689cf4aSJeff Kirsher 802e689cf4aSJeff Kirsher static void __update_mc_list(struct vnet *vp, struct net_device *dev) 803e689cf4aSJeff Kirsher { 804e689cf4aSJeff Kirsher struct netdev_hw_addr *ha; 805e689cf4aSJeff Kirsher 806e689cf4aSJeff Kirsher netdev_for_each_mc_addr(ha, dev) { 807e689cf4aSJeff Kirsher struct vnet_mcast_entry *m; 808e689cf4aSJeff Kirsher 809e689cf4aSJeff Kirsher m = __vnet_mc_find(vp, ha->addr); 810e689cf4aSJeff Kirsher if (m) { 811e689cf4aSJeff Kirsher m->hit = 1; 812e689cf4aSJeff Kirsher continue; 813e689cf4aSJeff Kirsher } 814e689cf4aSJeff Kirsher 815e689cf4aSJeff Kirsher if (!m) { 816e689cf4aSJeff Kirsher m = kzalloc(sizeof(*m), GFP_ATOMIC); 817e689cf4aSJeff Kirsher if (!m) 818e689cf4aSJeff Kirsher continue; 819e689cf4aSJeff Kirsher memcpy(m->addr, ha->addr, ETH_ALEN); 820e689cf4aSJeff Kirsher m->hit = 1; 821e689cf4aSJeff Kirsher 822e689cf4aSJeff Kirsher m->next = vp->mcast_list; 823e689cf4aSJeff Kirsher vp->mcast_list = m; 824e689cf4aSJeff Kirsher } 825e689cf4aSJeff Kirsher } 
826e689cf4aSJeff Kirsher } 827e689cf4aSJeff Kirsher 828e689cf4aSJeff Kirsher static void __send_mc_list(struct vnet *vp, struct vnet_port *port) 829e689cf4aSJeff Kirsher { 830e689cf4aSJeff Kirsher struct vio_net_mcast_info info; 831e689cf4aSJeff Kirsher struct vnet_mcast_entry *m, **pp; 832e689cf4aSJeff Kirsher int n_addrs; 833e689cf4aSJeff Kirsher 834e689cf4aSJeff Kirsher memset(&info, 0, sizeof(info)); 835e689cf4aSJeff Kirsher 836e689cf4aSJeff Kirsher info.tag.type = VIO_TYPE_CTRL; 837e689cf4aSJeff Kirsher info.tag.stype = VIO_SUBTYPE_INFO; 838e689cf4aSJeff Kirsher info.tag.stype_env = VNET_MCAST_INFO; 839e689cf4aSJeff Kirsher info.tag.sid = vio_send_sid(&port->vio); 840e689cf4aSJeff Kirsher info.set = 1; 841e689cf4aSJeff Kirsher 842e689cf4aSJeff Kirsher n_addrs = 0; 843e689cf4aSJeff Kirsher for (m = vp->mcast_list; m; m = m->next) { 844e689cf4aSJeff Kirsher if (m->sent) 845e689cf4aSJeff Kirsher continue; 846e689cf4aSJeff Kirsher m->sent = 1; 847e689cf4aSJeff Kirsher memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], 848e689cf4aSJeff Kirsher m->addr, ETH_ALEN); 849e689cf4aSJeff Kirsher if (++n_addrs == VNET_NUM_MCAST) { 850e689cf4aSJeff Kirsher info.count = n_addrs; 851e689cf4aSJeff Kirsher 852e689cf4aSJeff Kirsher (void) vio_ldc_send(&port->vio, &info, 853e689cf4aSJeff Kirsher sizeof(info)); 854e689cf4aSJeff Kirsher n_addrs = 0; 855e689cf4aSJeff Kirsher } 856e689cf4aSJeff Kirsher } 857e689cf4aSJeff Kirsher if (n_addrs) { 858e689cf4aSJeff Kirsher info.count = n_addrs; 859e689cf4aSJeff Kirsher (void) vio_ldc_send(&port->vio, &info, sizeof(info)); 860e689cf4aSJeff Kirsher } 861e689cf4aSJeff Kirsher 862e689cf4aSJeff Kirsher info.set = 0; 863e689cf4aSJeff Kirsher 864e689cf4aSJeff Kirsher n_addrs = 0; 865e689cf4aSJeff Kirsher pp = &vp->mcast_list; 866e689cf4aSJeff Kirsher while ((m = *pp) != NULL) { 867e689cf4aSJeff Kirsher if (m->hit) { 868e689cf4aSJeff Kirsher m->hit = 0; 869e689cf4aSJeff Kirsher pp = &m->next; 870e689cf4aSJeff Kirsher continue; 871e689cf4aSJeff Kirsher } 
872e689cf4aSJeff Kirsher 873e689cf4aSJeff Kirsher memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], 874e689cf4aSJeff Kirsher m->addr, ETH_ALEN); 875e689cf4aSJeff Kirsher if (++n_addrs == VNET_NUM_MCAST) { 876e689cf4aSJeff Kirsher info.count = n_addrs; 877e689cf4aSJeff Kirsher (void) vio_ldc_send(&port->vio, &info, 878e689cf4aSJeff Kirsher sizeof(info)); 879e689cf4aSJeff Kirsher n_addrs = 0; 880e689cf4aSJeff Kirsher } 881e689cf4aSJeff Kirsher 882e689cf4aSJeff Kirsher *pp = m->next; 883e689cf4aSJeff Kirsher kfree(m); 884e689cf4aSJeff Kirsher } 885e689cf4aSJeff Kirsher if (n_addrs) { 886e689cf4aSJeff Kirsher info.count = n_addrs; 887e689cf4aSJeff Kirsher (void) vio_ldc_send(&port->vio, &info, sizeof(info)); 888e689cf4aSJeff Kirsher } 889e689cf4aSJeff Kirsher } 890e689cf4aSJeff Kirsher 891e689cf4aSJeff Kirsher static void vnet_set_rx_mode(struct net_device *dev) 892e689cf4aSJeff Kirsher { 893e689cf4aSJeff Kirsher struct vnet *vp = netdev_priv(dev); 894e689cf4aSJeff Kirsher struct vnet_port *port; 895e689cf4aSJeff Kirsher unsigned long flags; 896e689cf4aSJeff Kirsher 897e689cf4aSJeff Kirsher spin_lock_irqsave(&vp->lock, flags); 898e689cf4aSJeff Kirsher if (!list_empty(&vp->port_list)) { 899e689cf4aSJeff Kirsher port = list_entry(vp->port_list.next, struct vnet_port, list); 900e689cf4aSJeff Kirsher 901e689cf4aSJeff Kirsher if (port->switch_port) { 902e689cf4aSJeff Kirsher __update_mc_list(vp, dev); 903e689cf4aSJeff Kirsher __send_mc_list(vp, port); 904e689cf4aSJeff Kirsher } 905e689cf4aSJeff Kirsher } 906e689cf4aSJeff Kirsher spin_unlock_irqrestore(&vp->lock, flags); 907e689cf4aSJeff Kirsher } 908e689cf4aSJeff Kirsher 909e689cf4aSJeff Kirsher static int vnet_change_mtu(struct net_device *dev, int new_mtu) 910e689cf4aSJeff Kirsher { 911e689cf4aSJeff Kirsher if (new_mtu != ETH_DATA_LEN) 912e689cf4aSJeff Kirsher return -EINVAL; 913e689cf4aSJeff Kirsher 914e689cf4aSJeff Kirsher dev->mtu = new_mtu; 915e689cf4aSJeff Kirsher return 0; 916e689cf4aSJeff Kirsher } 917e689cf4aSJeff 
Kirsher 918e689cf4aSJeff Kirsher static int vnet_set_mac_addr(struct net_device *dev, void *p) 919e689cf4aSJeff Kirsher { 920e689cf4aSJeff Kirsher return -EINVAL; 921e689cf4aSJeff Kirsher } 922e689cf4aSJeff Kirsher 923e689cf4aSJeff Kirsher static void vnet_get_drvinfo(struct net_device *dev, 924e689cf4aSJeff Kirsher struct ethtool_drvinfo *info) 925e689cf4aSJeff Kirsher { 9267826d43fSJiri Pirko strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); 9277826d43fSJiri Pirko strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); 928e689cf4aSJeff Kirsher } 929e689cf4aSJeff Kirsher 930e689cf4aSJeff Kirsher static u32 vnet_get_msglevel(struct net_device *dev) 931e689cf4aSJeff Kirsher { 932e689cf4aSJeff Kirsher struct vnet *vp = netdev_priv(dev); 933e689cf4aSJeff Kirsher return vp->msg_enable; 934e689cf4aSJeff Kirsher } 935e689cf4aSJeff Kirsher 936e689cf4aSJeff Kirsher static void vnet_set_msglevel(struct net_device *dev, u32 value) 937e689cf4aSJeff Kirsher { 938e689cf4aSJeff Kirsher struct vnet *vp = netdev_priv(dev); 939e689cf4aSJeff Kirsher vp->msg_enable = value; 940e689cf4aSJeff Kirsher } 941e689cf4aSJeff Kirsher 942e689cf4aSJeff Kirsher static const struct ethtool_ops vnet_ethtool_ops = { 943e689cf4aSJeff Kirsher .get_drvinfo = vnet_get_drvinfo, 944e689cf4aSJeff Kirsher .get_msglevel = vnet_get_msglevel, 945e689cf4aSJeff Kirsher .set_msglevel = vnet_set_msglevel, 946e689cf4aSJeff Kirsher .get_link = ethtool_op_get_link, 947e689cf4aSJeff Kirsher }; 948e689cf4aSJeff Kirsher 949e689cf4aSJeff Kirsher static void vnet_port_free_tx_bufs(struct vnet_port *port) 950e689cf4aSJeff Kirsher { 951e689cf4aSJeff Kirsher struct vio_dring_state *dr; 952e689cf4aSJeff Kirsher int i; 953e689cf4aSJeff Kirsher 954e689cf4aSJeff Kirsher dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 955e689cf4aSJeff Kirsher if (dr->base) { 956e689cf4aSJeff Kirsher ldc_free_exp_dring(port->vio.lp, dr->base, 957e689cf4aSJeff Kirsher (dr->entry_size * dr->num_entries), 958e689cf4aSJeff Kirsher 
dr->cookies, dr->ncookies); 959e689cf4aSJeff Kirsher dr->base = NULL; 960e689cf4aSJeff Kirsher dr->entry_size = 0; 961e689cf4aSJeff Kirsher dr->num_entries = 0; 962e689cf4aSJeff Kirsher dr->pending = 0; 963e689cf4aSJeff Kirsher dr->ncookies = 0; 964e689cf4aSJeff Kirsher } 965e689cf4aSJeff Kirsher 966e689cf4aSJeff Kirsher for (i = 0; i < VNET_TX_RING_SIZE; i++) { 967e689cf4aSJeff Kirsher void *buf = port->tx_bufs[i].buf; 968e689cf4aSJeff Kirsher 969e689cf4aSJeff Kirsher if (!buf) 970e689cf4aSJeff Kirsher continue; 971e689cf4aSJeff Kirsher 972e689cf4aSJeff Kirsher ldc_unmap(port->vio.lp, 973e689cf4aSJeff Kirsher port->tx_bufs[i].cookies, 974e689cf4aSJeff Kirsher port->tx_bufs[i].ncookies); 975e689cf4aSJeff Kirsher 976e689cf4aSJeff Kirsher kfree(buf); 977e689cf4aSJeff Kirsher port->tx_bufs[i].buf = NULL; 978e689cf4aSJeff Kirsher } 979e689cf4aSJeff Kirsher } 980e689cf4aSJeff Kirsher 981f73d12bdSBill Pemberton static int vnet_port_alloc_tx_bufs(struct vnet_port *port) 982e689cf4aSJeff Kirsher { 983e689cf4aSJeff Kirsher struct vio_dring_state *dr; 984e689cf4aSJeff Kirsher unsigned long len; 985e689cf4aSJeff Kirsher int i, err, ncookies; 986e689cf4aSJeff Kirsher void *dring; 987e689cf4aSJeff Kirsher 988e689cf4aSJeff Kirsher for (i = 0; i < VNET_TX_RING_SIZE; i++) { 989e689cf4aSJeff Kirsher void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL); 990e689cf4aSJeff Kirsher int map_len = (ETH_FRAME_LEN + 7) & ~7; 991e689cf4aSJeff Kirsher 992e689cf4aSJeff Kirsher err = -ENOMEM; 993e404decbSJoe Perches if (!buf) 994e689cf4aSJeff Kirsher goto err_out; 995e404decbSJoe Perches 996e689cf4aSJeff Kirsher err = -EFAULT; 997e689cf4aSJeff Kirsher if ((unsigned long)buf & (8UL - 1)) { 998e689cf4aSJeff Kirsher pr_err("TX buffer misaligned\n"); 999e689cf4aSJeff Kirsher kfree(buf); 1000e689cf4aSJeff Kirsher goto err_out; 1001e689cf4aSJeff Kirsher } 1002e689cf4aSJeff Kirsher 1003e689cf4aSJeff Kirsher err = ldc_map_single(port->vio.lp, buf, map_len, 1004e689cf4aSJeff Kirsher 
port->tx_bufs[i].cookies, 2, 1005e689cf4aSJeff Kirsher (LDC_MAP_SHADOW | 1006e689cf4aSJeff Kirsher LDC_MAP_DIRECT | 1007e689cf4aSJeff Kirsher LDC_MAP_RW)); 1008e689cf4aSJeff Kirsher if (err < 0) { 1009e689cf4aSJeff Kirsher kfree(buf); 1010e689cf4aSJeff Kirsher goto err_out; 1011e689cf4aSJeff Kirsher } 1012e689cf4aSJeff Kirsher port->tx_bufs[i].buf = buf; 1013e689cf4aSJeff Kirsher port->tx_bufs[i].ncookies = err; 1014e689cf4aSJeff Kirsher } 1015e689cf4aSJeff Kirsher 1016e689cf4aSJeff Kirsher dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 1017e689cf4aSJeff Kirsher 1018e689cf4aSJeff Kirsher len = (VNET_TX_RING_SIZE * 1019e689cf4aSJeff Kirsher (sizeof(struct vio_net_desc) + 1020e689cf4aSJeff Kirsher (sizeof(struct ldc_trans_cookie) * 2))); 1021e689cf4aSJeff Kirsher 1022e689cf4aSJeff Kirsher ncookies = VIO_MAX_RING_COOKIES; 1023e689cf4aSJeff Kirsher dring = ldc_alloc_exp_dring(port->vio.lp, len, 1024e689cf4aSJeff Kirsher dr->cookies, &ncookies, 1025e689cf4aSJeff Kirsher (LDC_MAP_SHADOW | 1026e689cf4aSJeff Kirsher LDC_MAP_DIRECT | 1027e689cf4aSJeff Kirsher LDC_MAP_RW)); 1028e689cf4aSJeff Kirsher if (IS_ERR(dring)) { 1029e689cf4aSJeff Kirsher err = PTR_ERR(dring); 1030e689cf4aSJeff Kirsher goto err_out; 1031e689cf4aSJeff Kirsher } 1032e689cf4aSJeff Kirsher 1033e689cf4aSJeff Kirsher dr->base = dring; 1034e689cf4aSJeff Kirsher dr->entry_size = (sizeof(struct vio_net_desc) + 1035e689cf4aSJeff Kirsher (sizeof(struct ldc_trans_cookie) * 2)); 1036e689cf4aSJeff Kirsher dr->num_entries = VNET_TX_RING_SIZE; 1037e689cf4aSJeff Kirsher dr->prod = dr->cons = 0; 1038e689cf4aSJeff Kirsher dr->pending = VNET_TX_RING_SIZE; 1039e689cf4aSJeff Kirsher dr->ncookies = ncookies; 1040e689cf4aSJeff Kirsher 1041e689cf4aSJeff Kirsher return 0; 1042e689cf4aSJeff Kirsher 1043e689cf4aSJeff Kirsher err_out: 1044e689cf4aSJeff Kirsher vnet_port_free_tx_bufs(port); 1045e689cf4aSJeff Kirsher 1046e689cf4aSJeff Kirsher return err; 1047e689cf4aSJeff Kirsher } 1048e689cf4aSJeff Kirsher 1049e689cf4aSJeff Kirsher 
static LIST_HEAD(vnet_list); 1050e689cf4aSJeff Kirsher static DEFINE_MUTEX(vnet_list_mutex); 1051e689cf4aSJeff Kirsher 1052e689cf4aSJeff Kirsher static const struct net_device_ops vnet_ops = { 1053e689cf4aSJeff Kirsher .ndo_open = vnet_open, 1054e689cf4aSJeff Kirsher .ndo_stop = vnet_close, 1055afc4b13dSJiri Pirko .ndo_set_rx_mode = vnet_set_rx_mode, 1056e689cf4aSJeff Kirsher .ndo_set_mac_address = vnet_set_mac_addr, 1057e689cf4aSJeff Kirsher .ndo_validate_addr = eth_validate_addr, 1058e689cf4aSJeff Kirsher .ndo_tx_timeout = vnet_tx_timeout, 1059e689cf4aSJeff Kirsher .ndo_change_mtu = vnet_change_mtu, 1060e689cf4aSJeff Kirsher .ndo_start_xmit = vnet_start_xmit, 1061e689cf4aSJeff Kirsher }; 1062e689cf4aSJeff Kirsher 1063f73d12bdSBill Pemberton static struct vnet *vnet_new(const u64 *local_mac) 1064e689cf4aSJeff Kirsher { 1065e689cf4aSJeff Kirsher struct net_device *dev; 1066e689cf4aSJeff Kirsher struct vnet *vp; 1067e689cf4aSJeff Kirsher int err, i; 1068e689cf4aSJeff Kirsher 1069e689cf4aSJeff Kirsher dev = alloc_etherdev(sizeof(*vp)); 107041de8d4cSJoe Perches if (!dev) 1071e689cf4aSJeff Kirsher return ERR_PTR(-ENOMEM); 1072e689cf4aSJeff Kirsher 1073e689cf4aSJeff Kirsher for (i = 0; i < ETH_ALEN; i++) 1074e689cf4aSJeff Kirsher dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff; 1075e689cf4aSJeff Kirsher 1076e689cf4aSJeff Kirsher vp = netdev_priv(dev); 1077e689cf4aSJeff Kirsher 1078e689cf4aSJeff Kirsher spin_lock_init(&vp->lock); 10791d311ad2SSowmini Varadhan tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp); 1080e689cf4aSJeff Kirsher vp->dev = dev; 1081e689cf4aSJeff Kirsher 1082e689cf4aSJeff Kirsher INIT_LIST_HEAD(&vp->port_list); 1083e689cf4aSJeff Kirsher for (i = 0; i < VNET_PORT_HASH_SIZE; i++) 1084e689cf4aSJeff Kirsher INIT_HLIST_HEAD(&vp->port_hash[i]); 1085e689cf4aSJeff Kirsher INIT_LIST_HEAD(&vp->list); 1086e689cf4aSJeff Kirsher vp->local_mac = *local_mac; 1087e689cf4aSJeff Kirsher 1088e689cf4aSJeff Kirsher dev->netdev_ops = &vnet_ops; 
1089e689cf4aSJeff Kirsher dev->ethtool_ops = &vnet_ethtool_ops; 1090e689cf4aSJeff Kirsher dev->watchdog_timeo = VNET_TX_TIMEOUT; 1091e689cf4aSJeff Kirsher 1092e689cf4aSJeff Kirsher err = register_netdev(dev); 1093e689cf4aSJeff Kirsher if (err) { 1094e689cf4aSJeff Kirsher pr_err("Cannot register net device, aborting\n"); 1095e689cf4aSJeff Kirsher goto err_out_free_dev; 1096e689cf4aSJeff Kirsher } 1097e689cf4aSJeff Kirsher 1098e689cf4aSJeff Kirsher netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr); 1099e689cf4aSJeff Kirsher 1100e689cf4aSJeff Kirsher list_add(&vp->list, &vnet_list); 1101e689cf4aSJeff Kirsher 1102e689cf4aSJeff Kirsher return vp; 1103e689cf4aSJeff Kirsher 1104e689cf4aSJeff Kirsher err_out_free_dev: 1105e689cf4aSJeff Kirsher free_netdev(dev); 1106e689cf4aSJeff Kirsher 1107e689cf4aSJeff Kirsher return ERR_PTR(err); 1108e689cf4aSJeff Kirsher } 1109e689cf4aSJeff Kirsher 1110f73d12bdSBill Pemberton static struct vnet *vnet_find_or_create(const u64 *local_mac) 1111e689cf4aSJeff Kirsher { 1112e689cf4aSJeff Kirsher struct vnet *iter, *vp; 1113e689cf4aSJeff Kirsher 1114e689cf4aSJeff Kirsher mutex_lock(&vnet_list_mutex); 1115e689cf4aSJeff Kirsher vp = NULL; 1116e689cf4aSJeff Kirsher list_for_each_entry(iter, &vnet_list, list) { 1117e689cf4aSJeff Kirsher if (iter->local_mac == *local_mac) { 1118e689cf4aSJeff Kirsher vp = iter; 1119e689cf4aSJeff Kirsher break; 1120e689cf4aSJeff Kirsher } 1121e689cf4aSJeff Kirsher } 1122e689cf4aSJeff Kirsher if (!vp) 1123e689cf4aSJeff Kirsher vp = vnet_new(local_mac); 1124e689cf4aSJeff Kirsher mutex_unlock(&vnet_list_mutex); 1125e689cf4aSJeff Kirsher 1126e689cf4aSJeff Kirsher return vp; 1127e689cf4aSJeff Kirsher } 1128e689cf4aSJeff Kirsher 1129a4b70a07SSowmini Varadhan static void vnet_cleanup(void) 1130a4b70a07SSowmini Varadhan { 1131a4b70a07SSowmini Varadhan struct vnet *vp; 1132a4b70a07SSowmini Varadhan struct net_device *dev; 1133a4b70a07SSowmini Varadhan 1134a4b70a07SSowmini Varadhan mutex_lock(&vnet_list_mutex); 
1135a4b70a07SSowmini Varadhan while (!list_empty(&vnet_list)) { 1136a4b70a07SSowmini Varadhan vp = list_first_entry(&vnet_list, struct vnet, list); 1137a4b70a07SSowmini Varadhan list_del(&vp->list); 1138a4b70a07SSowmini Varadhan dev = vp->dev; 11391d311ad2SSowmini Varadhan tasklet_kill(&vp->vnet_tx_wakeup); 1140a4b70a07SSowmini Varadhan /* vio_unregister_driver() should have cleaned up port_list */ 1141a4b70a07SSowmini Varadhan BUG_ON(!list_empty(&vp->port_list)); 1142a4b70a07SSowmini Varadhan unregister_netdev(dev); 1143a4b70a07SSowmini Varadhan free_netdev(dev); 1144a4b70a07SSowmini Varadhan } 1145a4b70a07SSowmini Varadhan mutex_unlock(&vnet_list_mutex); 1146a4b70a07SSowmini Varadhan } 1147a4b70a07SSowmini Varadhan 1148e689cf4aSJeff Kirsher static const char *local_mac_prop = "local-mac-address"; 1149e689cf4aSJeff Kirsher 1150f73d12bdSBill Pemberton static struct vnet *vnet_find_parent(struct mdesc_handle *hp, 1151e689cf4aSJeff Kirsher u64 port_node) 1152e689cf4aSJeff Kirsher { 1153e689cf4aSJeff Kirsher const u64 *local_mac = NULL; 1154e689cf4aSJeff Kirsher u64 a; 1155e689cf4aSJeff Kirsher 1156e689cf4aSJeff Kirsher mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) { 1157e689cf4aSJeff Kirsher u64 target = mdesc_arc_target(hp, a); 1158e689cf4aSJeff Kirsher const char *name; 1159e689cf4aSJeff Kirsher 1160e689cf4aSJeff Kirsher name = mdesc_get_property(hp, target, "name", NULL); 1161e689cf4aSJeff Kirsher if (!name || strcmp(name, "network")) 1162e689cf4aSJeff Kirsher continue; 1163e689cf4aSJeff Kirsher 1164e689cf4aSJeff Kirsher local_mac = mdesc_get_property(hp, target, 1165e689cf4aSJeff Kirsher local_mac_prop, NULL); 1166e689cf4aSJeff Kirsher if (local_mac) 1167e689cf4aSJeff Kirsher break; 1168e689cf4aSJeff Kirsher } 1169e689cf4aSJeff Kirsher if (!local_mac) 1170e689cf4aSJeff Kirsher return ERR_PTR(-ENODEV); 1171e689cf4aSJeff Kirsher 1172e689cf4aSJeff Kirsher return vnet_find_or_create(local_mac); 1173e689cf4aSJeff Kirsher } 1174e689cf4aSJeff Kirsher 
1175e689cf4aSJeff Kirsher static struct ldc_channel_config vnet_ldc_cfg = { 1176e689cf4aSJeff Kirsher .event = vnet_event, 1177e689cf4aSJeff Kirsher .mtu = 64, 1178e689cf4aSJeff Kirsher .mode = LDC_MODE_UNRELIABLE, 1179e689cf4aSJeff Kirsher }; 1180e689cf4aSJeff Kirsher 1181e689cf4aSJeff Kirsher static struct vio_driver_ops vnet_vio_ops = { 1182e689cf4aSJeff Kirsher .send_attr = vnet_send_attr, 1183e689cf4aSJeff Kirsher .handle_attr = vnet_handle_attr, 1184e689cf4aSJeff Kirsher .handshake_complete = vnet_handshake_complete, 1185e689cf4aSJeff Kirsher }; 1186e689cf4aSJeff Kirsher 1187f73d12bdSBill Pemberton static void print_version(void) 1188e689cf4aSJeff Kirsher { 1189e689cf4aSJeff Kirsher printk_once(KERN_INFO "%s", version); 1190e689cf4aSJeff Kirsher } 1191e689cf4aSJeff Kirsher 1192e689cf4aSJeff Kirsher const char *remote_macaddr_prop = "remote-mac-address"; 1193e689cf4aSJeff Kirsher 11941dd06ae8SGreg Kroah-Hartman static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) 1195e689cf4aSJeff Kirsher { 1196e689cf4aSJeff Kirsher struct mdesc_handle *hp; 1197e689cf4aSJeff Kirsher struct vnet_port *port; 1198e689cf4aSJeff Kirsher unsigned long flags; 1199e689cf4aSJeff Kirsher struct vnet *vp; 1200e689cf4aSJeff Kirsher const u64 *rmac; 1201e689cf4aSJeff Kirsher int len, i, err, switch_port; 1202e689cf4aSJeff Kirsher 1203e689cf4aSJeff Kirsher print_version(); 1204e689cf4aSJeff Kirsher 1205e689cf4aSJeff Kirsher hp = mdesc_grab(); 1206e689cf4aSJeff Kirsher 1207e689cf4aSJeff Kirsher vp = vnet_find_parent(hp, vdev->mp); 1208e689cf4aSJeff Kirsher if (IS_ERR(vp)) { 1209e689cf4aSJeff Kirsher pr_err("Cannot find port parent vnet\n"); 1210e689cf4aSJeff Kirsher err = PTR_ERR(vp); 1211e689cf4aSJeff Kirsher goto err_out_put_mdesc; 1212e689cf4aSJeff Kirsher } 1213e689cf4aSJeff Kirsher 1214e689cf4aSJeff Kirsher rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); 1215e689cf4aSJeff Kirsher err = -ENODEV; 1216e689cf4aSJeff Kirsher if (!rmac) { 
1217e689cf4aSJeff Kirsher pr_err("Port lacks %s property\n", remote_macaddr_prop); 1218e689cf4aSJeff Kirsher goto err_out_put_mdesc; 1219e689cf4aSJeff Kirsher } 1220e689cf4aSJeff Kirsher 1221e689cf4aSJeff Kirsher port = kzalloc(sizeof(*port), GFP_KERNEL); 1222e689cf4aSJeff Kirsher err = -ENOMEM; 1223e404decbSJoe Perches if (!port) 1224e689cf4aSJeff Kirsher goto err_out_put_mdesc; 1225e689cf4aSJeff Kirsher 1226e689cf4aSJeff Kirsher for (i = 0; i < ETH_ALEN; i++) 1227e689cf4aSJeff Kirsher port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff; 1228e689cf4aSJeff Kirsher 1229e689cf4aSJeff Kirsher port->vp = vp; 1230e689cf4aSJeff Kirsher 1231e689cf4aSJeff Kirsher err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK, 1232e689cf4aSJeff Kirsher vnet_versions, ARRAY_SIZE(vnet_versions), 1233e689cf4aSJeff Kirsher &vnet_vio_ops, vp->dev->name); 1234e689cf4aSJeff Kirsher if (err) 1235e689cf4aSJeff Kirsher goto err_out_free_port; 1236e689cf4aSJeff Kirsher 1237e689cf4aSJeff Kirsher err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port); 1238e689cf4aSJeff Kirsher if (err) 1239e689cf4aSJeff Kirsher goto err_out_free_port; 1240e689cf4aSJeff Kirsher 1241e689cf4aSJeff Kirsher err = vnet_port_alloc_tx_bufs(port); 1242e689cf4aSJeff Kirsher if (err) 1243e689cf4aSJeff Kirsher goto err_out_free_ldc; 1244e689cf4aSJeff Kirsher 1245e689cf4aSJeff Kirsher INIT_HLIST_NODE(&port->hash); 1246e689cf4aSJeff Kirsher INIT_LIST_HEAD(&port->list); 1247e689cf4aSJeff Kirsher 1248e689cf4aSJeff Kirsher switch_port = 0; 1249e689cf4aSJeff Kirsher if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL) 1250e689cf4aSJeff Kirsher switch_port = 1; 1251e689cf4aSJeff Kirsher port->switch_port = switch_port; 1252e689cf4aSJeff Kirsher 1253e689cf4aSJeff Kirsher spin_lock_irqsave(&vp->lock, flags); 1254e689cf4aSJeff Kirsher if (switch_port) 1255e689cf4aSJeff Kirsher list_add(&port->list, &vp->port_list); 1256e689cf4aSJeff Kirsher else 1257e689cf4aSJeff Kirsher list_add_tail(&port->list, &vp->port_list); 
1258e689cf4aSJeff Kirsher hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]); 1259e689cf4aSJeff Kirsher spin_unlock_irqrestore(&vp->lock, flags); 1260e689cf4aSJeff Kirsher 1261e689cf4aSJeff Kirsher dev_set_drvdata(&vdev->dev, port); 1262e689cf4aSJeff Kirsher 1263e689cf4aSJeff Kirsher pr_info("%s: PORT ( remote-mac %pM%s )\n", 1264e689cf4aSJeff Kirsher vp->dev->name, port->raddr, switch_port ? " switch-port" : ""); 1265e689cf4aSJeff Kirsher 1266e689cf4aSJeff Kirsher vio_port_up(&port->vio); 1267e689cf4aSJeff Kirsher 1268e689cf4aSJeff Kirsher mdesc_release(hp); 1269e689cf4aSJeff Kirsher 1270e689cf4aSJeff Kirsher return 0; 1271e689cf4aSJeff Kirsher 1272e689cf4aSJeff Kirsher err_out_free_ldc: 1273e689cf4aSJeff Kirsher vio_ldc_free(&port->vio); 1274e689cf4aSJeff Kirsher 1275e689cf4aSJeff Kirsher err_out_free_port: 1276e689cf4aSJeff Kirsher kfree(port); 1277e689cf4aSJeff Kirsher 1278e689cf4aSJeff Kirsher err_out_put_mdesc: 1279e689cf4aSJeff Kirsher mdesc_release(hp); 1280e689cf4aSJeff Kirsher return err; 1281e689cf4aSJeff Kirsher } 1282e689cf4aSJeff Kirsher 1283e689cf4aSJeff Kirsher static int vnet_port_remove(struct vio_dev *vdev) 1284e689cf4aSJeff Kirsher { 1285e689cf4aSJeff Kirsher struct vnet_port *port = dev_get_drvdata(&vdev->dev); 1286e689cf4aSJeff Kirsher 1287e689cf4aSJeff Kirsher if (port) { 1288e689cf4aSJeff Kirsher struct vnet *vp = port->vp; 1289e689cf4aSJeff Kirsher unsigned long flags; 1290e689cf4aSJeff Kirsher 1291e689cf4aSJeff Kirsher del_timer_sync(&port->vio.timer); 1292e689cf4aSJeff Kirsher 1293e689cf4aSJeff Kirsher spin_lock_irqsave(&vp->lock, flags); 1294e689cf4aSJeff Kirsher list_del(&port->list); 1295e689cf4aSJeff Kirsher hlist_del(&port->hash); 1296e689cf4aSJeff Kirsher spin_unlock_irqrestore(&vp->lock, flags); 1297e689cf4aSJeff Kirsher 1298e689cf4aSJeff Kirsher vnet_port_free_tx_bufs(port); 1299e689cf4aSJeff Kirsher vio_ldc_free(&port->vio); 1300e689cf4aSJeff Kirsher 1301e689cf4aSJeff Kirsher dev_set_drvdata(&vdev->dev, NULL); 
1302e689cf4aSJeff Kirsher 1303e689cf4aSJeff Kirsher kfree(port); 1304aabb9875SDave Kleikamp 1305e689cf4aSJeff Kirsher } 1306e689cf4aSJeff Kirsher return 0; 1307e689cf4aSJeff Kirsher } 1308e689cf4aSJeff Kirsher 1309e689cf4aSJeff Kirsher static const struct vio_device_id vnet_port_match[] = { 1310e689cf4aSJeff Kirsher { 1311e689cf4aSJeff Kirsher .type = "vnet-port", 1312e689cf4aSJeff Kirsher }, 1313e689cf4aSJeff Kirsher {}, 1314e689cf4aSJeff Kirsher }; 1315e689cf4aSJeff Kirsher MODULE_DEVICE_TABLE(vio, vnet_port_match); 1316e689cf4aSJeff Kirsher 1317e689cf4aSJeff Kirsher static struct vio_driver vnet_port_driver = { 1318e689cf4aSJeff Kirsher .id_table = vnet_port_match, 1319e689cf4aSJeff Kirsher .probe = vnet_port_probe, 1320e689cf4aSJeff Kirsher .remove = vnet_port_remove, 1321e689cf4aSJeff Kirsher .name = "vnet_port", 1322e689cf4aSJeff Kirsher }; 1323e689cf4aSJeff Kirsher 1324e689cf4aSJeff Kirsher static int __init vnet_init(void) 1325e689cf4aSJeff Kirsher { 1326e689cf4aSJeff Kirsher return vio_register_driver(&vnet_port_driver); 1327e689cf4aSJeff Kirsher } 1328e689cf4aSJeff Kirsher 1329e689cf4aSJeff Kirsher static void __exit vnet_exit(void) 1330e689cf4aSJeff Kirsher { 1331e689cf4aSJeff Kirsher vio_unregister_driver(&vnet_port_driver); 1332a4b70a07SSowmini Varadhan vnet_cleanup(); 1333e689cf4aSJeff Kirsher } 1334e689cf4aSJeff Kirsher 1335e689cf4aSJeff Kirsher module_init(vnet_init); 1336e689cf4aSJeff Kirsher module_exit(vnet_exit); 1337