1f89efd52SMatt Porter /* 2f89efd52SMatt Porter * rionet - Ethernet driver over RapidIO messaging services 3f89efd52SMatt Porter * 4f89efd52SMatt Porter * Copyright 2005 MontaVista Software, Inc. 5f89efd52SMatt Porter * Matt Porter <mporter@kernel.crashing.org> 6f89efd52SMatt Porter * 7f89efd52SMatt Porter * This program is free software; you can redistribute it and/or modify it 8f89efd52SMatt Porter * under the terms of the GNU General Public License as published by the 9f89efd52SMatt Porter * Free Software Foundation; either version 2 of the License, or (at your 10f89efd52SMatt Porter * option) any later version. 11f89efd52SMatt Porter */ 12f89efd52SMatt Porter 13f89efd52SMatt Porter #include <linux/module.h> 14f89efd52SMatt Porter #include <linux/kernel.h> 15f89efd52SMatt Porter #include <linux/dma-mapping.h> 16f89efd52SMatt Porter #include <linux/delay.h> 17f89efd52SMatt Porter #include <linux/rio.h> 18f89efd52SMatt Porter #include <linux/rio_drv.h> 195a0e3ad6STejun Heo #include <linux/slab.h> 20f89efd52SMatt Porter #include <linux/rio_ids.h> 21f89efd52SMatt Porter 22f89efd52SMatt Porter #include <linux/netdevice.h> 23f89efd52SMatt Porter #include <linux/etherdevice.h> 24f89efd52SMatt Porter #include <linux/skbuff.h> 25f89efd52SMatt Porter #include <linux/crc32.h> 26f89efd52SMatt Porter #include <linux/ethtool.h> 27f41e2472SAlexandre Bounine #include <linux/reboot.h> 28f89efd52SMatt Porter 29f89efd52SMatt Porter #define DRV_NAME "rionet" 302fb717ecSAlexandre Bounine #define DRV_VERSION "0.3" 31f89efd52SMatt Porter #define DRV_AUTHOR "Matt Porter <mporter@kernel.crashing.org>" 32f89efd52SMatt Porter #define DRV_DESC "Ethernet over RapidIO" 33f89efd52SMatt Porter 34f89efd52SMatt Porter MODULE_AUTHOR(DRV_AUTHOR); 35f89efd52SMatt Porter MODULE_DESCRIPTION(DRV_DESC); 36f89efd52SMatt Porter MODULE_LICENSE("GPL"); 37f89efd52SMatt Porter 38f89efd52SMatt Porter #define RIONET_DEFAULT_MSGLEVEL \ 39f89efd52SMatt Porter (NETIF_MSG_DRV | \ 40f89efd52SMatt Porter 
NETIF_MSG_LINK | \ 41f89efd52SMatt Porter NETIF_MSG_RX_ERR | \ 42f89efd52SMatt Porter NETIF_MSG_TX_ERR) 43f89efd52SMatt Porter 44f89efd52SMatt Porter #define RIONET_DOORBELL_JOIN 0x1000 45f89efd52SMatt Porter #define RIONET_DOORBELL_LEAVE 0x1001 46f89efd52SMatt Porter 47f89efd52SMatt Porter #define RIONET_MAILBOX 0 48f89efd52SMatt Porter 49f89efd52SMatt Porter #define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE 50f89efd52SMatt Porter #define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE 512fb717ecSAlexandre Bounine #define RIONET_MAX_NETS 8 5292444bb3SAurelien Jacquiot #define RIONET_MSG_SIZE RIO_MAX_MSG_SIZE 5392444bb3SAurelien Jacquiot #define RIONET_MAX_MTU (RIONET_MSG_SIZE - ETH_HLEN) 54f89efd52SMatt Porter 55f89efd52SMatt Porter struct rionet_private { 56f89efd52SMatt Porter struct rio_mport *mport; 57f89efd52SMatt Porter struct sk_buff *rx_skb[RIONET_RX_RING_SIZE]; 58f89efd52SMatt Porter struct sk_buff *tx_skb[RIONET_TX_RING_SIZE]; 59f89efd52SMatt Porter int rx_slot; 60f89efd52SMatt Porter int tx_slot; 61f89efd52SMatt Porter int tx_cnt; 62f89efd52SMatt Porter int ack_slot; 63f89efd52SMatt Porter spinlock_t lock; 64f89efd52SMatt Porter spinlock_t tx_lock; 65f89efd52SMatt Porter u32 msg_enable; 66*34ed2ebbSAlexandre Bounine bool open; 67f89efd52SMatt Porter }; 68f89efd52SMatt Porter 69f89efd52SMatt Porter struct rionet_peer { 70f89efd52SMatt Porter struct list_head node; 71f89efd52SMatt Porter struct rio_dev *rdev; 72f89efd52SMatt Porter struct resource *res; 73f89efd52SMatt Porter }; 74f89efd52SMatt Porter 752fb717ecSAlexandre Bounine struct rionet_net { 762fb717ecSAlexandre Bounine struct net_device *ndev; 772fb717ecSAlexandre Bounine struct list_head peers; 78*34ed2ebbSAlexandre Bounine spinlock_t lock; /* net info access lock */ 792fb717ecSAlexandre Bounine struct rio_dev **active; 802fb717ecSAlexandre Bounine int nact; /* number of active peers */ 812fb717ecSAlexandre Bounine }; 82f89efd52SMatt Porter 832fb717ecSAlexandre Bounine static struct rionet_net 
nets[RIONET_MAX_NETS]; 84f89efd52SMatt Porter 85284fb68dSAlexandre Bounine #define is_rionet_capable(src_ops, dst_ops) \ 86284fb68dSAlexandre Bounine ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ 87284fb68dSAlexandre Bounine (dst_ops & RIO_DST_OPS_DATA_MSG) && \ 88f89efd52SMatt Porter (src_ops & RIO_SRC_OPS_DOORBELL) && \ 89f89efd52SMatt Porter (dst_ops & RIO_DST_OPS_DOORBELL)) 90f89efd52SMatt Porter #define dev_rionet_capable(dev) \ 91284fb68dSAlexandre Bounine is_rionet_capable(dev->src_ops, dev->dst_ops) 92f89efd52SMatt Porter 93e0c87bd9SAlexandre Bounine #define RIONET_MAC_MATCH(x) (!memcmp((x), "\00\01\00\01", 4)) 94e0c87bd9SAlexandre Bounine #define RIONET_GET_DESTID(x) ((*((u8 *)x + 4) << 8) | *((u8 *)x + 5)) 95f89efd52SMatt Porter 96f89efd52SMatt Porter static int rionet_rx_clean(struct net_device *ndev) 97f89efd52SMatt Porter { 98f89efd52SMatt Porter int i; 99f89efd52SMatt Porter int error = 0; 1004cf1653aSWang Chen struct rionet_private *rnet = netdev_priv(ndev); 101f89efd52SMatt Porter void *data; 102f89efd52SMatt Porter 103f89efd52SMatt Porter i = rnet->rx_slot; 104f89efd52SMatt Porter 105f89efd52SMatt Porter do { 106f89efd52SMatt Porter if (!rnet->rx_skb[i]) 107f89efd52SMatt Porter continue; 108f89efd52SMatt Porter 109f89efd52SMatt Porter if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX))) 110f89efd52SMatt Porter break; 111f89efd52SMatt Porter 112f89efd52SMatt Porter rnet->rx_skb[i]->data = data; 113f89efd52SMatt Porter skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE); 114f89efd52SMatt Porter rnet->rx_skb[i]->protocol = 115f89efd52SMatt Porter eth_type_trans(rnet->rx_skb[i], ndev); 116f89efd52SMatt Porter error = netif_rx(rnet->rx_skb[i]); 117f89efd52SMatt Porter 118f89efd52SMatt Porter if (error == NET_RX_DROP) { 11909f75cd7SJeff Garzik ndev->stats.rx_dropped++; 120f89efd52SMatt Porter } else { 12109f75cd7SJeff Garzik ndev->stats.rx_packets++; 12209f75cd7SJeff Garzik ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE; 123f89efd52SMatt Porter } 124f89efd52SMatt 
Porter 125f89efd52SMatt Porter } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot); 126f89efd52SMatt Porter 127f89efd52SMatt Porter return i; 128f89efd52SMatt Porter } 129f89efd52SMatt Porter 130f89efd52SMatt Porter static void rionet_rx_fill(struct net_device *ndev, int end) 131f89efd52SMatt Porter { 132f89efd52SMatt Porter int i; 1334cf1653aSWang Chen struct rionet_private *rnet = netdev_priv(ndev); 134f89efd52SMatt Porter 135f89efd52SMatt Porter i = rnet->rx_slot; 136f89efd52SMatt Porter do { 137f89efd52SMatt Porter rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE); 138f89efd52SMatt Porter 139f89efd52SMatt Porter if (!rnet->rx_skb[i]) 140f89efd52SMatt Porter break; 141f89efd52SMatt Porter 142f89efd52SMatt Porter rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX, 143f89efd52SMatt Porter rnet->rx_skb[i]->data); 144f89efd52SMatt Porter } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end); 145f89efd52SMatt Porter 146f89efd52SMatt Porter rnet->rx_slot = i; 147f89efd52SMatt Porter } 148f89efd52SMatt Porter 149f89efd52SMatt Porter static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev, 150f89efd52SMatt Porter struct rio_dev *rdev) 151f89efd52SMatt Porter { 1524cf1653aSWang Chen struct rionet_private *rnet = netdev_priv(ndev); 153f89efd52SMatt Porter 154f89efd52SMatt Porter rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len); 155f89efd52SMatt Porter rnet->tx_skb[rnet->tx_slot] = skb; 156f89efd52SMatt Porter 15709f75cd7SJeff Garzik ndev->stats.tx_packets++; 15809f75cd7SJeff Garzik ndev->stats.tx_bytes += skb->len; 159f89efd52SMatt Porter 160f89efd52SMatt Porter if (++rnet->tx_cnt == RIONET_TX_RING_SIZE) 161f89efd52SMatt Porter netif_stop_queue(ndev); 162f89efd52SMatt Porter 163f89efd52SMatt Porter ++rnet->tx_slot; 164f89efd52SMatt Porter rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1); 165f89efd52SMatt Porter 166f89efd52SMatt Porter if (netif_msg_tx_queued(rnet)) 1678df8a475SDavid S. 
Miller printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME, 1688df8a475SDavid S. Miller skb->len); 169f89efd52SMatt Porter 170f89efd52SMatt Porter return 0; 171f89efd52SMatt Porter } 172f89efd52SMatt Porter 173f89efd52SMatt Porter static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 174f89efd52SMatt Porter { 175f89efd52SMatt Porter int i; 1764cf1653aSWang Chen struct rionet_private *rnet = netdev_priv(ndev); 177f89efd52SMatt Porter struct ethhdr *eth = (struct ethhdr *)skb->data; 178f89efd52SMatt Porter u16 destid; 179f89efd52SMatt Porter unsigned long flags; 1807c4a6106SAlexandre Bounine int add_num = 1; 181f89efd52SMatt Porter 182f89efd52SMatt Porter local_irq_save(flags); 183f89efd52SMatt Porter if (!spin_trylock(&rnet->tx_lock)) { 184f89efd52SMatt Porter local_irq_restore(flags); 185f89efd52SMatt Porter return NETDEV_TX_LOCKED; 186f89efd52SMatt Porter } 187f89efd52SMatt Porter 1887c4a6106SAlexandre Bounine if (is_multicast_ether_addr(eth->h_dest)) 1892fb717ecSAlexandre Bounine add_num = nets[rnet->mport->id].nact; 1907c4a6106SAlexandre Bounine 1917c4a6106SAlexandre Bounine if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) { 192f89efd52SMatt Porter netif_stop_queue(ndev); 193f89efd52SMatt Porter spin_unlock_irqrestore(&rnet->tx_lock, flags); 194f89efd52SMatt Porter printk(KERN_ERR "%s: BUG! 
Tx Ring full when queue awake!\n", 195f89efd52SMatt Porter ndev->name); 196f89efd52SMatt Porter return NETDEV_TX_BUSY; 197f89efd52SMatt Porter } 198f89efd52SMatt Porter 199abfc89c7STobias Klauser if (is_multicast_ether_addr(eth->h_dest)) { 2007c4a6106SAlexandre Bounine int count = 0; 2012fb717ecSAlexandre Bounine 202e0423236SZhang Wei for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size); 203e0423236SZhang Wei i++) 2042fb717ecSAlexandre Bounine if (nets[rnet->mport->id].active[i]) { 205f89efd52SMatt Porter rionet_queue_tx_msg(skb, ndev, 2062fb717ecSAlexandre Bounine nets[rnet->mport->id].active[i]); 2077c4a6106SAlexandre Bounine if (count) 2087c4a6106SAlexandre Bounine atomic_inc(&skb->users); 2097c4a6106SAlexandre Bounine count++; 2107c4a6106SAlexandre Bounine } 211f89efd52SMatt Porter } else if (RIONET_MAC_MATCH(eth->h_dest)) { 212f89efd52SMatt Porter destid = RIONET_GET_DESTID(eth->h_dest); 2132fb717ecSAlexandre Bounine if (nets[rnet->mport->id].active[destid]) 2142fb717ecSAlexandre Bounine rionet_queue_tx_msg(skb, ndev, 2152fb717ecSAlexandre Bounine nets[rnet->mport->id].active[destid]); 216e6161d64SAlexandre Bounine else { 217e6161d64SAlexandre Bounine /* 218e6161d64SAlexandre Bounine * If the target device was removed from the list of 219e6161d64SAlexandre Bounine * active peers but we still have TX packets targeting 220e6161d64SAlexandre Bounine * it just report sending a packet to the target 221e6161d64SAlexandre Bounine * (without actual packet transfer). 
222e6161d64SAlexandre Bounine */ 223e6161d64SAlexandre Bounine dev_kfree_skb_any(skb); 224e6161d64SAlexandre Bounine ndev->stats.tx_packets++; 225e6161d64SAlexandre Bounine ndev->stats.tx_bytes += skb->len; 226e6161d64SAlexandre Bounine } 227f89efd52SMatt Porter } 228f89efd52SMatt Porter 229f89efd52SMatt Porter spin_unlock_irqrestore(&rnet->tx_lock, flags); 230f89efd52SMatt Porter 2316ed10654SPatrick McHardy return NETDEV_TX_OK; 232f89efd52SMatt Porter } 233f89efd52SMatt Porter 234f89efd52SMatt Porter static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid, 235f89efd52SMatt Porter u16 info) 236f89efd52SMatt Porter { 237f89efd52SMatt Porter struct net_device *ndev = dev_id; 2384cf1653aSWang Chen struct rionet_private *rnet = netdev_priv(ndev); 239f89efd52SMatt Porter struct rionet_peer *peer; 240*34ed2ebbSAlexandre Bounine unsigned char netid = rnet->mport->id; 241f89efd52SMatt Porter 242f89efd52SMatt Porter if (netif_msg_intr(rnet)) 243f89efd52SMatt Porter printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x", 244f89efd52SMatt Porter DRV_NAME, sid, tid, info); 245f89efd52SMatt Porter if (info == RIONET_DOORBELL_JOIN) { 246*34ed2ebbSAlexandre Bounine if (!nets[netid].active[sid]) { 247*34ed2ebbSAlexandre Bounine spin_lock(&nets[netid].lock); 248*34ed2ebbSAlexandre Bounine list_for_each_entry(peer, &nets[netid].peers, node) { 2497c4a6106SAlexandre Bounine if (peer->rdev->destid == sid) { 250*34ed2ebbSAlexandre Bounine nets[netid].active[sid] = peer->rdev; 251*34ed2ebbSAlexandre Bounine nets[netid].nact++; 2527c4a6106SAlexandre Bounine } 253f89efd52SMatt Porter } 254*34ed2ebbSAlexandre Bounine spin_unlock(&nets[netid].lock); 255*34ed2ebbSAlexandre Bounine 256f89efd52SMatt Porter rio_mport_send_doorbell(mport, sid, 257f89efd52SMatt Porter RIONET_DOORBELL_JOIN); 258f89efd52SMatt Porter } 259f89efd52SMatt Porter } else if (info == RIONET_DOORBELL_LEAVE) { 260*34ed2ebbSAlexandre Bounine spin_lock(&nets[netid].lock); 
261*34ed2ebbSAlexandre Bounine if (nets[netid].active[sid]) { 262*34ed2ebbSAlexandre Bounine nets[netid].active[sid] = NULL; 263*34ed2ebbSAlexandre Bounine nets[netid].nact--; 264*34ed2ebbSAlexandre Bounine } 265*34ed2ebbSAlexandre Bounine spin_unlock(&nets[netid].lock); 266f89efd52SMatt Porter } else { 267f89efd52SMatt Porter if (netif_msg_intr(rnet)) 268f89efd52SMatt Porter printk(KERN_WARNING "%s: unhandled doorbell\n", 269f89efd52SMatt Porter DRV_NAME); 270f89efd52SMatt Porter } 271f89efd52SMatt Porter } 272f89efd52SMatt Porter 273f89efd52SMatt Porter static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot) 274f89efd52SMatt Porter { 275f89efd52SMatt Porter int n; 276f89efd52SMatt Porter struct net_device *ndev = dev_id; 2774cf1653aSWang Chen struct rionet_private *rnet = netdev_priv(ndev); 278f89efd52SMatt Porter 279f89efd52SMatt Porter if (netif_msg_intr(rnet)) 280f89efd52SMatt Porter printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n", 281f89efd52SMatt Porter DRV_NAME, mbox, slot); 282f89efd52SMatt Porter 283f89efd52SMatt Porter spin_lock(&rnet->lock); 284f89efd52SMatt Porter if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot) 285f89efd52SMatt Porter rionet_rx_fill(ndev, n); 286f89efd52SMatt Porter spin_unlock(&rnet->lock); 287f89efd52SMatt Porter } 288f89efd52SMatt Porter 289f89efd52SMatt Porter static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot) 290f89efd52SMatt Porter { 291f89efd52SMatt Porter struct net_device *ndev = dev_id; 2924cf1653aSWang Chen struct rionet_private *rnet = netdev_priv(ndev); 293f89efd52SMatt Porter 29436915976SAurelien Jacquiot spin_lock(&rnet->tx_lock); 295f89efd52SMatt Porter 296f89efd52SMatt Porter if (netif_msg_intr(rnet)) 297f89efd52SMatt Porter printk(KERN_INFO 298f89efd52SMatt Porter "%s: outbound message event, mbox %d slot %d\n", 299f89efd52SMatt Porter DRV_NAME, mbox, slot); 300f89efd52SMatt Porter 301f89efd52SMatt Porter while 
(rnet->tx_cnt && (rnet->ack_slot != slot)) { 302f89efd52SMatt Porter /* dma unmap single */ 303f89efd52SMatt Porter dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]); 304f89efd52SMatt Porter rnet->tx_skb[rnet->ack_slot] = NULL; 305f89efd52SMatt Porter ++rnet->ack_slot; 306f89efd52SMatt Porter rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1); 307f89efd52SMatt Porter rnet->tx_cnt--; 308f89efd52SMatt Porter } 309f89efd52SMatt Porter 310f89efd52SMatt Porter if (rnet->tx_cnt < RIONET_TX_RING_SIZE) 311f89efd52SMatt Porter netif_wake_queue(ndev); 312f89efd52SMatt Porter 31336915976SAurelien Jacquiot spin_unlock(&rnet->tx_lock); 314f89efd52SMatt Porter } 315f89efd52SMatt Porter 316f89efd52SMatt Porter static int rionet_open(struct net_device *ndev) 317f89efd52SMatt Porter { 318f89efd52SMatt Porter int i, rc = 0; 319*34ed2ebbSAlexandre Bounine struct rionet_peer *peer; 3204cf1653aSWang Chen struct rionet_private *rnet = netdev_priv(ndev); 321*34ed2ebbSAlexandre Bounine unsigned char netid = rnet->mport->id; 322*34ed2ebbSAlexandre Bounine unsigned long flags; 323f89efd52SMatt Porter 324f89efd52SMatt Porter if (netif_msg_ifup(rnet)) 325f89efd52SMatt Porter printk(KERN_INFO "%s: open\n", DRV_NAME); 326f89efd52SMatt Porter 327f89efd52SMatt Porter if ((rc = rio_request_inb_dbell(rnet->mport, 328f89efd52SMatt Porter (void *)ndev, 329f89efd52SMatt Porter RIONET_DOORBELL_JOIN, 330f89efd52SMatt Porter RIONET_DOORBELL_LEAVE, 331f89efd52SMatt Porter rionet_dbell_event)) < 0) 332f89efd52SMatt Porter goto out; 333f89efd52SMatt Porter 334f89efd52SMatt Porter if ((rc = rio_request_inb_mbox(rnet->mport, 335f89efd52SMatt Porter (void *)ndev, 336f89efd52SMatt Porter RIONET_MAILBOX, 337f89efd52SMatt Porter RIONET_RX_RING_SIZE, 338f89efd52SMatt Porter rionet_inb_msg_event)) < 0) 339f89efd52SMatt Porter goto out; 340f89efd52SMatt Porter 341f89efd52SMatt Porter if ((rc = rio_request_outb_mbox(rnet->mport, 342f89efd52SMatt Porter (void *)ndev, 343f89efd52SMatt Porter RIONET_MAILBOX, 344f89efd52SMatt 
Porter RIONET_TX_RING_SIZE, 345f89efd52SMatt Porter rionet_outb_msg_event)) < 0) 346f89efd52SMatt Porter goto out; 347f89efd52SMatt Porter 348f89efd52SMatt Porter /* Initialize inbound message ring */ 349f89efd52SMatt Porter for (i = 0; i < RIONET_RX_RING_SIZE; i++) 350f89efd52SMatt Porter rnet->rx_skb[i] = NULL; 351f89efd52SMatt Porter rnet->rx_slot = 0; 352f89efd52SMatt Porter rionet_rx_fill(ndev, 0); 353f89efd52SMatt Porter 354f89efd52SMatt Porter rnet->tx_slot = 0; 355f89efd52SMatt Porter rnet->tx_cnt = 0; 356f89efd52SMatt Porter rnet->ack_slot = 0; 357f89efd52SMatt Porter 358f89efd52SMatt Porter netif_carrier_on(ndev); 359f89efd52SMatt Porter netif_start_queue(ndev); 360f89efd52SMatt Porter 361*34ed2ebbSAlexandre Bounine spin_lock_irqsave(&nets[netid].lock, flags); 362*34ed2ebbSAlexandre Bounine list_for_each_entry(peer, &nets[netid].peers, node) { 363284fb68dSAlexandre Bounine /* Send a join message */ 364f89efd52SMatt Porter rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); 365f89efd52SMatt Porter } 366*34ed2ebbSAlexandre Bounine spin_unlock_irqrestore(&nets[netid].lock, flags); 367*34ed2ebbSAlexandre Bounine rnet->open = true; 368f89efd52SMatt Porter 369f89efd52SMatt Porter out: 370f89efd52SMatt Porter return rc; 371f89efd52SMatt Porter } 372f89efd52SMatt Porter 373f89efd52SMatt Porter static int rionet_close(struct net_device *ndev) 374f89efd52SMatt Porter { 3754cf1653aSWang Chen struct rionet_private *rnet = netdev_priv(ndev); 376*34ed2ebbSAlexandre Bounine struct rionet_peer *peer; 377*34ed2ebbSAlexandre Bounine unsigned char netid = rnet->mport->id; 378*34ed2ebbSAlexandre Bounine unsigned long flags; 379f89efd52SMatt Porter int i; 380f89efd52SMatt Porter 381f89efd52SMatt Porter if (netif_msg_ifup(rnet)) 3822fb717ecSAlexandre Bounine printk(KERN_INFO "%s: close %s\n", DRV_NAME, ndev->name); 383f89efd52SMatt Porter 384f89efd52SMatt Porter netif_stop_queue(ndev); 385f89efd52SMatt Porter netif_carrier_off(ndev); 386*34ed2ebbSAlexandre Bounine rnet->open 
= false; 387f89efd52SMatt Porter 388f89efd52SMatt Porter for (i = 0; i < RIONET_RX_RING_SIZE; i++) 389f89efd52SMatt Porter kfree_skb(rnet->rx_skb[i]); 390f89efd52SMatt Porter 391*34ed2ebbSAlexandre Bounine spin_lock_irqsave(&nets[netid].lock, flags); 392*34ed2ebbSAlexandre Bounine list_for_each_entry(peer, &nets[netid].peers, node) { 393*34ed2ebbSAlexandre Bounine if (nets[netid].active[peer->rdev->destid]) { 394f89efd52SMatt Porter rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE); 395*34ed2ebbSAlexandre Bounine nets[netid].active[peer->rdev->destid] = NULL; 396f89efd52SMatt Porter } 397*34ed2ebbSAlexandre Bounine if (peer->res) 398f89efd52SMatt Porter rio_release_outb_dbell(peer->rdev, peer->res); 399f89efd52SMatt Porter } 400*34ed2ebbSAlexandre Bounine spin_unlock_irqrestore(&nets[netid].lock, flags); 401f89efd52SMatt Porter 402f89efd52SMatt Porter rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN, 403f89efd52SMatt Porter RIONET_DOORBELL_LEAVE); 404f89efd52SMatt Porter rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX); 405f89efd52SMatt Porter rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX); 406f89efd52SMatt Porter 407f89efd52SMatt Porter return 0; 408f89efd52SMatt Porter } 409f89efd52SMatt Porter 41071db87baSViresh Kumar static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif) 411f89efd52SMatt Porter { 412e6161d64SAlexandre Bounine struct rio_dev *rdev = to_rio_dev(dev); 4132fb717ecSAlexandre Bounine unsigned char netid = rdev->net->hport->id; 414*34ed2ebbSAlexandre Bounine struct rionet_peer *peer; 415*34ed2ebbSAlexandre Bounine int state, found = 0; 416*34ed2ebbSAlexandre Bounine unsigned long flags; 417f89efd52SMatt Porter 418*34ed2ebbSAlexandre Bounine if (!dev_rionet_capable(rdev)) 419*34ed2ebbSAlexandre Bounine return; 420*34ed2ebbSAlexandre Bounine 421*34ed2ebbSAlexandre Bounine spin_lock_irqsave(&nets[netid].lock, flags); 422*34ed2ebbSAlexandre Bounine list_for_each_entry(peer, &nets[netid].peers, node) { 
423e6161d64SAlexandre Bounine if (peer->rdev == rdev) { 424*34ed2ebbSAlexandre Bounine list_del(&peer->node); 425e6161d64SAlexandre Bounine if (nets[netid].active[rdev->destid]) { 426*34ed2ebbSAlexandre Bounine state = atomic_read(&rdev->state); 427*34ed2ebbSAlexandre Bounine if (state != RIO_DEVICE_GONE && 428*34ed2ebbSAlexandre Bounine state != RIO_DEVICE_INITIALIZING) { 429*34ed2ebbSAlexandre Bounine rio_send_doorbell(rdev, 430*34ed2ebbSAlexandre Bounine RIONET_DOORBELL_LEAVE); 431*34ed2ebbSAlexandre Bounine } 432e6161d64SAlexandre Bounine nets[netid].active[rdev->destid] = NULL; 433e6161d64SAlexandre Bounine nets[netid].nact--; 434f89efd52SMatt Porter } 435*34ed2ebbSAlexandre Bounine found = 1; 436e6161d64SAlexandre Bounine break; 437e6161d64SAlexandre Bounine } 438e6161d64SAlexandre Bounine } 439*34ed2ebbSAlexandre Bounine spin_unlock_irqrestore(&nets[netid].lock, flags); 440*34ed2ebbSAlexandre Bounine 441*34ed2ebbSAlexandre Bounine if (found) { 442*34ed2ebbSAlexandre Bounine if (peer->res) 443*34ed2ebbSAlexandre Bounine rio_release_outb_dbell(rdev, peer->res); 444*34ed2ebbSAlexandre Bounine kfree(peer); 445e6161d64SAlexandre Bounine } 446f89efd52SMatt Porter } 447f89efd52SMatt Porter 448f89efd52SMatt Porter static void rionet_get_drvinfo(struct net_device *ndev, 449f89efd52SMatt Porter struct ethtool_drvinfo *info) 450f89efd52SMatt Porter { 4514cf1653aSWang Chen struct rionet_private *rnet = netdev_priv(ndev); 452f89efd52SMatt Porter 4537826d43fSJiri Pirko strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 4547826d43fSJiri Pirko strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 4557826d43fSJiri Pirko strlcpy(info->fw_version, "n/a", sizeof(info->fw_version)); 4567826d43fSJiri Pirko strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info)); 457f89efd52SMatt Porter } 458f89efd52SMatt Porter 459f89efd52SMatt Porter static u32 rionet_get_msglevel(struct net_device *ndev) 460f89efd52SMatt Porter { 4614cf1653aSWang Chen struct rionet_private 
*rnet = netdev_priv(ndev); 462f89efd52SMatt Porter 463f89efd52SMatt Porter return rnet->msg_enable; 464f89efd52SMatt Porter } 465f89efd52SMatt Porter 466f89efd52SMatt Porter static void rionet_set_msglevel(struct net_device *ndev, u32 value) 467f89efd52SMatt Porter { 4684cf1653aSWang Chen struct rionet_private *rnet = netdev_priv(ndev); 469f89efd52SMatt Porter 470f89efd52SMatt Porter rnet->msg_enable = value; 471f89efd52SMatt Porter } 472f89efd52SMatt Porter 47392444bb3SAurelien Jacquiot static int rionet_change_mtu(struct net_device *ndev, int new_mtu) 47492444bb3SAurelien Jacquiot { 47592444bb3SAurelien Jacquiot if ((new_mtu < 68) || (new_mtu > RIONET_MAX_MTU)) { 47692444bb3SAurelien Jacquiot printk(KERN_ERR "%s: Invalid MTU size %d\n", 47792444bb3SAurelien Jacquiot ndev->name, new_mtu); 47892444bb3SAurelien Jacquiot return -EINVAL; 47992444bb3SAurelien Jacquiot } 48092444bb3SAurelien Jacquiot ndev->mtu = new_mtu; 48192444bb3SAurelien Jacquiot return 0; 48292444bb3SAurelien Jacquiot } 48392444bb3SAurelien Jacquiot 4847282d491SJeff Garzik static const struct ethtool_ops rionet_ethtool_ops = { 485f89efd52SMatt Porter .get_drvinfo = rionet_get_drvinfo, 486f89efd52SMatt Porter .get_msglevel = rionet_get_msglevel, 487f89efd52SMatt Porter .set_msglevel = rionet_set_msglevel, 488f89efd52SMatt Porter .get_link = ethtool_op_get_link, 489f89efd52SMatt Porter }; 490f89efd52SMatt Porter 491a33a2bb3SAlexander Beregalov static const struct net_device_ops rionet_netdev_ops = { 492a33a2bb3SAlexander Beregalov .ndo_open = rionet_open, 493a33a2bb3SAlexander Beregalov .ndo_stop = rionet_close, 494a33a2bb3SAlexander Beregalov .ndo_start_xmit = rionet_start_xmit, 49592444bb3SAurelien Jacquiot .ndo_change_mtu = rionet_change_mtu, 496a33a2bb3SAlexander Beregalov .ndo_validate_addr = eth_validate_addr, 497a33a2bb3SAlexander Beregalov .ndo_set_mac_address = eth_mac_addr, 498a33a2bb3SAlexander Beregalov }; 499a33a2bb3SAlexander Beregalov 50055caa924SYinglin Luan static int 
rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) 501f89efd52SMatt Porter { 502f89efd52SMatt Porter int rc = 0; 503f89efd52SMatt Porter struct rionet_private *rnet; 504f89efd52SMatt Porter u16 device_id; 505acc65632SAkinobu Mita const size_t rionet_active_bytes = sizeof(void *) * 506acc65632SAkinobu Mita RIO_MAX_ROUTE_ENTRIES(mport->sys_size); 507f89efd52SMatt Porter 5082fb717ecSAlexandre Bounine nets[mport->id].active = (struct rio_dev **)__get_free_pages(GFP_KERNEL, 509acc65632SAkinobu Mita get_order(rionet_active_bytes)); 5102fb717ecSAlexandre Bounine if (!nets[mport->id].active) { 511e0423236SZhang Wei rc = -ENOMEM; 512e0423236SZhang Wei goto out; 513e0423236SZhang Wei } 5142fb717ecSAlexandre Bounine memset((void *)nets[mport->id].active, 0, rionet_active_bytes); 515e0423236SZhang Wei 516f89efd52SMatt Porter /* Set up private area */ 5174cf1653aSWang Chen rnet = netdev_priv(ndev); 518f89efd52SMatt Porter rnet->mport = mport; 519*34ed2ebbSAlexandre Bounine rnet->open = false; 520f89efd52SMatt Porter 521f89efd52SMatt Porter /* Set the default MAC address */ 522f89efd52SMatt Porter device_id = rio_local_get_device_id(mport); 523f89efd52SMatt Porter ndev->dev_addr[0] = 0x00; 524f89efd52SMatt Porter ndev->dev_addr[1] = 0x01; 525f89efd52SMatt Porter ndev->dev_addr[2] = 0x00; 526f89efd52SMatt Porter ndev->dev_addr[3] = 0x01; 527f89efd52SMatt Porter ndev->dev_addr[4] = device_id >> 8; 528f89efd52SMatt Porter ndev->dev_addr[5] = device_id & 0xff; 529f89efd52SMatt Porter 530a33a2bb3SAlexander Beregalov ndev->netdev_ops = &rionet_netdev_ops; 53192444bb3SAurelien Jacquiot ndev->mtu = RIONET_MAX_MTU; 532f89efd52SMatt Porter ndev->features = NETIF_F_LLTX; 5332aaf308bSAlexandre Bounine SET_NETDEV_DEV(ndev, &mport->dev); 5347ad24ea4SWilfried Klaebe ndev->ethtool_ops = &rionet_ethtool_ops; 535f89efd52SMatt Porter 536f89efd52SMatt Porter spin_lock_init(&rnet->lock); 537f89efd52SMatt Porter spin_lock_init(&rnet->tx_lock); 538f89efd52SMatt Porter 
539f89efd52SMatt Porter rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL; 540f89efd52SMatt Porter 541f89efd52SMatt Porter rc = register_netdev(ndev); 542*34ed2ebbSAlexandre Bounine if (rc != 0) { 543*34ed2ebbSAlexandre Bounine free_pages((unsigned long)nets[mport->id].active, 544*34ed2ebbSAlexandre Bounine get_order(rionet_active_bytes)); 545f89efd52SMatt Porter goto out; 546*34ed2ebbSAlexandre Bounine } 547f89efd52SMatt Porter 5482fb717ecSAlexandre Bounine printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n", 549f89efd52SMatt Porter ndev->name, 550f89efd52SMatt Porter DRV_NAME, 551f89efd52SMatt Porter DRV_DESC, 552f89efd52SMatt Porter DRV_VERSION, 5532fb717ecSAlexandre Bounine ndev->dev_addr, 5542fb717ecSAlexandre Bounine mport->name); 555f89efd52SMatt Porter 556f89efd52SMatt Porter out: 557f89efd52SMatt Porter return rc; 558f89efd52SMatt Porter } 559f89efd52SMatt Porter 560e6161d64SAlexandre Bounine static int rionet_add_dev(struct device *dev, struct subsys_interface *sif) 561f89efd52SMatt Porter { 562f89efd52SMatt Porter int rc = -ENODEV; 563284fb68dSAlexandre Bounine u32 lsrc_ops, ldst_ops; 564f89efd52SMatt Porter struct rionet_peer *peer; 56555caa924SYinglin Luan struct net_device *ndev = NULL; 566e6161d64SAlexandre Bounine struct rio_dev *rdev = to_rio_dev(dev); 5672fb717ecSAlexandre Bounine unsigned char netid = rdev->net->hport->id; 568f89efd52SMatt Porter 5692fb717ecSAlexandre Bounine if (netid >= RIONET_MAX_NETS) 5702fb717ecSAlexandre Bounine return rc; 5712fb717ecSAlexandre Bounine 5722fb717ecSAlexandre Bounine /* 573e6161d64SAlexandre Bounine * If first time through this net, make sure local device is rionet 574e6161d64SAlexandre Bounine * capable and setup netdev (this step will be skipped in later probes 575e6161d64SAlexandre Bounine * on the same net). 
5762fb717ecSAlexandre Bounine */ 577*34ed2ebbSAlexandre Bounine if (!nets[netid].ndev) { 5782fb717ecSAlexandre Bounine rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, 5792fb717ecSAlexandre Bounine &lsrc_ops); 5802fb717ecSAlexandre Bounine rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, 5812fb717ecSAlexandre Bounine &ldst_ops); 5822fb717ecSAlexandre Bounine if (!is_rionet_capable(lsrc_ops, ldst_ops)) { 5832fb717ecSAlexandre Bounine printk(KERN_ERR 5842fb717ecSAlexandre Bounine "%s: local device %s is not network capable\n", 5852fb717ecSAlexandre Bounine DRV_NAME, rdev->net->hport->name); 586f89efd52SMatt Porter goto out; 5872fb717ecSAlexandre Bounine } 588f89efd52SMatt Porter 58955caa924SYinglin Luan /* Allocate our net_device structure */ 59055caa924SYinglin Luan ndev = alloc_etherdev(sizeof(struct rionet_private)); 59155caa924SYinglin Luan if (ndev == NULL) { 59255caa924SYinglin Luan rc = -ENOMEM; 59355caa924SYinglin Luan goto out; 59455caa924SYinglin Luan } 595*34ed2ebbSAlexandre Bounine 59655caa924SYinglin Luan rc = rionet_setup_netdev(rdev->net->hport, ndev); 597e6161d64SAlexandre Bounine if (rc) { 598e6161d64SAlexandre Bounine printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n", 599e6161d64SAlexandre Bounine DRV_NAME, rc); 600*34ed2ebbSAlexandre Bounine free_netdev(ndev); 601e6161d64SAlexandre Bounine goto out; 602e6161d64SAlexandre Bounine } 603e6161d64SAlexandre Bounine 6042fb717ecSAlexandre Bounine INIT_LIST_HEAD(&nets[netid].peers); 605*34ed2ebbSAlexandre Bounine spin_lock_init(&nets[netid].lock); 6062fb717ecSAlexandre Bounine nets[netid].nact = 0; 607*34ed2ebbSAlexandre Bounine nets[netid].ndev = ndev; 608*34ed2ebbSAlexandre Bounine } 609f89efd52SMatt Porter 610f89efd52SMatt Porter /* 611f89efd52SMatt Porter * If the remote device has mailbox/doorbell capabilities, 612f89efd52SMatt Porter * add it to the peer list. 
613f89efd52SMatt Porter */ 614f89efd52SMatt Porter if (dev_rionet_capable(rdev)) { 615*34ed2ebbSAlexandre Bounine struct rionet_private *rnet; 616*34ed2ebbSAlexandre Bounine unsigned long flags; 617*34ed2ebbSAlexandre Bounine 618*34ed2ebbSAlexandre Bounine rnet = netdev_priv(nets[netid].ndev); 619*34ed2ebbSAlexandre Bounine 620*34ed2ebbSAlexandre Bounine peer = kzalloc(sizeof(*peer), GFP_KERNEL); 621*34ed2ebbSAlexandre Bounine if (!peer) { 622f89efd52SMatt Porter rc = -ENOMEM; 623f89efd52SMatt Porter goto out; 624f89efd52SMatt Porter } 625f89efd52SMatt Porter peer->rdev = rdev; 626*34ed2ebbSAlexandre Bounine peer->res = rio_request_outb_dbell(peer->rdev, 627*34ed2ebbSAlexandre Bounine RIONET_DOORBELL_JOIN, 628*34ed2ebbSAlexandre Bounine RIONET_DOORBELL_LEAVE); 629*34ed2ebbSAlexandre Bounine if (!peer->res) { 630*34ed2ebbSAlexandre Bounine pr_err("%s: error requesting doorbells\n", DRV_NAME); 631*34ed2ebbSAlexandre Bounine kfree(peer); 632*34ed2ebbSAlexandre Bounine rc = -ENOMEM; 633*34ed2ebbSAlexandre Bounine goto out; 634*34ed2ebbSAlexandre Bounine } 635*34ed2ebbSAlexandre Bounine 636*34ed2ebbSAlexandre Bounine spin_lock_irqsave(&nets[netid].lock, flags); 6372fb717ecSAlexandre Bounine list_add_tail(&peer->node, &nets[netid].peers); 638*34ed2ebbSAlexandre Bounine spin_unlock_irqrestore(&nets[netid].lock, flags); 639*34ed2ebbSAlexandre Bounine pr_debug("%s: %s add peer %s\n", 640*34ed2ebbSAlexandre Bounine DRV_NAME, __func__, rio_name(rdev)); 641*34ed2ebbSAlexandre Bounine 642*34ed2ebbSAlexandre Bounine /* If netdev is already opened, send join request to new peer */ 643*34ed2ebbSAlexandre Bounine if (rnet->open) 644*34ed2ebbSAlexandre Bounine rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); 645f89efd52SMatt Porter } 646f89efd52SMatt Porter 647e6161d64SAlexandre Bounine return 0; 648f89efd52SMatt Porter out: 649f89efd52SMatt Porter return rc; 650f89efd52SMatt Porter } 651f89efd52SMatt Porter 652f41e2472SAlexandre Bounine static int rionet_shutdown(struct 
notifier_block *nb, unsigned long code,
653f41e2472SAlexandre Bounine 			   void *unused)
/*
 * Reboot-notifier callback: tell every currently-active peer on every
 * rionet that this node is leaving, so remote sides can drop us cleanly
 * before the machine goes down.  Returns NOTIFY_DONE unconditionally.
 */
654f41e2472SAlexandre Bounine {
655*34ed2ebbSAlexandre Bounine 	struct rionet_peer *peer;
656*34ed2ebbSAlexandre Bounine 	unsigned long flags;
657f41e2472SAlexandre Bounine 	int i;
658f41e2472SAlexandre Bounine 
659f41e2472SAlexandre Bounine 	pr_debug("%s: %s\n", DRV_NAME, __func__);
660f41e2472SAlexandre Bounine 
661f41e2472SAlexandre Bounine 	for (i = 0; i < RIONET_MAX_NETS; i++) {
/* Skip nets that were never brought up (no net_device allocated). */
662f41e2472SAlexandre Bounine 		if (!nets[i].ndev)
663f41e2472SAlexandre Bounine 			continue;
664f41e2472SAlexandre Bounine 
/*
 * Walk the peer list under the per-net lock; only peers with a non-NULL
 * active[] slot have joined, so only they get the LEAVE doorbell.
 * NOTE(review): rio_send_doorbell() is called with a spinlock held and
 * IRQs off — assumes it never sleeps; confirm against the mport driver.
 */
665*34ed2ebbSAlexandre Bounine 		spin_lock_irqsave(&nets[i].lock, flags);
666*34ed2ebbSAlexandre Bounine 		list_for_each_entry(peer, &nets[i].peers, node) {
667f41e2472SAlexandre Bounine 			if (nets[i].active[peer->rdev->destid]) {
668f41e2472SAlexandre Bounine 				rio_send_doorbell(peer->rdev,
669f41e2472SAlexandre Bounine 						  RIONET_DOORBELL_LEAVE);
670f41e2472SAlexandre Bounine 				nets[i].active[peer->rdev->destid] = NULL;
671f41e2472SAlexandre Bounine 			}
672f41e2472SAlexandre Bounine 		}
673*34ed2ebbSAlexandre Bounine 		spin_unlock_irqrestore(&nets[i].lock, flags);
674f41e2472SAlexandre Bounine 	}
675f41e2472SAlexandre Bounine 
676f41e2472SAlexandre Bounine 	return NOTIFY_DONE;
677f41e2472SAlexandre Bounine }
678f41e2472SAlexandre Bounine 
/* Module alias table: match any RapidIO device (peer discovery filters later). */
679e6161d64SAlexandre Bounine #ifdef MODULE
680f89efd52SMatt Porter static struct rio_device_id rionet_id_table[] = {
681e6161d64SAlexandre Bounine 	{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
682e6161d64SAlexandre Bounine 	{ 0, }	/* terminate list */
683f89efd52SMatt Porter };
684f89efd52SMatt Porter 
685e6161d64SAlexandre Bounine MODULE_DEVICE_TABLE(rapidio, rionet_id_table);
686e6161d64SAlexandre Bounine #endif
687e6161d64SAlexandre Bounine 
/* Bus interface: called for every device added to / removed from rio_bus. */
688e6161d64SAlexandre Bounine static struct subsys_interface rionet_interface = {
689f89efd52SMatt Porter 	.name		= "rionet",
690e6161d64SAlexandre Bounine 	.subsys		= &rio_bus_type,
691e6161d64SAlexandre Bounine 	.add_dev	= rionet_add_dev,
692e6161d64SAlexandre Bounine 	.remove_dev	= rionet_remove_dev,
693f89efd52SMatt Porter };
694f89efd52SMatt Porter 
/* Hook rionet_shutdown() into the reboot path so peers get LEAVE doorbells. */
695f41e2472SAlexandre Bounine static struct notifier_block rionet_notifier = {
696f41e2472SAlexandre Bounine 	.notifier_call = rionet_shutdown,
697f41e2472SAlexandre Bounine };
698f41e2472SAlexandre Bounine 
/*
 * Module init: register the reboot notifier first, then attach to the
 * RapidIO bus.  Devices already on the bus get add_dev callbacks from
 * subsys_interface_register().  Returns 0 or a negative errno.
 */
699f89efd52SMatt Porter static int __init rionet_init(void)
700f89efd52SMatt Porter {
701f41e2472SAlexandre Bounine 	int ret;
702f41e2472SAlexandre Bounine 
703f41e2472SAlexandre Bounine 	ret = register_reboot_notifier(&rionet_notifier);
704f41e2472SAlexandre Bounine 	if (ret) {
705f41e2472SAlexandre Bounine 		pr_err("%s: failed to register reboot notifier (err=%d)\n",
706f41e2472SAlexandre Bounine 		       DRV_NAME, ret);
707f41e2472SAlexandre Bounine 		return ret;
708f41e2472SAlexandre Bounine 	}
709e6161d64SAlexandre Bounine 	return subsys_interface_register(&rionet_interface);
710f89efd52SMatt Porter }
711f89efd52SMatt Porter 
/*
 * Module exit: tear each net down in order — unregister the net_device
 * (stops traffic), free the peer list, release the active[] page
 * allocation, then free the net_device itself.
 */
712f89efd52SMatt Porter static void __exit rionet_exit(void)
713f89efd52SMatt Porter {
714e6161d64SAlexandre Bounine 	struct rionet_private *rnet;
715e6161d64SAlexandre Bounine 	struct net_device *ndev;
716e6161d64SAlexandre Bounine 	struct rionet_peer *peer, *tmp;
717e6161d64SAlexandre Bounine 	int i;
718e6161d64SAlexandre Bounine 
719e6161d64SAlexandre Bounine 	for (i = 0; i < RIONET_MAX_NETS; i++) {
720e6161d64SAlexandre Bounine 		if (nets[i].ndev != NULL) {
721e6161d64SAlexandre Bounine 			ndev = nets[i].ndev;
/* rnet needed below only for mport->sys_size when sizing the free. */
722e6161d64SAlexandre Bounine 			rnet = netdev_priv(ndev);
723e6161d64SAlexandre Bounine 			unregister_netdev(ndev);
724e6161d64SAlexandre Bounine 
/*
 * NOTE(review): peers are freed here without a visible
 * rio_release_outb_dbell(peer->res) — confirm the doorbell resource is
 * released in rionet_remove_dev(), otherwise this leaks on unload.
 */
725e6161d64SAlexandre Bounine 			list_for_each_entry_safe(peer,
726e6161d64SAlexandre Bounine 						 tmp, &nets[i].peers, node) {
727e6161d64SAlexandre Bounine 				list_del(&peer->node);
728e6161d64SAlexandre Bounine 				kfree(peer);
729e6161d64SAlexandre Bounine 			}
730e6161d64SAlexandre Bounine 
/* active[] was a get_order()-sized page allocation; mirror that here. */
731e6161d64SAlexandre Bounine 			free_pages((unsigned long)nets[i].active,
732e6161d64SAlexandre Bounine 				   get_order(sizeof(void *) *
733e6161d64SAlexandre Bounine 				   RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size)));
734e6161d64SAlexandre Bounine 			nets[i].active = NULL;
735e6161d64SAlexandre Bounine 
736e6161d64SAlexandre Bounine 			free_netdev(ndev);
737e6161d64SAlexandre Bounine 		}
738e6161d64SAlexandre Bounine 	}
739e6161d64SAlexandre Bounine 
740f41e2472SAlexandre Bounine 	unregister_reboot_notifier(&rionet_notifier);
741e6161d64SAlexandre Bounine 	subsys_interface_unregister(&rionet_interface);
742f89efd52SMatt Porter }
743f89efd52SMatt Porter 
/* late_initcall: the RIO core must have enumerated the bus before we attach. */
7442f809985SAlexandre Bounine late_initcall(rionet_init);
745f89efd52SMatt Porter module_exit(rionet_exit);
746