/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/neighbour.h>
#include "common.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

#define VLAN_NONE 0xfff

/*
 * Module locking notes:  There is a RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock, individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  An L2T entry can be dropped by decrementing its reference count
 * and therefore can happen in parallel with entry allocation but no entry
 * can change state or increment its ref count during allocation as both of
 * these perform lookups.
 */
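
/*
 * Illustrative sketch (compiled out, not a driver entry point): the lock
 * nesting described above, shown as a hypothetical per-entry update path.
 * The table lock is taken first, as a reader, then the entry's spinlock.
 */
#if 0
static void example_mark_entry_stale(struct l2t_data *d, struct l2t_entry *e)
{
	read_lock_bh(&d->lock);		/* table lock first, as reader */
	spin_lock(&e->lock);		/* entry lock nests inside */
	e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
	read_unlock_bh(&d->lock);
}
#endif
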
static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> 13;
}
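
/*
 * e->vlan holds an 802.1Q tag control field: the priority sits in bits
 * 15:13 (extracted by vlan_prio() above) and the VLAN ID in bits 11:0.
 * For example, e->vlan = 0x6005 yields vlan_prio() == 3 and VLAN ID 5
 * (0x6005 & VLAN_VID_MASK).
 */
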
static inline unsigned int arp_hash(u32 key, int ifindex,
				    const struct l2t_data *d)
{
	return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
}

static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/*
 * Set up an L2T entry and send any packets waiting in the arp queue.  The
 * supplied skb is used for the CPL_L2T_WRITE_REQ.  Must be called with the
 * entry locked.
 */
static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
				  struct l2t_entry *e)
{
	struct cpl_l2t_write_req *req;
	struct sk_buff *tmp;

	if (!skb) {
		skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;
	}

	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
	req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
			    V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
			    V_L2T_W_PRIO(vlan_prio(e)));
	memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
	skb->priority = CPL_PRIORITY_CONTROL;
	cxgb3_ofld_send(dev, skb);

	skb_queue_walk_safe(&e->arpq, skb, tmp) {
		__skb_unlink(skb, &e->arpq);
		cxgb3_ofld_send(dev, skb);
	}
	e->state = L2T_STATE_VALID;

	return 0;
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	__skb_queue_tail(&e->arpq, skb);
}
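
/*
 * Overview of the entry states driving the two send paths below (a summary
 * of this file's code, not an authoritative hardware description):
 *
 *   RESOLVING -> VALID   when the neighbour resolves and the entry is
 *                        written to the HW L2 table;
 *   VALID     -> STALE   when the host neighbour entry loses
 *                        NUD_CONNECTED (see t3_l2t_update());
 *   STALE     -> VALID   after a revalidation kick via neigh_event_send().
 *
 * Packets sent while an entry is RESOLVING wait on the entry's arpq.
 */
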
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
		     struct l2t_entry *e)
{
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		/* fall through */
	case L2T_STATE_VALID:	/* fast-path, send the packet on */
		return cxgb3_ofld_send(dev, skb);
	case L2T_STATE_RESOLVING:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* ARP already completed */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		/*
		 * Only the first packet added to the arpq should kick off
		 * resolution.  However, because the alloc_skb below can fail,
		 * we allow each packet added to the arpq to retry resolution
		 * as a way of recovering from transient memory exhaustion.
		 * A better way would be to use a work request to retry L2T
		 * entries when there's no memory.
		 */
		if (!neigh_event_send(e->neigh, NULL)) {
			skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
					GFP_ATOMIC);
			if (!skb)
				break;

			spin_lock_bh(&e->lock);
			if (!skb_queue_empty(&e->arpq))
				setup_l2e_send_pending(dev, skb, e);
			else	/* we lost the race */
				__kfree_skb(skb);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}

EXPORT_SYMBOL(t3_l2t_send_slow);

void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
{
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		return;
	case L2T_STATE_VALID:	/* fast-path, send the packet on */
		return;
	case L2T_STATE_RESOLVING:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* ARP already completed */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		spin_unlock_bh(&e->lock);

		/*
		 * Only the first packet added to the arpq should kick off
		 * resolution.  There is no skb to allocate here, so just
		 * give the neighbour another kick; see the corresponding
		 * comment in t3_l2t_send_slow().
		 */
		neigh_event_send(e->neigh, NULL);
	}
}

EXPORT_SYMBOL(t3_l2t_send_event);
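
/*
 * Hypothetical caller sketch (compiled out): how an offload path might push
 * a CPL packet through the slow path above.  build_cpl_skb() is a made-up
 * helper for illustration, not part of this driver.
 */
#if 0
static int example_send(struct t3cdev *dev, struct l2t_entry *e)
{
	struct sk_buff *skb = build_cpl_skb();	/* hypothetical helper */

	if (!skb)
		return -ENOMEM;
	/* Queues on e->arpq if e is still resolving, sends otherwise. */
	return t3_l2t_send_slow(dev, skb, e);
}
#endif
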
/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

	for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state != L2T_STATE_UNUSED) {
		int hash = arp_hash(e->addr, e->ifindex, d);

		for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				break;
			}
		e->state = L2T_STATE_UNUSED;
	}
	return e;
}

/*
 * Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {	/* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
	}
	spin_unlock_bh(&e->lock);
	atomic_inc(&d->nfree);
}

EXPORT_SYMBOL(t3_l2e_free);
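
/*
 * Reference lifecycle sketch (compiled out): a simplified version of the
 * pattern l2t.h's l2t_release() helper implements.  Users drop their
 * reference this way; the entry stays in its hash chain so a quick
 * re-lookup can revive it via reuse_entry().
 */
#if 0
static void example_put_entry(struct t3cdev *dev, struct l2t_entry *e)
{
	struct l2t_data *d;

	rcu_read_lock();
	d = L2DATA(dev);	/* may be NULL during device teardown */
	if (atomic_dec_and_test(&e->refcnt) && d)
		t3_l2e_free(d, e);
	rcu_read_unlock();
}
#endif
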
/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);	/* avoid race with t3_l2t_free */

	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
			     struct net_device *dev, const void *daddr)
{
	struct l2t_entry *e = NULL;
	struct neighbour *neigh;
	struct port_info *p;
	struct l2t_data *d;
	int hash;
	u32 addr;
	int ifidx;
	int smt_idx;

	rcu_read_lock();
	neigh = dst_neigh_lookup(dst, daddr);
	if (!neigh)
		goto done_rcu;

	addr = *(u32 *) neigh->primary_key;
	ifidx = neigh->dev->ifindex;

	if (!dev)
		dev = neigh->dev;
	p = netdev_priv(dev);
	smt_idx = p->port_id;

	d = L2DATA(cdev);
	if (!d)
		goto done_rcu;

	hash = arp_hash(addr, ifidx, d);
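	/*
	 * (Compiled-out consumer sketch: an offload path resolving the L2
	 * header for a connection's route.  The caller and peer_ip argument
	 * are assumptions for illustration; real callers live in the cxgb3
	 * offload drivers.)
	 *
	 * #if 0
	 * static struct l2t_entry *example_resolve(struct t3cdev *tdev,
	 *					    struct dst_entry *dst,
	 *					    __be32 peer_ip)
	 * {
	 *	// Takes a reference on the returned entry; drop it with
	 *	// l2t_release() when the connection goes away.
	 *	return t3_l2t_get(tdev, dst, NULL, &peer_ip);
	 * }
	 * #endif
	 */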
	write_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx &&
		    e->smt_idx == smt_idx) {
			l2t_hold(d, e);
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done_unlock;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);	/* avoid race with t3_l2t_free */
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		e->state = L2T_STATE_RESOLVING;
		e->addr = addr;
		e->ifindex = ifidx;
		e->smt_idx = smt_idx;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
			e->vlan = vlan_dev_vlan_id(neigh->dev);
		else
			e->vlan = VLAN_NONE;
		spin_unlock(&e->lock);
	}
done_unlock:
	write_unlock_bh(&d->lock);
done_rcu:
	if (neigh)
		neigh_release(neigh);
	rcu_read_unlock();
	return e;
}

EXPORT_SYMBOL(t3_l2t_get);

/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the offload device.
 *
 * XXX: maybe we should abandon the latter behavior and just require a failure
 * handler.
 */
static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(arpq, skb, tmp) {
		struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

		__skb_unlink(skb, arpq);
		if (cb->arp_failure_handler)
			cb->arp_failure_handler(dev, skb);
		else
			cxgb3_ofld_send(dev, skb);
	}
}
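
/*
 * Failure-handler sketch (compiled out).  l2t.h provides
 * set_arp_failure_handler() to attach a per-skb callback in the skb's
 * control block; the handler below is a made-up example that simply frees
 * the packet instead of letting it fall through to the offload device.
 */
#if 0
static void example_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	kfree_skb(skb);		/* give up on this packet */
}

static void example_queue_with_handler(struct t3cdev *dev,
				       struct sk_buff *skb,
				       struct l2t_entry *e)
{
	set_arp_failure_handler(skb, example_arp_failure);
	t3_l2t_send_slow(dev, skb, e);
}
#endif
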
/*
 * Called when the host's ARP layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
{
	struct sk_buff_head arpq;
	struct l2t_entry *e;
	struct l2t_data *d = L2DATA(dev);
	u32 addr = *(u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = arp_hash(addr, ifidx, d);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			goto found;
		}
	read_unlock_bh(&d->lock);
	return;

found:
	__skb_queue_head_init(&arpq);

	/* BH stays disabled here; spin_unlock_bh() below re-enables it */
	read_unlock(&d->lock);
	if (atomic_read(&e->refcnt)) {
		if (neigh != e->neigh)
			neigh_replace(e, neigh);

		if (e->state == L2T_STATE_RESOLVING) {
			if (neigh->nud_state & NUD_FAILED) {
				skb_queue_splice_init(&e->arpq, &arpq);
			} else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
				setup_l2e_send_pending(dev, NULL, e);
		} else {
			e->state = neigh->nud_state & NUD_CONNECTED ?
			    L2T_STATE_VALID : L2T_STATE_STALE;
			if (!ether_addr_equal(e->dmac, neigh->ha))
				setup_l2e_send_pending(dev, NULL, e);
		}
	}
	spin_unlock_bh(&e->lock);

	if (!skb_queue_empty(&arpq))
		handle_failed_resolution(dev, &arpq);
}

struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
	struct l2t_data *d;
	int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);

	d = cxgb_alloc_mem(size);
	if (!d)
		return NULL;

	d->nentries = l2t_capacity;
	d->rover = &d->l2tab[1];	/* entry 0 is not used */
	atomic_set(&d->nfree, l2t_capacity - 1);
	rwlock_init(&d->lock);

	for (i = 0; i < l2t_capacity; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		__skb_queue_head_init(&d->l2tab[i].arpq);
		spin_lock_init(&d->l2tab[i].lock);
		atomic_set(&d->l2tab[i].refcnt, 0);
	}
	return d;
}

void t3_free_l2t(struct l2t_data *d)
{
	cxgb_free_mem(d);
}
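
/*
 * Initialization sketch (compiled out).  arp_hash() masks with
 * (nentries - 1), so the capacity passed to t3_init_l2t() must be a power
 * of two; the value 2048 below is only an example, the real capacity comes
 * from the adapter configuration.
 */
#if 0
static struct l2t_data *example_init(void)
{
	struct l2t_data *d = t3_init_l2t(2048);	/* power of two */

	if (!d)
		return NULL;
	/* ... attach to the t3cdev; tear down with t3_free_l2t(d) ... */
	return d;
}
#endif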