/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <net/neighbour.h>
#include "cxgb4.h"
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"
#include "t4_values.h"

#define VLAN_NONE 0xfff

/* identifies sync vs async L2T_WRITE_REQs */
#define SYNC_WR_S    12
#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
#define SYNC_WR_F    SYNC_WR_V(1)

struct l2t_data {
	unsigned int l2t_start;     /* start index of our piece of the L2T */
	unsigned int l2t_size;      /* number of entries in l2tab */
	rwlock_t lock;
	atomic_t nfree;             /* number of free entries */
	struct l2t_entry *rover;    /* starting point for next allocation */
	struct l2t_entry l2tab[0];  /* MUST BE LAST */
};

static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> VLAN_PRIO_SHIFT;
}

static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	if (atomic_add_return(1, &e->refcnt) == 1)  /* 0 -> 1 transition */
		atomic_dec(&d->nfree);
}

/*
 * To avoid having to check address families we do not allow v4 and v6
 * neighbors to be on the same hash chain.  We keep v4 entries in the first
 * half of available hash buckets and v6 in the second.  We need at least two
 * entries in our L2T for this scheme to work.
 */
enum {
	L2T_MIN_HASH_BUCKETS = 2,
};

static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
				    int ifindex)
{
	unsigned int l2t_size_half = d->l2t_size / 2;

	return jhash_2words(*key, ifindex, 0) % l2t_size_half;
}

static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
				     int ifindex)
{
	unsigned int l2t_size_half = d->l2t_size / 2;
	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];

	return (l2t_size_half +
		(jhash_2words(xor, ifindex, 0) % l2t_size_half));
}

static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
			      int addr_len, int ifindex)
{
	return addr_len == 4 ? arp_hash(d, addr, ifindex) :
			       ipv6_hash(d, addr, ifindex);
}

/*
 * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
 * whether the L2T entry and the address are of the same address family.
 * Callers ensure an address is only checked against L2T entries of the same
 * family, something made trivial by the separation of IP and IPv6 hash chains
 * mentioned above.  Returns 0 if there's a match.
 */
static int addreq(const struct l2t_entry *e, const u32 *addr)
{
	if (e->v6)
		return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
		       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
	return e->addr[0] ^ addr[0];
}

static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
	struct l2t_data *d = adap->l2t;
	unsigned int l2t_idx = e->idx + d->l2t_start;
	struct sk_buff *skb;
	struct cpl_l2t_write_req *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);

	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
					l2t_idx | (sync ? SYNC_WR_F : 0) |
					TID_QID_V(adap->sge.fw_evtq.abs_id)));
	req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
	req->l2t_idx = htons(l2t_idx);
	req->vlan = htons(e->vlan);
	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

	t4_mgmt_tx(adap, skb);

	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;
	return 0;
}

/*
 * Send packets waiting in an L2T entry's ARP queue.  Must be called with the
 * entry locked.
 */
static void send_pending(struct adapter *adap, struct l2t_entry *e)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&e->arpq)) != NULL)
		t4_ofld_send(adap, skb);
}

/*
 * Process a CPL_L2T_WRITE_RPL.  Wake up the ARP queue if it completes a
 * synchronous L2T_WRITE.  Note that the TID in the reply is really the L2T
 * index it refers to.
 */
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
	struct l2t_data *d = adap->l2t;
	unsigned int tid = GET_TID(rpl);
	unsigned int l2t_idx = tid % L2T_SIZE;

	if (unlikely(rpl->status != CPL_ERR_NONE)) {
		dev_err(adap->pdev_dev,
			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
			rpl->status, l2t_idx);
		return;
	}

	if (tid & SYNC_WR_F) {
		struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];

		spin_lock(&e->lock);
		if (e->state != L2T_STATE_SWITCHING) {
			send_pending(adap, e);
			e->state = (e->neigh->nud_state & NUD_STALE) ?
					L2T_STATE_STALE : L2T_STATE_VALID;
		}
		spin_unlock(&e->lock);
	}
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	__skb_queue_tail(&e->arpq, skb);
}

int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
		   struct l2t_entry *e)
{
	struct adapter *adap = netdev2adap(dev);

again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		/* fall through */
	case L2T_STATE_VALID:	/* fast-path, send the packet on */
		return t4_ofld_send(adap, skb);
	case L2T_STATE_RESOLVING:
	case L2T_STATE_SYNC_WRITE:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_SYNC_WRITE &&
		    e->state != L2T_STATE_RESOLVING) {
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		if (e->state == L2T_STATE_RESOLVING &&
		    !neigh_event_send(e->neigh, NULL)) {
			spin_lock_bh(&e->lock);
			if (e->state == L2T_STATE_RESOLVING &&
			    !skb_queue_empty(&e->arpq))
				write_l2e(adap, e, 1);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_l2t_send);

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

	for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING)
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}

	e->state = L2T_STATE_UNUSED;
	return e;
}

static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
					   u8 port, u8 *dmac)
{
	struct l2t_entry *end, *e, **p;
	struct l2t_entry *first_free = NULL;

	for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
		if (atomic_read(&e->refcnt) == 0) {
			if (!first_free)
				first_free = e;
		} else {
			if (e->state == L2T_STATE_SWITCHING) {
				if (ether_addr_equal(e->dmac, dmac) &&
				    (e->vlan == vlan) && (e->lport == port))
					goto exists;
			}
		}
	}

	if (first_free) {
		e = first_free;
		goto found;
	}

	return NULL;

found:
	/* The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING)
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
	e->state = L2T_STATE_UNUSED;

exists:
	return e;
}

/* Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
static void _t4_l2e_free(struct l2t_entry *e)
{
	struct l2t_data *d;
	struct sk_buff *skb;

	if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
		while ((skb = __skb_dequeue(&e->arpq)) != NULL)
			kfree_skb(skb);
	}

	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_inc(&d->nfree);
}

/* Locked version of _t4_l2e_free */
static void t4_l2e_free(struct l2t_entry *e)
{
	struct l2t_data *d;
	struct sk_buff *skb;

	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
		while ((skb = __skb_dequeue(&e->arpq)) != NULL)
			kfree_skb(skb);
	}
	spin_unlock_bh(&e->lock);

	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_inc(&d->nfree);
}

void cxgb4_l2t_release(struct l2t_entry *e)
{
	if (atomic_dec_and_test(&e->refcnt))
		t4_l2e_free(e);
}
EXPORT_SYMBOL(cxgb4_l2t_release);

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);  /* avoid race with t4_l2t_free */
	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
				const struct net_device *physdev,
				unsigned int priority)
{
	u8 lport;
	u16 vlan;
	struct l2t_entry *e;
	int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *)neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(d, addr, addr_len, ifidx);

	if (neigh->dev->flags & IFF_LOOPBACK)
		lport = netdev2pinfo(physdev)->tx_chan + 4;
	else
		lport = netdev2pinfo(physdev)->lport;

	if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
		vlan = vlan_dev_vlan_id(neigh->dev);
	else
		vlan = VLAN_NONE;

	write_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx &&
		    e->vlan == vlan && e->lport == lport) {
			l2t_hold(d, e);
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);  /* avoid race with t4_l2t_free */
		e->state = L2T_STATE_RESOLVING;
		if (neigh->dev->flags & IFF_LOOPBACK)
			memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
		memcpy(e->addr, addr, addr_len);
		e->ifindex = ifidx;
		e->hash = hash;
		e->lport = lport;
		e->v6 = addr_len == 16;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		e->vlan = vlan;
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		spin_unlock(&e->lock);
	}
done:
	write_unlock_bh(&d->lock);
	return e;
}
EXPORT_SYMBOL(cxgb4_l2t_get);

u64 cxgb4_select_ntuple(struct net_device *dev,
			const struct l2t_entry *l2t)
{
	struct adapter *adap = netdev2adap(dev);
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple = 0;

	/* Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
		ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (u64)l2t->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0) {
		u32 viid = cxgb4_port_viid(dev);
		u32 vf = FW_VIID_VIN_G(viid);
		u32 pf = FW_VIID_PFN_G(viid);
		u32 vld = FW_VIID_VIVLD_G(viid);

		ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
				FT_VNID_ID_PF_V(pf) |
				FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
	}

	return ntuple;
}
EXPORT_SYMBOL(cxgb4_select_ntuple);

/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the device.
 */
static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
		const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

		spin_unlock(&e->lock);
		if (cb->arp_err_handler)
			cb->arp_err_handler(cb->handle, skb);
		else
			t4_ofld_send(adap, skb);
		spin_lock(&e->lock);
	}
}

/*
 * Called when the host's neighbor layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
	struct l2t_entry *e;
	struct sk_buff_head *arpq = NULL;
	struct l2t_data *d = adap->l2t;
	int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(d, addr, addr_len, ifidx);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			if (atomic_read(&e->refcnt))
				goto found;
			spin_unlock(&e->lock);
			break;
		}
	read_unlock_bh(&d->lock);
	return;

found:
	read_unlock(&d->lock);

	if (neigh != e->neigh)
		neigh_replace(e, neigh);

	if (e->state == L2T_STATE_RESOLVING) {
		if (neigh->nud_state & NUD_FAILED) {
			arpq = &e->arpq;
		} else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
			   !skb_queue_empty(&e->arpq)) {
			write_l2e(adap, e, 1);
		}
	} else {
		e->state = neigh->nud_state & NUD_CONNECTED ?
			L2T_STATE_VALID : L2T_STATE_STALE;
		if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
			write_l2e(adap, e, 0);
	}

	if (arpq)
		handle_failed_resolution(adap, e);
	spin_unlock_bh(&e->lock);
}

/* Allocate an L2T entry for use by a switching rule.  Such entries need to be
 * explicitly freed and while busy they are not on any hash chain, so normal
 * address resolution updates do not see them.
 */
struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
					 u8 port, u8 *eth_addr)
{
	struct l2t_data *d = adap->l2t;
	struct l2t_entry *e;
	int ret;

	write_lock_bh(&d->lock);
	e = find_or_alloc_l2e(d, vlan, port, eth_addr);
	if (e) {
		spin_lock(&e->lock);  /* avoid race with t4_l2t_free */
		if (!atomic_read(&e->refcnt)) {
			e->state = L2T_STATE_SWITCHING;
			e->vlan = vlan;
			e->lport = port;
			ether_addr_copy(e->dmac, eth_addr);
			atomic_set(&e->refcnt, 1);
			ret = write_l2e(adap, e, 0);
			if (ret < 0) {
				_t4_l2e_free(e);
				spin_unlock(&e->lock);
				write_unlock_bh(&d->lock);
				return NULL;
			}
		} else {
			atomic_inc(&e->refcnt);
		}

		spin_unlock(&e->lock);
	}
	write_unlock_bh(&d->lock);
	return e;
}

/**
 * cxgb4_l2t_alloc_switching - Allocate an L2T entry for a switching filter
 * @dev: net_device pointer
 * @vlan: VLAN Id
 * @port: Associated port
 * @dmac: Destination MAC address to add to L2T
 * Returns pointer to the allocated l2t entry
 *
 * Allocates an L2T entry for use by switching rule of a filter
 */
struct l2t_entry *cxgb4_l2t_alloc_switching(struct net_device *dev, u16 vlan,
					    u8 port, u8 *dmac)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_l2t_alloc_switching(adap, vlan, port, dmac);
}
EXPORT_SYMBOL(cxgb4_l2t_alloc_switching);

struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
{
	unsigned int l2t_size;
	int i;
	struct l2t_data *d;

	if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
		return NULL;
	l2t_size = l2t_end - l2t_start + 1;
	if (l2t_size < L2T_MIN_HASH_BUCKETS)
		return NULL;

	d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
	if (!d)
		return NULL;

	d->l2t_start = l2t_start;
	d->l2t_size = l2t_size;

	d->rover = d->l2tab;
	atomic_set(&d->nfree, l2t_size);
	rwlock_init(&d->lock);

	for (i = 0; i < d->l2t_size; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		spin_lock_init(&d->l2tab[i].lock);
		atomic_set(&d->l2tab[i].refcnt, 0);
		skb_queue_head_init(&d->l2tab[i].arpq);
	}
	return d;
}

static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
{
	struct l2t_data *d = seq->private;

	return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
}

static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	v = l2t_get_idx(seq, *pos);
	if (v)
		++*pos;
	return v;
}

static void l2t_seq_stop(struct seq_file *seq, void *v)
{
}

static char l2e_state(const struct l2t_entry *e)
{
	switch (e->state) {
	case L2T_STATE_VALID: return 'V';
	case L2T_STATE_STALE: return 'S';
	case L2T_STATE_SYNC_WRITE: return 'W';
	case L2T_STATE_RESOLVING:
		return skb_queue_empty(&e->arpq) ? 'R' : 'A';
	case L2T_STATE_SWITCHING: return 'X';
	default:
		return 'U';
	}
}

static int l2t_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, " Idx IP address                "
			 "Ethernet address  VLAN/P LP State Users Port\n");
	else {
		char ip[60];
		struct l2t_data *d = seq->private;
		struct l2t_entry *e = v;

		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_SWITCHING)
			ip[0] = '\0';
		else
			sprintf(ip, e->v6 ?
"%pI6c" : "%pI4", e->addr); 725 seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n", 726 e->idx + d->l2t_start, ip, e->dmac, 727 e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport, 728 l2e_state(e), atomic_read(&e->refcnt), 729 e->neigh ? e->neigh->dev->name : ""); 730 spin_unlock_bh(&e->lock); 731 } 732 return 0; 733 } 734 735 static const struct seq_operations l2t_seq_ops = { 736 .start = l2t_seq_start, 737 .next = l2t_seq_next, 738 .stop = l2t_seq_stop, 739 .show = l2t_seq_show 740 }; 741 742 static int l2t_seq_open(struct inode *inode, struct file *file) 743 { 744 int rc = seq_open(file, &l2t_seq_ops); 745 746 if (!rc) { 747 struct adapter *adap = inode->i_private; 748 struct seq_file *seq = file->private_data; 749 750 seq->private = adap->l2t; 751 } 752 return rc; 753 } 754 755 const struct file_operations t4_l2t_fops = { 756 .owner = THIS_MODULE, 757 .open = l2t_seq_open, 758 .read = seq_read, 759 .llseek = seq_lseek, 760 .release = seq_release, 761 }; 762