/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <net/neighbour.h>
#include "cxgb4.h"
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"
#include "t4_values.h"

#define VLAN_NONE 0xfff

/* identifies sync vs async L2T_WRITE_REQs */
#define SYNC_WR_S    12
#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
#define SYNC_WR_F    SYNC_WR_V(1)

struct l2t_data {
	unsigned int l2t_start;     /* start index of our piece of the L2T */
	unsigned int l2t_size;      /* number of entries in l2tab */
	rwlock_t lock;
	atomic_t nfree;             /* number of free entries */
	struct l2t_entry *rover;    /* starting point for next allocation */
	struct l2t_entry l2tab[0];  /* MUST BE LAST */
};

static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> 13;
}

static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	if (atomic_add_return(1, &e->refcnt) == 1)  /* 0 -> 1 transition */
		atomic_dec(&d->nfree);
}

/*
 * To avoid having to check address families we do not allow v4 and v6
 * neighbors to be on the same hash chain.  We keep v4 entries in the first
 * half of available hash buckets and v6 in the second.  We need at least two
 * entries in our L2T for this scheme to work.
 */
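/*
 * For example, with an l2t_size of 64 entries, arp_hash() below maps IPv4
 * keys to buckets [0, 31] while ipv6_hash() maps IPv6 keys to buckets
 * [32, 63], so a lookup never walks a chain of the other address family.
 */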
enum {
	L2T_MIN_HASH_BUCKETS = 2,
};

static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
				    int ifindex)
{
	unsigned int l2t_size_half = d->l2t_size / 2;

	return jhash_2words(*key, ifindex, 0) % l2t_size_half;
}

static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
				     int ifindex)
{
	unsigned int l2t_size_half = d->l2t_size / 2;
	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];

	return (l2t_size_half +
		(jhash_2words(xor, ifindex, 0) % l2t_size_half));
}

static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
			      int addr_len, int ifindex)
{
	return addr_len == 4 ? arp_hash(d, addr, ifindex) :
			       ipv6_hash(d, addr, ifindex);
}

/*
 * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
 * whether the L2T entry and the address are of the same address family.
 * Callers ensure an address is only checked against L2T entries of the same
 * family, something made trivial by the separation of IP and IPv6 hash chains
 * mentioned above.  Returns 0 if there's a match.
 */
static int addreq(const struct l2t_entry *e, const u32 *addr)
{
	if (e->v6)
		return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
		       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
	return e->addr[0] ^ addr[0];
}

static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
	struct l2t_data *d = adap->l2t;
	unsigned int l2t_idx = e->idx + d->l2t_start;
	struct sk_buff *skb;
	struct cpl_l2t_write_req *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);

	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
					l2t_idx | (sync ? SYNC_WR_F : 0) |
					TID_QID_V(adap->sge.fw_evtq.abs_id)));
	req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
	req->l2t_idx = htons(l2t_idx);
	req->vlan = htons(e->vlan);
	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
	t4_ofld_send(adap, skb);

	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;
	return 0;
}

/*
 * Send packets waiting in an L2T entry's ARP queue.  Must be called with the
 * entry locked.
 */
static void send_pending(struct adapter *adap, struct l2t_entry *e)
{
	while (e->arpq_head) {
		struct sk_buff *skb = e->arpq_head;

		e->arpq_head = skb->next;
		skb->next = NULL;
		t4_ofld_send(adap, skb);
	}
	e->arpq_tail = NULL;
}

/*
 * Process a CPL_L2T_WRITE_RPL.  Wake up the ARP queue if it completes a
 * synchronous L2T_WRITE.  Note that the TID in the reply is really the L2T
 * index it refers to.
 */
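/*
 * The TID field of the reply carries the L2T index in its low bits together
 * with the SYNC_WR_F flag (bit 12) and the firmware event queue ID that
 * write_l2e() encoded above it.  Assuming L2T_SIZE is the usual power of two
 * (4096), tid % L2T_SIZE strips those upper bits and recovers the bare index,
 * while tid & SYNC_WR_F tells us whether the completed write was synchronous.
 */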
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
	struct l2t_data *d = adap->l2t;
	unsigned int tid = GET_TID(rpl);
	unsigned int l2t_idx = tid % L2T_SIZE;

	if (unlikely(rpl->status != CPL_ERR_NONE)) {
		dev_err(adap->pdev_dev,
			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
			rpl->status, l2t_idx);
		return;
	}

	if (tid & SYNC_WR_F) {
		struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];

		spin_lock(&e->lock);
		if (e->state != L2T_STATE_SWITCHING) {
			send_pending(adap, e);
			e->state = (e->neigh->nud_state & NUD_STALE) ?
					L2T_STATE_STALE : L2T_STATE_VALID;
		}
		spin_unlock(&e->lock);
	}
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	skb->next = NULL;
	if (e->arpq_head)
		e->arpq_tail->next = skb;
	else
		e->arpq_head = skb;
	e->arpq_tail = skb;
}

int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
		   struct l2t_entry *e)
{
	struct adapter *adap = netdev2adap(dev);

again:
	switch (e->state) {
	case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		/* Fall through */
	case L2T_STATE_VALID:     /* fast-path, send the packet on */
		return t4_ofld_send(adap, skb);
	case L2T_STATE_RESOLVING:
	case L2T_STATE_SYNC_WRITE:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_SYNC_WRITE &&
		    e->state != L2T_STATE_RESOLVING) {
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		if (e->state == L2T_STATE_RESOLVING &&
		    !neigh_event_send(e->neigh, NULL)) {
			spin_lock_bh(&e->lock);
			if (e->state == L2T_STATE_RESOLVING && e->arpq_head)
				write_l2e(adap, e, 1);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_l2t_send);

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

	for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING)
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}

	e->state = L2T_STATE_UNUSED;
	return e;
}

/*
 * Called when an L2T entry has no more users.
 */
static void t4_l2e_free(struct l2t_entry *e)
{
	struct l2t_data *d;

	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
		while (e->arpq_head) {
			struct sk_buff *skb = e->arpq_head;

			e->arpq_head = skb->next;
			kfree_skb(skb);
		}
		e->arpq_tail = NULL;
	}
	spin_unlock_bh(&e->lock);

	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_inc(&d->nfree);
}

void cxgb4_l2t_release(struct l2t_entry *e)
{
	if (atomic_dec_and_test(&e->refcnt))
		t4_l2e_free(e);
}
EXPORT_SYMBOL(cxgb4_l2t_release);
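/*
 * A typical offload client uses the exported L2T interface roughly as in the
 * sketch below (names are illustrative; the caller is assumed to already
 * hold a resolved neighbour for the connection's next hop):
 *
 *	e = cxgb4_l2t_get(adap->l2t, neigh, egress_dev, priority);
 *	if (!e)
 *		return -ENOMEM;
 *	...
 *	err = cxgb4_l2t_send(egress_dev, skb, e);	// per offload packet
 *	...
 *	cxgb4_l2t_release(e);				// at connection teardown
 *
 * cxgb4_l2t_get() (below) takes a reference on the entry, cxgb4_l2t_send()
 * queues packets internally while the entry is still resolving, and
 * cxgb4_l2t_release() drops the reference, freeing the entry once its last
 * user is gone.
 */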
/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);                /* avoid race with t4_l2e_free */
	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
				const struct net_device *physdev,
				unsigned int priority)
{
	u8 lport;
	u16 vlan;
	struct l2t_entry *e;
	int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *)neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(d, addr, addr_len, ifidx);

	if (neigh->dev->flags & IFF_LOOPBACK)
		lport = netdev2pinfo(physdev)->tx_chan + 4;
	else
		lport = netdev2pinfo(physdev)->lport;

	if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
		vlan = vlan_dev_vlan_id(neigh->dev);
	else
		vlan = VLAN_NONE;

	write_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx &&
		    e->vlan == vlan && e->lport == lport) {
			l2t_hold(d, e);
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);        /* avoid race with t4_l2e_free */
		e->state = L2T_STATE_RESOLVING;
		if (neigh->dev->flags & IFF_LOOPBACK)
			memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
		memcpy(e->addr, addr, addr_len);
		e->ifindex = ifidx;
		e->hash = hash;
		e->lport = lport;
		e->v6 = addr_len == 16;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		e->vlan = vlan;
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		spin_unlock(&e->lock);
	}
done:
	write_unlock_bh(&d->lock);
	return e;
}
EXPORT_SYMBOL(cxgb4_l2t_get);
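/*
 * Worked example for the Compressed Filter Tuple assembled below, using
 * purely hypothetical field positions (the real shifts come from
 * adap->params.tp and depend on the firmware's filter configuration): with
 * protocol_shift = 0, port_shift = 8 and vlan_shift = 16, a TCP flow
 * (IPPROTO_TCP == 6) leaving port 1 on VLAN 100 would yield
 *
 *	ntuple = ((u64)6 << 0) |
 *		 ((u64)1 << 8) |
 *		 ((u64)(FT_VLAN_VLD_F | 100) << 16);
 *
 * Fields whose shift is negative are not part of the firmware's tuple and
 * are simply skipped.
 */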
u64 cxgb4_select_ntuple(struct net_device *dev,
			const struct l2t_entry *l2t)
{
	struct adapter *adap = netdev2adap(dev);
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple = 0;

	/* Initialize each of the fields we care about that are present in the
	 * Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
		ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (u64)l2t->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0) {
		u32 viid = cxgb4_port_viid(dev);
		u32 vf = FW_VIID_VIN_G(viid);
		u32 pf = FW_VIID_PFN_G(viid);
		u32 vld = FW_VIID_VIVLD_G(viid);

		ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
				FT_VNID_ID_PF_V(pf) |
				FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
	}

	return ntuple;
}
EXPORT_SYMBOL(cxgb4_select_ntuple);

/*
 * Called when address resolution fails for an L2T entry, to handle the
 * packets on its ARP queue.  If a packet specifies a failure handler it is
 * invoked, otherwise the packet is sent to the device.
 */
static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq)
{
	while (arpq) {
		struct sk_buff *skb = arpq;
		const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

		arpq = skb->next;
		skb->next = NULL;
		if (cb->arp_err_handler)
			cb->arp_err_handler(cb->handle, skb);
		else
			t4_ofld_send(adap, skb);
	}
}

/*
 * Called when the host's neighbor layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
	struct l2t_entry *e;
	struct sk_buff *arpq = NULL;
	struct l2t_data *d = adap->l2t;
	int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(d, addr, addr_len, ifidx);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			if (atomic_read(&e->refcnt))
				goto found;
			spin_unlock(&e->lock);
			break;
		}
	read_unlock_bh(&d->lock);
	return;

found:
	read_unlock(&d->lock);

	if (neigh != e->neigh)
		neigh_replace(e, neigh);

	if (e->state == L2T_STATE_RESOLVING) {
		if (neigh->nud_state & NUD_FAILED) {
			arpq = e->arpq_head;
			e->arpq_head = e->arpq_tail = NULL;
		} else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
			   e->arpq_head) {
			write_l2e(adap, e, 1);
		}
	} else {
		e->state = neigh->nud_state & NUD_CONNECTED ?
			L2T_STATE_VALID : L2T_STATE_STALE;
		if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
			write_l2e(adap, e, 0);
	}

	spin_unlock_bh(&e->lock);

	if (arpq)
		handle_failed_resolution(adap, arpq);
}

/* Allocate an L2T entry for use by a switching rule.  Such entries need to be
 * explicitly freed and while busy they are not on any hash chain, so normal
 * address resolution updates do not see them.
 */
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
{
	struct l2t_entry *e;

	write_lock_bh(&d->lock);
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);        /* avoid race with t4_l2e_free */
		e->state = L2T_STATE_SWITCHING;
		atomic_set(&e->refcnt, 1);
		spin_unlock(&e->lock);
	}
	write_unlock_bh(&d->lock);
	return e;
}
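/*
 * The two switching-entry routines are meant to be used as a pair: a caller
 * first allocates a raw entry and then programs it, roughly as in this
 * sketch (vlan, port and dmac are the caller's own values):
 *
 *	e = t4_l2t_alloc_switching(adap->l2t);
 *	if (!e)
 *		return -ENOMEM;
 *	err = t4_l2t_set_switching(adap, e, vlan, port, dmac);
 *
 * When the switching rule is removed the entry is dropped with
 * cxgb4_l2t_release(), since such entries must be freed explicitly.
 */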
/* Sets/updates the contents of a switching L2T entry that has been allocated
 * with an earlier call to @t4_l2t_alloc_switching.
 */
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
			 u8 port, u8 *eth_addr)
{
	e->vlan = vlan;
	e->lport = port;
	memcpy(e->dmac, eth_addr, ETH_ALEN);
	return write_l2e(adap, e, 0);
}

struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
{
	unsigned int l2t_size;
	int i;
	struct l2t_data *d;

	if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
		return NULL;
	l2t_size = l2t_end - l2t_start + 1;
	if (l2t_size < L2T_MIN_HASH_BUCKETS)
		return NULL;

	d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
	if (!d)
		return NULL;

	d->l2t_start = l2t_start;
	d->l2t_size = l2t_size;

	d->rover = d->l2tab;
	atomic_set(&d->nfree, l2t_size);
	rwlock_init(&d->lock);

	for (i = 0; i < d->l2t_size; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		spin_lock_init(&d->l2tab[i].lock);
		atomic_set(&d->l2tab[i].refcnt, 0);
	}
	return d;
}

static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
{
	struct l2t_data *d = seq->private;

	return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
}

static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	v = l2t_get_idx(seq, *pos);
	if (v)
		++*pos;
	return v;
}

static void l2t_seq_stop(struct seq_file *seq, void *v)
{
}

static char l2e_state(const struct l2t_entry *e)
{
	switch (e->state) {
	case L2T_STATE_VALID: return 'V';
	case L2T_STATE_STALE: return 'S';
	case L2T_STATE_SYNC_WRITE: return 'W';
	case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
	case L2T_STATE_SWITCHING: return 'X';
	default:
		return 'U';
	}
}

static int l2t_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, " Idx IP address                "
			 "Ethernet address  VLAN/P LP State Users Port\n");
	else {
		char ip[60];
		struct l2t_data *d = seq->private;
		struct l2t_entry *e = v;

		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_SWITCHING)
			ip[0] = '\0';
		else
			sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
		seq_printf(seq, "%4u %-25s %17pM %4d %u %2u   %c   %5u %s\n",
			   e->idx + d->l2t_start, ip, e->dmac,
			   e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
			   l2e_state(e), atomic_read(&e->refcnt),
			   e->neigh ? e->neigh->dev->name : "");
		spin_unlock_bh(&e->lock);
	}
	return 0;
}

static const struct seq_operations l2t_seq_ops = {
	.start = l2t_seq_start,
	.next = l2t_seq_next,
	.stop = l2t_seq_stop,
	.show = l2t_seq_show
};

static int l2t_seq_open(struct inode *inode, struct file *file)
{
	int rc = seq_open(file, &l2t_seq_ops);

	if (!rc) {
		struct adapter *adap = inode->i_private;
		struct seq_file *seq = file->private_data;

		seq->private = adap->l2t;
	}
	return rc;
}

const struct file_operations t4_l2t_fops = {
	.owner = THIS_MODULE,
	.open = l2t_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};