/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <net/neighbour.h>
#include "cxgb4.h"
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"

#define VLAN_NONE 0xfff

/* identifies sync vs async L2T_WRITE_REQs */
#define F_SYNC_WR    (1 << 12)

enum {
	L2T_STATE_VALID,      /* entry is up to date */
	L2T_STATE_STALE,      /* entry may be used but needs revalidation */
	L2T_STATE_RESOLVING,  /* entry needs address resolution */
	L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */

	/* when the state is one of the following, the entry is not hashed */
	L2T_STATE_SWITCHING,  /* entry is being used by a switching filter */
	L2T_STATE_UNUSED      /* entry not in use */
};

struct l2t_data {
	rwlock_t lock;
	atomic_t nfree;             /* number of free entries */
	struct l2t_entry *rover;    /* starting point for next allocation */
	struct l2t_entry l2tab[L2T_SIZE];
};

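/* Extract the 3-bit 802.1p priority from an entry's VLAN TCI. */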
static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> 13;
}

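/*
 * Take a reference to an L2T entry.  A 0 -> 1 refcount transition takes the
 * entry off the free count.
 */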
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
	if (atomic_add_return(1, &e->refcnt) == 1)  /* 0 -> 1 transition */
		atomic_dec(&d->nfree);
}

/*
 * To avoid having to check address families we do not allow v4 and v6
 * neighbors to be on the same hash chain.  We keep v4 entries in the first
 * half of available hash buckets and v6 in the second.
 */
enum {
	L2T_SZ_HALF = L2T_SIZE / 2,
	L2T_HASH_MASK = L2T_SZ_HALF - 1
};

static inline unsigned int arp_hash(const u32 *key, int ifindex)
{
	return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
}

static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
{
	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];

	return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
}

static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
{
	return addr_len == 4 ? arp_hash(addr, ifindex) :
			       ipv6_hash(addr, ifindex);
}

/*
 * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
 * whether the L2T entry and the address are of the same address family.
 * Callers ensure an address is only checked against L2T entries of the same
 * family, something made trivial by the separation of IP and IPv6 hash chains
 * mentioned above.  Returns 0 if there's a match.
 */
static int addreq(const struct l2t_entry *e, const u32 *addr)
{
	if (e->v6)
		return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
		       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
	return e->addr[0] ^ addr[0];
}

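/*
 * Replace an entry's cached neighbour, taking a reference on the new one and
 * releasing the old.  Must be called with the entry's lock held.
 */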
static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
{
	struct sk_buff *skb;
	struct cpl_l2t_write_req *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);

	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
					e->idx | (sync ? F_SYNC_WR : 0) |
					TID_QID(adap->sge.fw_evtq.abs_id)));
	req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
	req->l2t_idx = htons(e->idx);
	req->vlan = htons(e->vlan);
	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));

	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
	t4_ofld_send(adap, skb);

	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;
	return 0;
}

/*
 * Send packets waiting in an L2T entry's ARP queue.  Must be called with the
 * entry locked.
 */
static void send_pending(struct adapter *adap, struct l2t_entry *e)
{
	while (e->arpq_head) {
		struct sk_buff *skb = e->arpq_head;

		e->arpq_head = skb->next;
		skb->next = NULL;
		t4_ofld_send(adap, skb);
	}
	e->arpq_tail = NULL;
}

/*
 * Process a CPL_L2T_WRITE_RPL.  Wake up the ARP queue if it completes a
 * synchronous L2T_WRITE.  Note that the TID in the reply is really the L2T
 * index it refers to.
 */
void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
{
	unsigned int tid = GET_TID(rpl);
	unsigned int idx = tid & (L2T_SIZE - 1);

	if (unlikely(rpl->status != CPL_ERR_NONE)) {
		dev_err(adap->pdev_dev,
			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
			rpl->status, idx);
		return;
	}

	if (tid & F_SYNC_WR) {
		struct l2t_entry *e = &adap->l2t->l2tab[idx];

		spin_lock(&e->lock);
		if (e->state != L2T_STATE_SWITCHING) {
			send_pending(adap, e);
			e->state = (e->neigh->nud_state & NUD_STALE) ?
					L2T_STATE_STALE : L2T_STATE_VALID;
		}
		spin_unlock(&e->lock);
	}
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	skb->next = NULL;
	if (e->arpq_head)
		e->arpq_tail->next = skb;
	else
		e->arpq_head = skb;
	e->arpq_tail = skb;
}

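/*
 * Send a packet through an L2T entry.  Depending on the entry's state the
 * packet is sent directly, queued until the destination resolves, or used
 * to kick off revalidation of a stale entry.
 */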
int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
		   struct l2t_entry *e)
{
	struct adapter *adap = netdev2adap(dev);

again:
	switch (e->state) {
	case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
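		/* fall through */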
	case L2T_STATE_VALID:     /* fast-path, send the packet on */
		return t4_ofld_send(adap, skb);
	case L2T_STATE_RESOLVING:
	case L2T_STATE_SYNC_WRITE:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_SYNC_WRITE &&
		    e->state != L2T_STATE_RESOLVING) {
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

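		/*
		 * Only the first packet added to the arpq should kick off
		 * resolution.  However, because write_l2e() may fail to
		 * allocate its skb, we allow each queued packet to retry
		 * resolution as a way of recovering from transient memory
		 * exhaustion.
		 */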
		if (e->state == L2T_STATE_RESOLVING &&
		    !neigh_event_send(e->neigh, NULL)) {
			spin_lock_bh(&e->lock);
			if (e->state == L2T_STATE_RESOLVING && e->arpq_head)
				write_l2e(adap, e, 1);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_l2t_send);

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

	for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING)
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}

	e->state = L2T_STATE_UNUSED;
	return e;
}

/*
 * Called when an L2T entry has no more users.
 */
static void t4_l2e_free(struct l2t_entry *e)
{
	struct l2t_data *d;

	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
		while (e->arpq_head) {
			struct sk_buff *skb = e->arpq_head;

			e->arpq_head = skb->next;
			kfree_skb(skb);
		}
		e->arpq_tail = NULL;
	}
	spin_unlock_bh(&e->lock);

	d = container_of(e, struct l2t_data, l2tab[e->idx]);
	atomic_inc(&d->nfree);
}

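/*
 * Drop a reference to an L2T entry.  The entry is returned to the free pool
 * when its last reference is released.
 */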
void cxgb4_l2t_release(struct l2t_entry *e)
{
	if (atomic_dec_and_test(&e->refcnt))
		t4_l2e_free(e);
}
EXPORT_SYMBOL(cxgb4_l2t_release);

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);                /* avoid race with t4_l2e_free */
	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

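/*
 * Look up the L2T entry for a neighbour, allocating a new one if necessary,
 * and take a reference on it.  Returns NULL if no entry can be allocated.
 */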
struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
				const struct net_device *physdev,
				unsigned int priority)
{
	u8 lport;
	u16 vlan;
	struct l2t_entry *e;
	int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *)neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(addr, addr_len, ifidx);

	if (neigh->dev->flags & IFF_LOOPBACK)
		lport = netdev2pinfo(physdev)->tx_chan + 4;
	else
		lport = netdev2pinfo(physdev)->lport;

	if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
		vlan = vlan_dev_vlan_id(neigh->dev);
	else
		vlan = VLAN_NONE;

	write_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx &&
		    e->vlan == vlan && e->lport == lport) {
			l2t_hold(d, e);
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);          /* avoid race with t4_l2e_free */
		e->state = L2T_STATE_RESOLVING;
		if (neigh->dev->flags & IFF_LOOPBACK)
			memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
		memcpy(e->addr, addr, addr_len);
		e->ifindex = ifidx;
		e->hash = hash;
		e->lport = lport;
		e->v6 = addr_len == 16;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		e->vlan = vlan;
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		spin_unlock(&e->lock);
	}
done:
	write_unlock_bh(&d->lock);
	return e;
}
EXPORT_SYMBOL(cxgb4_l2t_get);

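/*
 * Construct the value of the Compressed Filter Tuple for an L2T entry.  Only
 * the fields the chip is configured to include in the tuple (indicated by a
 * non-negative shift) are filled in.
 */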
u64 cxgb4_select_ntuple(struct net_device *dev,
			const struct l2t_entry *l2t)
{
	struct adapter *adap = netdev2adap(dev);
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple = 0;

	/* Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
		ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (u64)l2t->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0) {
		u32 viid = cxgb4_port_viid(dev);
		u32 vf = FW_VIID_VIN_GET(viid);
		u32 pf = FW_VIID_PFN_GET(viid);
		u32 vld = FW_VIID_VIVLD_GET(viid);

		ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
				V_FT_VNID_ID_PF(pf) |
				V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
	}

	return ntuple;
}
EXPORT_SYMBOL(cxgb4_select_ntuple);

/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the device.
 */
static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq)
{
	while (arpq) {
		struct sk_buff *skb = arpq;
		const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

		arpq = skb->next;
		skb->next = NULL;
		if (cb->arp_err_handler)
			cb->arp_err_handler(cb->handle, skb);
		else
			t4_ofld_send(adap, skb);
	}
}

/*
 * Called when the host's neighbor layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
	struct l2t_entry *e;
	struct sk_buff *arpq = NULL;
	struct l2t_data *d = adap->l2t;
	int addr_len = neigh->tbl->key_len;
	u32 *addr = (u32 *)neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = addr_hash(addr, addr_len, ifidx);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (!addreq(e, addr) && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			if (atomic_read(&e->refcnt))
				goto found;
			spin_unlock(&e->lock);
			break;
		}
	read_unlock_bh(&d->lock);
	return;

 found:
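	/*
	 * The entry's lock is held and BH is still disabled here.  Drop only
	 * the table's read lock; the spin_unlock_bh() below re-enables BH
	 * once we are done with the entry.
	 */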
	read_unlock(&d->lock);

	if (neigh != e->neigh)
		neigh_replace(e, neigh);

	if (e->state == L2T_STATE_RESOLVING) {
		if (neigh->nud_state & NUD_FAILED) {
			arpq = e->arpq_head;
			e->arpq_head = e->arpq_tail = NULL;
		} else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
			   e->arpq_head) {
			write_l2e(adap, e, 1);
		}
	} else {
		e->state = neigh->nud_state & NUD_CONNECTED ?
			L2T_STATE_VALID : L2T_STATE_STALE;
		if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
			write_l2e(adap, e, 0);
	}

	spin_unlock_bh(&e->lock);

	if (arpq)
		handle_failed_resolution(adap, arpq);
}

/* Allocate an L2T entry for use by a switching rule.  Such entries need to be
 * explicitly freed, and while busy they are not on any hash chain, so normal
 * address resolution updates do not see them.
 */
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
{
	struct l2t_entry *e;

	write_lock_bh(&d->lock);
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);          /* avoid race with t4_l2e_free */
		e->state = L2T_STATE_SWITCHING;
		atomic_set(&e->refcnt, 1);
		spin_unlock(&e->lock);
	}
	write_unlock_bh(&d->lock);
	return e;
}

/* Sets/updates the contents of a switching L2T entry that has been allocated
 * with an earlier call to t4_l2t_alloc_switching().
 */
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
		u8 port, u8 *eth_addr)
{
	e->vlan = vlan;
	e->lport = port;
	memcpy(e->dmac, eth_addr, ETH_ALEN);
	return write_l2e(adap, e, 0);
}

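/*
 * Allocate and initialize the L2 table.  All entries start out unused and
 * unreferenced.
 */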
struct l2t_data *t4_init_l2t(void)
{
	int i;
	struct l2t_data *d;

	d = t4_alloc_mem(sizeof(*d));
	if (!d)
		return NULL;

	d->rover = d->l2tab;
	atomic_set(&d->nfree, L2T_SIZE);
	rwlock_init(&d->lock);

	for (i = 0; i < L2T_SIZE; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		spin_lock_init(&d->l2tab[i].lock);
		atomic_set(&d->l2tab[i].refcnt, 0);
	}
	return d;
}

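/* debugfs support: present the L2 table as a text file, one entry per line. */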
static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
{
	struct l2t_entry *l2tab = seq->private;

	return pos >= L2T_SIZE ? NULL : &l2tab[pos];
}

static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	v = l2t_get_idx(seq, *pos);
	if (v)
		++*pos;
	return v;
}

static void l2t_seq_stop(struct seq_file *seq, void *v)
{
}

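/* Map an entry's state to the single-character code shown in the dump. */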
static char l2e_state(const struct l2t_entry *e)
{
	switch (e->state) {
	case L2T_STATE_VALID: return 'V';
	case L2T_STATE_STALE: return 'S';
	case L2T_STATE_SYNC_WRITE: return 'W';
	case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
	case L2T_STATE_SWITCHING: return 'X';
	default:
		return 'U';
	}
}

static int l2t_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, " Idx IP address                "
			 "Ethernet address  VLAN/P LP State Users Port\n");
	else {
		char ip[60];
		struct l2t_entry *e = v;

		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_SWITCHING)
			ip[0] = '\0';
		else
			sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
		seq_printf(seq, "%4u %-25s %17pM %4d %u %2u   %c   %5u %s\n",
			   e->idx, ip, e->dmac,
			   e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
			   l2e_state(e), atomic_read(&e->refcnt),
			   e->neigh ? e->neigh->dev->name : "");
		spin_unlock_bh(&e->lock);
	}
	return 0;
}

static const struct seq_operations l2t_seq_ops = {
	.start = l2t_seq_start,
	.next = l2t_seq_next,
	.stop = l2t_seq_stop,
	.show = l2t_seq_show
};

static int l2t_seq_open(struct inode *inode, struct file *file)
{
	int rc = seq_open(file, &l2t_seq_ops);

	if (!rc) {
		struct adapter *adap = inode->i_private;
		struct seq_file *seq = file->private_data;

		seq->private = adap->l2t->l2tab;
	}
	return rc;
}

const struct file_operations t4_l2t_fops = {
	.owner = THIS_MODULE,
	.open = l2t_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};