Lines matching "+full:- +full:e" in drivers/net/ethernet/chelsio/cxgb4/l2t.c (each entry: source line number, then the matching line)
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
65 static inline unsigned int vlan_prio(const struct l2t_entry *e)
67 return e->vlan >> VLAN_PRIO_SHIFT;
70 static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
72 if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
73 atomic_dec(&d->nfree);
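The two fragments above show the hold path of the entry lifecycle: a 0 -> 1 refcount transition removes the entry from the free pool (d->nfree), and cxgb4_l2t_release() later in this listing does the inverse. A minimal user-space model of that convention (illustrative only, using C11 atomics rather than the kernel's atomic_t):

#include <stdatomic.h>
#include <stdio.h>

struct entry { atomic_int refcnt; };
struct table { atomic_int nfree; struct entry e; };

/* A 0 -> 1 transition takes the entry out of the free pool. */
static void entry_hold(struct table *t, struct entry *e)
{
    if (atomic_fetch_add(&e->refcnt, 1) + 1 == 1)
        atomic_fetch_sub(&t->nfree, 1);
}

/* The last put returns it to the free pool (the driver also purges
 * the neighbour reference and the arp queue at this point). */
static void entry_release(struct table *t, struct entry *e)
{
    if (atomic_fetch_sub(&e->refcnt, 1) - 1 == 0)
        atomic_fetch_add(&t->nfree, 1);
}

int main(void)
{
    struct table t = { .nfree = 1 };
    entry_hold(&t, &t.e);       /* nfree drops to 0 */
    entry_release(&t, &t.e);    /* nfree back to 1 */
    printf("nfree = %d\n", atomic_load(&t.nfree));
    return 0;
}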
89 unsigned int l2t_size_half = d->l2t_size / 2;
97 unsigned int l2t_size_half = d->l2t_size / 2;
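Only the half-size computation of the two hash functions matches here. A plausible reading, consistent with how the helpers are named (an assumption, since the surrounding lines are not part of this listing), is that IPv4 neighbours hash into the lower half of the table and IPv6 neighbours into the upper half:

/* Hypothetical sketch of split-table hashing; the driver itself presumably
 * mixes the key and the ifindex with jhash before taking the modulo. */
static unsigned int arp_hash_model(unsigned int l2t_size, unsigned int key)
{
    unsigned int l2t_size_half = l2t_size / 2;

    return key % l2t_size_half;                   /* IPv4: lower half */
}

static unsigned int ipv6_hash_model(unsigned int l2t_size,
                                    const unsigned int key[4])
{
    unsigned int l2t_size_half = l2t_size / 2;
    unsigned int fold = key[0] ^ key[1] ^ key[2] ^ key[3];

    return l2t_size_half + fold % l2t_size_half;  /* IPv6: upper half */
}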
118 static int addreq(const struct l2t_entry *e, const u32 *addr)
120 if (e->v6)
121 return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
122 (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
123 return e->addr[0] ^ addr[0];
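addreq() compares a stored address against a lookup key without branching per word: the XOR of equal words is zero, so OR-ing the XORs is zero exactly when every word matches, and non-zero means "differs". A standalone illustration of the same trick:

#include <stdint.h>
#include <stdio.h>

/* Returns 0 iff all four 32-bit words are equal, mirroring the IPv6 branch
 * of addreq(); callers only test the result for zero / non-zero. */
static uint32_t addr_differs(const uint32_t a[4], const uint32_t b[4])
{
    return (a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2]) | (a[3] ^ b[3]);
}

int main(void)
{
    uint32_t x[4] = { 1, 2, 3, 4 }, y[4] = { 1, 2, 3, 4 }, z[4] = { 1, 2, 3, 5 };

    printf("%u %u\n", addr_differs(x, y), addr_differs(x, z)); /* 0, non-zero */
    return 0;
}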
126 static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
129 if (e->neigh)
130 neigh_release(e->neigh);
131 e->neigh = n;
138 static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
140 struct l2t_data *d = adap->l2t;
141 unsigned int l2t_idx = e->idx + d->l2t_start;
147 return -ENOMEM;
154 TID_QID_V(adap->sge.fw_evtq.abs_id)));
155 req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
156 req->l2t_idx = htons(l2t_idx);
157 req->vlan = htons(e->vlan);
158 if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
159 memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
160 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
164 if (sync && e->state != L2T_STATE_SWITCHING)
165 e->state = L2T_STATE_SYNC_WRITE;
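write_l2e() fills a hardware write request with the port, VLAN, index and destination MAC of the entry; the sync flag only decides whether a completion is expected, in which case the entry parks in L2T_STATE_SYNC_WRITE until do_l2t_write_rpl() (further down in this listing) handles the reply and flushes any queued packets. A toy model of the request the fragments describe (field names follow the lines above; widths and layout here are illustrative, not the firmware ABI):

#include <stdint.h>
#include <string.h>

struct l2t_write_req_model {
    uint16_t params;      /* port plus a "no reply" flag when async */
    uint16_t l2t_idx;     /* absolute index: e->idx + l2t_start     */
    uint16_t vlan;        /* VLAN tag to insert                     */
    uint8_t  dst_mac[6];  /* destination MAC taken from the neighbour */
};

static void build_req(struct l2t_write_req_model *req, uint16_t idx,
                      uint16_t vlan, const uint8_t *dmac, int sync)
{
    req->params  = (uint16_t)(!sync);  /* "no reply" when not synchronous */
    req->l2t_idx = idx;
    req->vlan    = vlan;
    memcpy(req->dst_mac, dmac, sizeof(req->dst_mac));
}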
173 static void send_pending(struct adapter *adap, struct l2t_entry *e)
177 while ((skb = __skb_dequeue(&e->arpq)) != NULL)
188 struct l2t_data *d = adap->l2t;
192 if (unlikely(rpl->status != CPL_ERR_NONE)) {
193 dev_err(adap->pdev_dev,
195 rpl->status, l2t_idx);
200 struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
202 spin_lock(&e->lock);
203 if (e->state != L2T_STATE_SWITCHING) {
204 send_pending(adap, e);
205 e->state = (e->neigh->nud_state & NUD_STALE) ?
208 spin_unlock(&e->lock);
216 static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
218 __skb_queue_tail(&e->arpq, skb);
222 struct l2t_entry *e)
227 switch (e->state) {
229 neigh_event_send(e->neigh, NULL);
230 spin_lock_bh(&e->lock);
231 if (e->state == L2T_STATE_STALE)
232 e->state = L2T_STATE_VALID;
233 spin_unlock_bh(&e->lock);
235 case L2T_STATE_VALID: /* fast-path, send the packet on */
239 spin_lock_bh(&e->lock);
240 if (e->state != L2T_STATE_SYNC_WRITE &&
241 e->state != L2T_STATE_RESOLVING) {
242 spin_unlock_bh(&e->lock);
245 arpq_enqueue(e, skb);
246 spin_unlock_bh(&e->lock);
248 if (e->state == L2T_STATE_RESOLVING &&
249 !neigh_event_send(e->neigh, NULL)) {
250 spin_lock_bh(&e->lock);
251 if (e->state == L2T_STATE_RESOLVING &&
252 !skb_queue_empty(&e->arpq))
253 write_l2e(adap, e, 1);
254 spin_unlock_bh(&e->lock);
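cxgb4_l2t_send() dispatches on the entry state: a VALID entry transmits immediately, a STALE entry kicks neighbour resolution but still uses the old mapping, and a RESOLVING or SYNC_WRITE entry queues the packet on e->arpq until the entry becomes usable. A condensed model of that dispatch (illustrative only; the actual transmit, locking and the extra write_l2e() kick are elided):

enum model_state { STATE_VALID, STATE_STALE, STATE_RESOLVING, STATE_SYNC_WRITE };

/* Returns 1 if the packet may be transmitted now, 0 if it was queued. */
static int l2t_send_model(enum model_state s, int *queued_pkts)
{
    switch (s) {
    case STATE_STALE:        /* entry usable, but refresh the neighbour */
        /* neigh_event_send(...) in the real driver */
        /* fall through */
    case STATE_VALID:        /* fast path: send immediately */
        return 1;
    case STATE_RESOLVING:
    case STATE_SYNC_WRITE:   /* not usable yet: park on the arp queue */
        (*queued_pkts)++;
        return 0;
    }
    return 0;
}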
266 struct l2t_entry *end, *e, **p;
268 if (!atomic_read(&d->nfree))
272 for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
273 if (atomic_read(&e->refcnt) == 0)
276 for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
279 d->rover = e + 1;
280 atomic_dec(&d->nfree);
286 if (e->state < L2T_STATE_SWITCHING)
287 for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
288 if (*p == e) {
289 *p = e->next;
290 e->next = NULL;
294 e->state = L2T_STATE_UNUSED;
295 return e;
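alloc_l2e() scans circularly, starting at d->rover, for an entry whose refcount is zero, wrapping to the start of the table, and then unhooks the victim from its old hash chain. A standalone sketch of the rover scan (illustrative; the real code also decrements nfree and fixes the hash chain while holding d->lock):

#include <stddef.h>

struct ent { int refcnt; };

/* Find a free entry, starting where the previous search left off. */
static struct ent *alloc_rover(struct ent *tab, size_t n, size_t *rover)
{
    for (size_t i = 0; i < n; i++) {
        size_t idx = (*rover + i) % n;

        if (tab[idx].refcnt == 0) {
            *rover = (idx + 1) % n;  /* next search resumes after the hit */
            return &tab[idx];
        }
    }
    return NULL;                     /* table exhausted */
}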
301 struct l2t_entry *end, *e, **p;
304 for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
305 if (atomic_read(&e->refcnt) == 0) {
307 first_free = e;
309 if (e->state == L2T_STATE_SWITCHING) {
310 if (ether_addr_equal(e->dmac, dmac) &&
311 (e->vlan == vlan) && (e->lport == port))
318 e = first_free;
328 if (e->state < L2T_STATE_SWITCHING)
329 for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
330 if (*p == e) {
331 *p = e->next;
332 e->next = NULL;
335 e->state = L2T_STATE_UNUSED;
338 return e;
351 static void _t4_l2e_free(struct l2t_entry *e)
355 if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
356 if (e->neigh) {
357 neigh_release(e->neigh);
358 e->neigh = NULL;
360 __skb_queue_purge(&e->arpq);
363 d = container_of(e, struct l2t_data, l2tab[e->idx]);
364 atomic_inc(&d->nfree);
368 static void t4_l2e_free(struct l2t_entry *e)
372 spin_lock_bh(&e->lock);
373 if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
374 if (e->neigh) {
375 neigh_release(e->neigh);
376 e->neigh = NULL;
378 __skb_queue_purge(&e->arpq);
380 spin_unlock_bh(&e->lock);
382 d = container_of(e, struct l2t_data, l2tab[e->idx]);
383 atomic_inc(&d->nfree);
386 void cxgb4_l2t_release(struct l2t_entry *e)
388 if (atomic_dec_and_test(&e->refcnt))
389 t4_l2e_free(e);
397 static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
401 spin_lock(&e->lock); /* avoid race with t4_l2t_free */
402 if (neigh != e->neigh)
403 neigh_replace(e, neigh);
404 nud_state = neigh->nud_state;
405 if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
407 e->state = L2T_STATE_RESOLVING;
409 e->state = L2T_STATE_VALID;
411 e->state = L2T_STATE_STALE;
412 spin_unlock(&e->lock);
421 struct l2t_entry *e;
422 unsigned int addr_len = neigh->tbl->key_len;
423 u32 *addr = (u32 *)neigh->primary_key;
424 int ifidx = neigh->dev->ifindex;
427 if (neigh->dev->flags & IFF_LOOPBACK)
428 lport = netdev2pinfo(physdev)->tx_chan + 4;
430 lport = netdev2pinfo(physdev)->lport;
432 if (is_vlan_dev(neigh->dev)) {
433 vlan = vlan_dev_vlan_id(neigh->dev);
434 vlan |= vlan_dev_get_egress_qos_mask(neigh->dev, priority);
439 write_lock_bh(&d->lock);
440 for (e = d->l2tab[hash].first; e; e = e->next)
441 if (!addreq(e, addr) && e->ifindex == ifidx &&
442 e->vlan == vlan && e->lport == lport) {
443 l2t_hold(d, e);
444 if (atomic_read(&e->refcnt) == 1)
445 reuse_entry(e, neigh);
450 e = alloc_l2e(d);
451 if (e) {
452 spin_lock(&e->lock); /* avoid race with t4_l2t_free */
453 e->state = L2T_STATE_RESOLVING;
454 if (neigh->dev->flags & IFF_LOOPBACK)
455 memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
456 memcpy(e->addr, addr, addr_len);
457 e->ifindex = ifidx;
458 e->hash = hash;
459 e->lport = lport;
460 e->v6 = addr_len == 16;
461 atomic_set(&e->refcnt, 1);
462 neigh_replace(e, neigh);
463 e->vlan = vlan;
464 e->next = d->l2tab[hash].first;
465 d->l2tab[hash].first = e;
466 spin_unlock(&e->lock);
469 write_unlock_bh(&d->lock);
470 return e;
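Taken together, cxgb4_l2t_get(), cxgb4_l2t_send() and cxgb4_l2t_release() form the exported interface visible in this listing: look up or create an entry for a neighbour under the table's write lock, transmit through it, and drop the reference when the connection goes away. A schematic caller is sketched below; the wrapper names are invented, the surrounding offload context (adapter, neighbour, egress device, skb) is assumed, and the snippet is not a complete translation unit:

/* Schematic use of the exported API; helper names here are hypothetical. */
static struct l2t_entry *conn_l2t_setup(struct adapter *adap,
                                        struct net_device *dev,
                                        struct neighbour *neigh)
{
        /* Takes a reference; returns NULL when no free entry is left. */
        return cxgb4_l2t_get(adap->l2t, neigh, dev, 0);
}

static void conn_xmit(struct net_device *dev, struct sk_buff *skb,
                      struct l2t_entry *e)
{
        /* Transmits now, or parks the skb on e->arpq while the MAC resolves. */
        cxgb4_l2t_send(dev, skb, e);
}

static void conn_l2t_teardown(struct l2t_entry *e)
{
        /* Drops the reference taken by cxgb4_l2t_get(); the entry is
         * recycled once its refcount reaches zero. */
        cxgb4_l2t_release(e);
}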
478 struct tp_params *tp = &adap->params.tp;
484 if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
485 ntuple |= (u64)(FT_VLAN_VLD_F | l2t->vlan) << tp->vlan_shift;
487 if (tp->port_shift >= 0)
488 ntuple |= (u64)l2t->lport << tp->port_shift;
490 if (tp->protocol_shift >= 0)
491 ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
493 if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) {
496 ntuple |= (u64)(FT_VNID_ID_VF_V(pi->vin) |
497 FT_VNID_ID_PF_V(adap->pf) |
498 FT_VNID_ID_VLD_V(pi->vivld)) << tp->vnic_shift;
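cxgb4_select_ntuple() packs the filter fields (VLAN, port, protocol, VNIC id) into one 64-bit compressed tuple, each field shifted to the position the TP block was configured with. A standalone illustration of the packing arithmetic, using made-up shift values (the real shifts come from adap->params.tp, and the macros here only stand in for the driver's FT_* helpers):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field positions; the driver reads the real ones from the
 * chip's TP configuration (tp->vlan_shift, tp->port_shift, ...). */
#define PROTO_SHIFT 0
#define PORT_SHIFT  8
#define VLAN_SHIFT  16
#define VLAN_VLD    (1u << 12)   /* stands in for FT_VLAN_VLD_F */

static uint64_t pack_ntuple(uint16_t vlan, uint8_t lport, uint8_t proto)
{
    uint64_t ntuple = 0;

    if (vlan)                    /* only when a VLAN tag is present */
        ntuple |= (uint64_t)(VLAN_VLD | vlan) << VLAN_SHIFT;
    ntuple |= (uint64_t)lport << PORT_SHIFT;
    ntuple |= (uint64_t)proto << PROTO_SHIFT;
    return ntuple;
}

int main(void)
{
    printf("0x%llx\n", (unsigned long long)pack_ntuple(100, 1, 6 /* TCP */));
    return 0;
}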
511 unsigned int addr_len = neigh->tbl->key_len;
512 u32 *addr = (u32 *) neigh->primary_key;
513 int hash, ifidx = neigh->dev->ifindex;
515 struct l2t_data *d = adap->l2t;
516 struct l2t_entry *e;
519 read_lock_bh(&d->lock);
520 for (e = d->l2tab[hash].first; e; e = e->next)
521 if (!addreq(e, addr) && e->ifindex == ifidx) {
522 spin_lock(&e->lock);
523 if (atomic_read(&e->refcnt))
525 spin_unlock(&e->lock);
528 read_unlock_bh(&d->lock);
532 read_unlock(&d->lock);
534 if (neigh != e->neigh)
535 neigh_replace(e, neigh);
537 if (e->state == L2T_STATE_RESOLVING) {
538 if (neigh->nud_state & NUD_FAILED) {
539 arpq = &e->arpq;
540 } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
541 !skb_queue_empty(&e->arpq)) {
542 write_l2e(adap, e, 1);
545 e->state = neigh->nud_state & NUD_CONNECTED ?
547 if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
548 write_l2e(adap, e, 0);
559 while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
562 spin_unlock(&e->lock);
563 if (cb->arp_err_handler)
564 cb->arp_err_handler(cb->handle, skb);
567 spin_lock(&e->lock);
570 spin_unlock_bh(&e->lock);
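When neighbour resolution fails (NUD_FAILED), t4_l2t_update() drains the entry's arpq and hands each queued packet to the arp_err_handler stored in its control block, dropping e->lock around the callback as the fragments above show. A reduced model of that drain loop (illustrative only; the skb control block and the lock dance are stand-ins):

#include <stddef.h>

struct pkt {
    struct pkt *next;
    void (*err_handler)(void *handle, struct pkt *p); /* like cb->arp_err_handler */
    void *handle;
};

/* Drain a failed entry's queue, invoking each packet's error callback.
 * The driver drops e->lock around the callback; that detail is omitted. */
static void drain_failed(struct pkt **queue)
{
    struct pkt *p;

    while ((p = *queue) != NULL) {
        *queue = p->next;
        if (p->err_handler)
            p->err_handler(p->handle, p);  /* give the packet back to its owner */
        /* packets without a handler are simply freed in the driver */
    }
}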
580 struct l2t_data *d = adap->l2t;
581 struct l2t_entry *e;
584 write_lock_bh(&d->lock);
585 e = find_or_alloc_l2e(d, vlan, port, eth_addr);
586 if (e) {
587 spin_lock(&e->lock); /* avoid race with t4_l2t_free */
588 if (!atomic_read(&e->refcnt)) {
589 e->state = L2T_STATE_SWITCHING;
590 e->vlan = vlan;
591 e->lport = port;
592 ether_addr_copy(e->dmac, eth_addr);
593 atomic_set(&e->refcnt, 1);
594 ret = write_l2e(adap, e, 0);
596 _t4_l2e_free(e);
597 spin_unlock(&e->lock);
598 write_unlock_bh(&d->lock);
602 atomic_inc(&e->refcnt);
605 spin_unlock(&e->lock);
607 write_unlock_bh(&d->lock);
608 return e;
612 * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters
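t4_l2t_alloc_switching() (and the exported cxgb4_l2t_alloc_switching() wrapper named above) hands out an L2T_STATE_SWITCHING entry keyed by VLAN, port and destination MAC, reusing an existing match by just bumping its refcount, and writing a fresh entry to hardware otherwise. A schematic caller for a switching filter, assuming the signature suggested by the fragments (not a complete translation unit; helper names are hypothetical):

/* Schematic: allocate a switching entry for a filter that rewrites the
 * destination MAC / VLAN, then release it when the filter is deleted. */
static struct l2t_entry *filter_l2t_setup(struct adapter *adap, u16 vlan,
                                          u8 port, u8 *dmac)
{
        /* Expected to return NULL when allocation or the hardware write fails. */
        return t4_l2t_alloc_switching(adap, vlan, port, dmac);
}

static void filter_l2t_teardown(struct l2t_entry *e)
{
        cxgb4_l2t_release(e);   /* frees the entry on the last reference */
}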
638 l2t_size = l2t_end - l2t_start + 1;
646 d->l2t_start = l2t_start;
647 d->l2t_size = l2t_size;
649 d->rover = d->l2tab;
650 atomic_set(&d->nfree, l2t_size);
651 rwlock_init(&d->lock);
653 for (i = 0; i < d->l2t_size; ++i) {
654 d->l2tab[i].idx = i;
655 d->l2tab[i].state = L2T_STATE_UNUSED;
656 spin_lock_init(&d->l2tab[i].lock);
657 atomic_set(&d->l2tab[i].refcnt, 0);
658 skb_queue_head_init(&d->l2tab[i].arpq);
665 struct l2t_data *d = seq->private;
667 return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
672 return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
686 static char l2e_state(const struct l2t_entry *e)
688 switch (e->state) {
693 return skb_queue_empty(&e->arpq) ? 'R' : 'A';
700 bool cxgb4_check_l2t_valid(struct l2t_entry *e)
704 spin_lock(&e->lock);
705 valid = (e->state == L2T_STATE_VALID);
706 spin_unlock(&e->lock);
718 struct l2t_data *d = seq->private;
719 struct l2t_entry *e = v;
721 spin_lock_bh(&e->lock);
722 if (e->state == L2T_STATE_SWITCHING)
725 sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
726 seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n",
727 e->idx + d->l2t_start, ip, e->dmac,
728 e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
729 l2e_state(e), atomic_read(&e->refcnt),
730 e->neigh ? e->neigh->dev->name : "");
731 spin_unlock_bh(&e->lock);
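Each l2t_seq_show() row therefore prints: hardware index, destination IP, destination MAC, VLAN id, VLAN priority, port, a one-character state from l2e_state() ('R' or 'A' for a resolving entry with an empty or non-empty arpq is the only case visible in this listing), reference count, and the neighbour's netdev name. An invented sample row matching the "%4u %-25s %17pM ..." format (all values, including the 'V' state character for a valid entry, are illustrative assumptions):

  16 10.1.1.2                  00:07:43:aa:bb:cc    0 0  1 V     2 eth0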
748 struct adapter *adap = inode->i_private;
749 struct seq_file *seq = file->private_data;
751 seq->private = adap->l2t;