#ifndef _NET_NEIGHBOUR_H
#define _NET_NEIGHBOUR_H

#include <linux/neighbour.h>

/*
 *	Generic neighbour manipulation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Changes:
 *
 *	Harald Welte:		<laforge@gnumonks.org>
 *		- Add neighbour cache statistics like rtstat
 */

#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>

#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/rtnetlink.h>

/*
 * NUD stands for "neighbor unreachability detection"
 */

#define NUD_IN_TIMER	(NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
#define NUD_VALID	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
#define NUD_CONNECTED	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)

struct neighbour;

enum {
	NEIGH_VAR_MCAST_PROBES,
	NEIGH_VAR_UCAST_PROBES,
	NEIGH_VAR_APP_PROBES,
	NEIGH_VAR_MCAST_REPROBES,
	NEIGH_VAR_RETRANS_TIME,
	NEIGH_VAR_BASE_REACHABLE_TIME,
	NEIGH_VAR_DELAY_PROBE_TIME,
	NEIGH_VAR_GC_STALETIME,
	NEIGH_VAR_QUEUE_LEN_BYTES,
	NEIGH_VAR_PROXY_QLEN,
	NEIGH_VAR_ANYCAST_DELAY,
	NEIGH_VAR_PROXY_DELAY,
	NEIGH_VAR_LOCKTIME,
#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
	/* Following are used as a second way to access one of the above */
	NEIGH_VAR_QUEUE_LEN,			/* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
	NEIGH_VAR_RETRANS_TIME_MS,		/* same data as NEIGH_VAR_RETRANS_TIME */
	NEIGH_VAR_BASE_REACHABLE_TIME_MS,	/* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
	/* Following are used by "default" only */
	NEIGH_VAR_GC_INTERVAL,
	NEIGH_VAR_GC_THRESH1,
	NEIGH_VAR_GC_THRESH2,
	NEIGH_VAR_GC_THRESH3,
	NEIGH_VAR_MAX
};

struct neigh_parms {
	possible_net_t net;
	struct net_device *dev;
	struct list_head list;
	int	(*neigh_setup)(struct neighbour *);
	void	(*neigh_cleanup)(struct neighbour *);
	struct neigh_table *tbl;

	void	*sysctl_table;

	int dead;
	atomic_t refcnt;
	struct rcu_head rcu_head;

	int	reachable_time;
	int	data[NEIGH_VAR_DATA_MAX];
	DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};

static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
{
	set_bit(index, p->data_state);
	p->data[index] = val;
}

#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])

/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used.
 * In other cases, NEIGH_VAR_SET should be used.
 */
#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)

static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
{
	bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
}

static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
{
	bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
}
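
/* Usage sketch (not part of this header): per the comment above,
 * NEIGH_VAR_INIT is meant for ndo_neigh_setup, where it assigns a
 * per-device default without touching data_state, while NEIGH_VAR_SET
 * additionally records the change in the data_state bitmap.  The driver
 * callback name and value below are hypothetical:
 *
 *	static int mydrv_neigh_setup(struct net_device *dev,
 *				     struct neigh_parms *p)
 *	{
 *		NEIGH_VAR_INIT(p, UCAST_PROBES, 4);
 *		return 0;
 *	}
 */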

struct neigh_statistics {
	unsigned long allocs;		/* number of allocated neighs */
	unsigned long destroys;		/* number of destroyed neighs */
	unsigned long hash_grows;	/* number of hash resizes */

	unsigned long res_failed;	/* number of failed resolutions */

	unsigned long lookups;		/* number of lookups */
	unsigned long hits;		/* number of hits (among lookups) */

	unsigned long rcv_probes_mcast;	/* number of received mcast ipv6 */
	unsigned long rcv_probes_ucast;	/* number of received ucast ipv6 */

	unsigned long periodic_gc_runs;	/* number of periodic GC runs */
	unsigned long forced_gc_runs;	/* number of forced GC runs */

	unsigned long unres_discards;	/* number of unresolved drops */
	unsigned long table_fulls;	/* times even gc couldn't help */
};

#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)

struct neighbour {
	struct neighbour __rcu	*next;
	struct neigh_table	*tbl;
	struct neigh_parms	*parms;
	unsigned long		confirmed;
	unsigned long		updated;
	rwlock_t		lock;
	atomic_t		refcnt;
	struct sk_buff_head	arp_queue;
	unsigned int		arp_queue_len_bytes;
	struct timer_list	timer;
	unsigned long		used;
	atomic_t		probes;
	__u8			flags;
	__u8			nud_state;
	__u8			type;
	__u8			dead;
	seqlock_t		ha_lock;
	unsigned char		ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
	struct hh_cache		hh;
	int			(*output)(struct neighbour *, struct sk_buff *);
	const struct neigh_ops	*ops;
	struct rcu_head		rcu;
	struct net_device	*dev;
	u8			primary_key[0];
};

struct neigh_ops {
	int			family;
	void			(*solicit)(struct neighbour *, struct sk_buff *);
	void			(*error_report)(struct neighbour *, struct sk_buff *);
	int			(*output)(struct neighbour *, struct sk_buff *);
	int			(*connected_output)(struct neighbour *, struct sk_buff *);
};

struct pneigh_entry {
	struct pneigh_entry	*next;
	possible_net_t		net;
	struct net_device	*dev;
	u8			flags;
	u8			key[0];
};

/*
 *	neighbour table manipulation
 */

#define NEIGH_NUM_HASH_RND	4

struct neigh_hash_table {
	struct neighbour __rcu	**hash_buckets;
	unsigned int		hash_shift;
	__u32			hash_rnd[NEIGH_NUM_HASH_RND];
	struct rcu_head		rcu;
};


struct neigh_table {
	int			family;
	int			entry_size;
	int			key_len;
	__be16			protocol;
	__u32			(*hash)(const void *pkey,
					const struct net_device *dev,
					__u32 *hash_rnd);
	bool			(*key_eq)(const struct neighbour *, const void *pkey);
	int			(*constructor)(struct neighbour *);
	int			(*pconstructor)(struct pneigh_entry *);
	void			(*pdestructor)(struct pneigh_entry *);
	void			(*proxy_redo)(struct sk_buff *skb);
	char			*id;
	struct neigh_parms	parms;
	struct list_head	parms_list;
	int			gc_interval;
	int			gc_thresh1;
	int			gc_thresh2;
	int			gc_thresh3;
	unsigned long		last_flush;
	struct delayed_work	gc_work;
	struct timer_list	proxy_timer;
	struct sk_buff_head	proxy_queue;
	atomic_t		entries;
	rwlock_t		lock;
	unsigned long		last_rand;
	struct neigh_statistics	__percpu *stats;
	struct neigh_hash_table __rcu *nht;
	struct pneigh_entry	**phash_buckets;
};

enum {
	NEIGH_ARP_TABLE = 0,
	NEIGH_ND_TABLE = 1,
	NEIGH_DN_TABLE = 2,
	NEIGH_NR_TABLES,
	NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
};

static inline int neigh_parms_family(struct neigh_parms *p)
{
	return p->tbl->family;
}

#define NEIGH_PRIV_ALIGN	sizeof(long long)
#define NEIGH_ENTRY_SIZE(size)	ALIGN((size), NEIGH_PRIV_ALIGN)

static inline void *neighbour_priv(const struct neighbour *n)
{
	return (char *)n + n->tbl->entry_size;
}
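
/* Usage sketch (not part of this header): entry_size is the offset at
 * which driver-private per-neighbour storage begins, so neighbour_priv()
 * simply points past the core entry.  A driver that asked for such
 * storage (typically by setting dev->neigh_priv_len) could keep private
 * state there; the structure and helper below are hypothetical:
 *
 *	struct mydrv_neigh_priv {
 *		unsigned long last_refresh;
 *	};
 *
 *	static void mydrv_touch_neigh(struct neighbour *n)
 *	{
 *		struct mydrv_neigh_priv *priv = neighbour_priv(n);
 *
 *		priv->last_refresh = jiffies;
 *	}
 */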

/* flags for neigh_update() */
#define NEIGH_UPDATE_F_OVERRIDE			0x00000001
#define NEIGH_UPDATE_F_WEAK_OVERRIDE		0x00000002
#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER	0x00000004
#define NEIGH_UPDATE_F_ISROUTER			0x40000000
#define NEIGH_UPDATE_F_ADMIN			0x80000000


static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
{
	return *(const u16 *)n->primary_key == *(const u16 *)pkey;
}

static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
{
	return *(const u32 *)n->primary_key == *(const u32 *)pkey;
}

static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
{
	const u32 *n32 = (const u32 *)n->primary_key;
	const u32 *p32 = pkey;

	return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
		(n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
}

static inline struct neighbour *___neigh_lookup_noref(
	struct neigh_table *tbl,
	bool (*key_eq)(const struct neighbour *n, const void *pkey),
	__u32 (*hash)(const void *pkey,
		      const struct net_device *dev,
		      __u32 *hash_rnd),
	const void *pkey,
	struct net_device *dev)
{
	struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht);
	struct neighbour *n;
	u32 hash_val;

	hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (n->dev == dev && key_eq(n, pkey))
			return n;
	}

	return NULL;
}

static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
						     const void *pkey,
						     struct net_device *dev)
{
	return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
}
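
/* Usage sketch (not part of this header): the *_noref lookups take no
 * reference on the returned entry and dereference the table with
 * rcu_dereference_bh(), so the caller is expected to run inside an
 * rcu_read_lock_bh()/rcu_read_unlock_bh() section and must not keep the
 * pointer past it unless it grabs its own reference with neigh_hold().
 * The variables below are placeholders:
 *
 *	rcu_read_lock_bh();
 *	n = __neigh_lookup_noref(tbl, pkey, dev);
 *	if (n)
 *		neigh_hold(n);	(only needed if n outlives the section)
 *	rcu_read_unlock_bh();
 */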

void neigh_table_init(int index, struct neigh_table *tbl);
int neigh_table_clear(int index, struct neigh_table *tbl);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev);
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey);
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref);
static inline struct neighbour *neigh_create(struct neigh_table *tbl,
					     const void *pkey,
					     struct net_device *dev)
{
	return __neigh_create(tbl, pkey, dev, true);
}
void neigh_destroy(struct neighbour *neigh);
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
		 u32 nlmsg_pid);
void __neigh_set_probe_once(struct neighbour *neigh);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev);

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl);
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);

static inline
struct net *neigh_parms_net(const struct neigh_parms *parms)
{
	return read_pnet(&parms->net);
}

unsigned long neigh_rand_reach_time(unsigned long base);

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb);
struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
				   const void *key, struct net_device *dev,
				   int creat);
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
				     const void *key, struct net_device *dev);
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
		  struct net_device *dev);

static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
{
	return read_pnet(&pneigh->net);
}

void neigh_app_ns(struct neighbour *n);
void neigh_for_each(struct neigh_table *tbl,
		    void (*cb)(struct neighbour *, void *), void *cookie);
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *));
int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
void pneigh_for_each(struct neigh_table *tbl,
		     void (*cb)(struct pneigh_entry *));

struct neigh_seq_state {
	struct seq_net_private p;
	struct neigh_table *tbl;
	struct neigh_hash_table *nht;
	void *(*neigh_sub_iter)(struct neigh_seq_state *state,
				struct neighbour *n, loff_t *pos);
	unsigned int bucket;
	unsigned int flags;
#define NEIGH_SEQ_NEIGH_ONLY	0x00000001
#define NEIGH_SEQ_IS_PNEIGH	0x00000002
#define NEIGH_SEQ_SKIP_NOARP	0x00000004
};
void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
		      unsigned int);
void *neigh_seq_next(struct seq_file *, void *, loff_t *);
void neigh_seq_stop(struct seq_file *, void *);

int neigh_proc_dointvec(struct ctl_table *ctl, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void __user *buffer,
				   size_t *lenp, loff_t *ppos);

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *proc_handler);
void neigh_sysctl_unregister(struct neigh_parms *p);

static inline void __neigh_parms_put(struct neigh_parms *parms)
{
	atomic_dec(&parms->refcnt);
}

static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
{
	atomic_inc(&parms->refcnt);
	return parms;
}

/*
 *	Neighbour references
 */

static inline void neigh_release(struct neighbour *neigh)
{
	if (atomic_dec_and_test(&neigh->refcnt))
		neigh_destroy(neigh);
}

static inline struct neighbour *neigh_clone(struct neighbour *neigh)
{
	if (neigh)
		atomic_inc(&neigh->refcnt);
	return neigh;
}

#define neigh_hold(n)	atomic_inc(&(n)->refcnt)
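
/* Usage sketch (not part of this header): neigh_lookup() and
 * neigh_create() return entries with a reference already held for the
 * caller, which must be dropped with neigh_release() when the entry is
 * no longer needed.  The variables below are placeholders:
 *
 *	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
 *
 *	if (n) {
 *		... use n under the appropriate locking ...
 *		neigh_release(n);
 *	}
 */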

static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	unsigned long now = jiffies;

	if (neigh->used != now)
		neigh->used = now;
	if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
		return __neigh_event_send(neigh, skb);
	return 0;
}

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
{
	unsigned int seq, hh_alen;

	do {
		seq = read_seqbegin(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(ETH_HLEN);
		memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN);
	} while (read_seqretry(&hh->hh_lock, seq));
	return 0;
}
#endif

static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
	unsigned int seq;
	unsigned int hh_len;

	do {
		seq = read_seqbegin(&hh->hh_lock);
		hh_len = hh->hh_len;
		if (likely(hh_len <= HH_DATA_MOD)) {
			/* this is inlined by gcc */
			memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
		} else {
			unsigned int hh_alen = HH_DATA_ALIGN(hh_len);

			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		}
	} while (read_seqretry(&hh->hh_lock, seq));

	skb_push(skb, hh_len);
	return dev_queue_xmit(skb);
}

static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
{
	const struct hh_cache *hh = &n->hh;

	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
		return neigh_hh_output(hh, skb);
	else
		return n->output(n, skb);
}

static inline struct neighbour *
__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n || !creat)
		return n;

	n = neigh_create(tbl, pkey, dev);
	return IS_ERR(n) ? NULL : n;
}

static inline struct neighbour *
__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
		     struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n)
		return n;

	return neigh_create(tbl, pkey, dev);
}

struct neighbour_cb {
	unsigned long sched_next;
	unsigned int flags;
};

#define LOCALLY_ENQUEUED 0x1

#define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)

static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
				     const struct net_device *dev)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&n->ha_lock);
		memcpy(dst, n->ha, dev->addr_len);
	} while (read_seqretry(&n->ha_lock, seq));
}


#endif