#ifndef _NET_NEIGHBOUR_H
#define _NET_NEIGHBOUR_H

#include <linux/neighbour.h>

/*
 *	Generic neighbour manipulation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Changes:
 *
 *	Harald Welte:		<laforge@gnumonks.org>
 *		- Add neighbour cache statistics like rtstat
 */

#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>

#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/rtnetlink.h>

/*
 * NUD stands for "neighbor unreachability detection"
 */

#define NUD_IN_TIMER	(NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
#define NUD_VALID	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
#define NUD_CONNECTED	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)

struct neighbour;

enum {
	NEIGH_VAR_MCAST_PROBES,
	NEIGH_VAR_UCAST_PROBES,
	NEIGH_VAR_APP_PROBES,
	NEIGH_VAR_RETRANS_TIME,
	NEIGH_VAR_BASE_REACHABLE_TIME,
	NEIGH_VAR_DELAY_PROBE_TIME,
	NEIGH_VAR_GC_STALETIME,
	NEIGH_VAR_QUEUE_LEN_BYTES,
	NEIGH_VAR_PROXY_QLEN,
	NEIGH_VAR_ANYCAST_DELAY,
	NEIGH_VAR_PROXY_DELAY,
	NEIGH_VAR_LOCKTIME,
#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
	/* Following are used as a second way to access one of the above */
	NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
	NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */
	NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
	/* Following are used by "default" only */
	NEIGH_VAR_GC_INTERVAL,
	NEIGH_VAR_GC_THRESH1,
	NEIGH_VAR_GC_THRESH2,
	NEIGH_VAR_GC_THRESH3,
	NEIGH_VAR_MAX
};

struct neigh_parms {
#ifdef CONFIG_NET_NS
	struct net *net;
#endif
	struct net_device *dev;
	struct neigh_parms *next;
	int (*neigh_setup)(struct neighbour *);
	void (*neigh_cleanup)(struct neighbour *);
	struct neigh_table *tbl;

	void *sysctl_table;

	int dead;
	atomic_t refcnt;
	struct rcu_head rcu_head;

	int reachable_time;
	int data[NEIGH_VAR_DATA_MAX];
	DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};

static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
{
	set_bit(index, p->data_state);
	p->data[index] = val;
}

#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])

/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used.
 * In other cases, NEIGH_VAR_SET should be used.
 */
#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
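
/* Illustrative sketch (not part of the original header): how the accessor
 * macros above are typically used.  The driver name and values below are
 * hypothetical.  Mechanically, NEIGH_VAR_INIT() only writes the value,
 * while NEIGH_VAR_SET() additionally records the per-parameter bit in
 * data_state via neigh_var_set().
 *
 *	static int mydrv_neigh_setup(struct net_device *dev,
 *				     struct neigh_parms *np)
 *	{
 *		// from ndo_neigh_setup: write the default directly
 *		NEIGH_VAR_INIT(np, UCAST_PROBES, 4);
 *		return 0;
 *	}
 *
 *	// elsewhere, record that the value was explicitly set:
 *	NEIGH_VAR_SET(parms, RETRANS_TIME, 2 * HZ);
 */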

static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
{
	bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
}

static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
{
	bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
}

struct neigh_statistics {
	unsigned long allocs;		/* number of allocated neighs */
	unsigned long destroys;		/* number of destroyed neighs */
	unsigned long hash_grows;	/* number of hash resizes */

	unsigned long res_failed;	/* number of failed resolutions */

	unsigned long lookups;		/* number of lookups */
	unsigned long hits;		/* number of hits (among lookups) */

	unsigned long rcv_probes_mcast;	/* number of received mcast ipv6 */
	unsigned long rcv_probes_ucast;	/* number of received ucast ipv6 */

	unsigned long periodic_gc_runs;	/* number of periodic GC runs */
	unsigned long forced_gc_runs;	/* number of forced GC runs */

	unsigned long unres_discards;	/* number of unresolved drops */
};

#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)

struct neighbour {
	struct neighbour __rcu *next;
	struct neigh_table *tbl;
	struct neigh_parms *parms;
	unsigned long confirmed;
	unsigned long updated;
	rwlock_t lock;
	atomic_t refcnt;
	struct sk_buff_head arp_queue;
	unsigned int arp_queue_len_bytes;
	struct timer_list timer;
	unsigned long used;
	atomic_t probes;
	__u8 flags;
	__u8 nud_state;
	__u8 type;
	__u8 dead;
	seqlock_t ha_lock;
	unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
	struct hh_cache hh;
	int (*output)(struct neighbour *, struct sk_buff *);
	const struct neigh_ops *ops;
	struct rcu_head rcu;
	struct net_device *dev;
	u8 primary_key[0];
};

struct neigh_ops {
	int family;
	void (*solicit)(struct neighbour *, struct sk_buff *);
	void (*error_report)(struct neighbour *, struct sk_buff *);
	int (*output)(struct neighbour *, struct sk_buff *);
	int (*connected_output)(struct neighbour *, struct sk_buff *);
};

struct pneigh_entry {
	struct pneigh_entry *next;
#ifdef CONFIG_NET_NS
	struct net *net;
#endif
	struct net_device *dev;
	u8 flags;
	u8 key[0];
};

/*
 *	neighbour table manipulation
 */

#define NEIGH_NUM_HASH_RND	4

struct neigh_hash_table {
	struct neighbour __rcu **hash_buckets;
	unsigned int hash_shift;
	__u32 hash_rnd[NEIGH_NUM_HASH_RND];
	struct rcu_head rcu;
};

struct neigh_table {
	struct neigh_table *next;
	int family;
	int entry_size;
	int key_len;
	__u32 (*hash)(const void *pkey,
		      const struct net_device *dev,
		      __u32 *hash_rnd);
	int (*constructor)(struct neighbour *);
	int (*pconstructor)(struct pneigh_entry *);
	void (*pdestructor)(struct pneigh_entry *);
	void (*proxy_redo)(struct sk_buff *skb);
	char *id;
	struct neigh_parms parms;
	/* HACK. gc_* should follow parms without a gap! */
	int gc_interval;
	int gc_thresh1;
	int gc_thresh2;
	int gc_thresh3;
	unsigned long last_flush;
	struct delayed_work gc_work;
	struct timer_list proxy_timer;
	struct sk_buff_head proxy_queue;
	atomic_t entries;
	rwlock_t lock;
	unsigned long last_rand;
	struct neigh_statistics __percpu *stats;
	struct neigh_hash_table __rcu *nht;
	struct pneigh_entry **phash_buckets;
};
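
/* Illustrative sketch (not from the original header): roughly how a protocol
 * might describe its cache with the structure above and register it with
 * neigh_table_init() (declared further down in this file).  All names and
 * values here are hypothetical; real tables set additional fields and the
 * details vary by protocol and kernel version.
 *
 *	static struct neigh_table myproto_tbl = {
 *		.family		= AF_INET,
 *		.key_len	= 4,			// e.g. an IPv4 address
 *		.hash		= myproto_hash,		// hypothetical helpers
 *		.constructor	= myproto_constructor,
 *		.id		= "myproto_cache",
 *		.parms		= {
 *			.tbl	= &myproto_tbl,
 *			.refcnt	= ATOMIC_INIT(1),
 *		},
 *		.gc_interval	= 30 * HZ,
 *		.gc_thresh1	= 128,
 *		.gc_thresh2	= 512,
 *		.gc_thresh3	= 1024,
 *	};
 *
 *	// at protocol init time:
 *	neigh_table_init(&myproto_tbl);
 */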

static inline int neigh_parms_family(struct neigh_parms *p)
{
	return p->tbl->family;
}

#define NEIGH_PRIV_ALIGN	sizeof(long long)
#define NEIGH_ENTRY_SIZE(size)	ALIGN((size), NEIGH_PRIV_ALIGN)

static inline void *neighbour_priv(const struct neighbour *n)
{
	return (char *)n + n->tbl->entry_size;
}

/* flags for neigh_update() */
#define NEIGH_UPDATE_F_OVERRIDE			0x00000001
#define NEIGH_UPDATE_F_WEAK_OVERRIDE		0x00000002
#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER	0x00000004
#define NEIGH_UPDATE_F_ISROUTER			0x40000000
#define NEIGH_UPDATE_F_ADMIN			0x80000000

void neigh_table_init(struct neigh_table *tbl);
int neigh_table_clear(struct neigh_table *tbl);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev);
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey);
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref);
static inline struct neighbour *neigh_create(struct neigh_table *tbl,
					     const void *pkey,
					     struct net_device *dev)
{
	return __neigh_create(tbl, pkey, dev, true);
}
void neigh_destroy(struct neighbour *neigh);
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags);
void __neigh_set_probe_once(struct neighbour *neigh);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev);

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl);
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);

static inline
struct net *neigh_parms_net(const struct neigh_parms *parms)
{
	return read_pnet(&parms->net);
}

unsigned long neigh_rand_reach_time(unsigned long base);

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb);
struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
				   const void *key, struct net_device *dev,
				   int creat);
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
				     const void *key, struct net_device *dev);
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
		  struct net_device *dev);

static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
{
	return read_pnet(&pneigh->net);
}
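
/* Illustrative sketch (not from the original header): a common
 * lookup-or-create pattern built from the declarations above.  A looked-up
 * or newly created entry is returned with a reference held, so the caller
 * must drop it with neigh_release() (defined later in this file).  The
 * table, key and device below are placeholders.
 *
 *	struct neighbour *n;
 *
 *	n = neigh_lookup(tbl, &key, dev);
 *	if (!n) {
 *		n = neigh_create(tbl, &key, dev);
 *		if (IS_ERR(n))
 *			return PTR_ERR(n);
 *	}
 *	// ... use the entry ...
 *	neigh_release(n);
 */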

void neigh_app_ns(struct neighbour *n);
void neigh_for_each(struct neigh_table *tbl,
		    void (*cb)(struct neighbour *, void *), void *cookie);
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *));
void pneigh_for_each(struct neigh_table *tbl,
		     void (*cb)(struct pneigh_entry *));

struct neigh_seq_state {
	struct seq_net_private p;
	struct neigh_table *tbl;
	struct neigh_hash_table *nht;
	void *(*neigh_sub_iter)(struct neigh_seq_state *state,
				struct neighbour *n, loff_t *pos);
	unsigned int bucket;
	unsigned int flags;
#define NEIGH_SEQ_NEIGH_ONLY	0x00000001
#define NEIGH_SEQ_IS_PNEIGH	0x00000002
#define NEIGH_SEQ_SKIP_NOARP	0x00000004
};
void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
		      unsigned int);
void *neigh_seq_next(struct seq_file *, void *, loff_t *);
void neigh_seq_stop(struct seq_file *, void *);

int neigh_proc_dointvec(struct ctl_table *ctl, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void __user *buffer,
				   size_t *lenp, loff_t *ppos);

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *proc_handler);
void neigh_sysctl_unregister(struct neigh_parms *p);

static inline void __neigh_parms_put(struct neigh_parms *parms)
{
	atomic_dec(&parms->refcnt);
}

static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
{
	atomic_inc(&parms->refcnt);
	return parms;
}

/*
 *	Neighbour references
 */

static inline void neigh_release(struct neighbour *neigh)
{
	if (atomic_dec_and_test(&neigh->refcnt))
		neigh_destroy(neigh);
}

static inline struct neighbour *neigh_clone(struct neighbour *neigh)
{
	if (neigh)
		atomic_inc(&neigh->refcnt);
	return neigh;
}

#define neigh_hold(n)	atomic_inc(&(n)->refcnt)

static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	unsigned long now = jiffies;

	if (neigh->used != now)
		neigh->used = now;
	if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
		return __neigh_event_send(neigh, skb);
	return 0;
}

#ifdef CONFIG_BRIDGE_NETFILTER
static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
{
	unsigned int seq, hh_alen;

	do {
		seq = read_seqbegin(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(ETH_HLEN);
		memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN);
	} while (read_seqretry(&hh->hh_lock, seq));
	return 0;
}
#endif

static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
	unsigned int seq;
	int hh_len;

	do {
		seq = read_seqbegin(&hh->hh_lock);
		hh_len = hh->hh_len;
		if (likely(hh_len <= HH_DATA_MOD)) {
			/* this is inlined by gcc */
			memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
		} else {
			int hh_alen = HH_DATA_ALIGN(hh_len);

			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		}
	} while (read_seqretry(&hh->hh_lock, seq));

	skb_push(skb, hh_len);
	return dev_queue_xmit(skb);
}
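
/* Illustrative sketch (not from the original header): how the helpers above
 * typically combine on a transmit path.  Connected entries with a valid
 * cached header can use neigh_hh_output(); anything else goes through the
 * entry's ->output() method, which calls neigh_event_send() as needed to
 * kick off resolution.  This is a simplified sketch, not the exact logic of
 * any particular protocol.
 *
 *	static int my_xmit_one(struct neighbour *n, struct sk_buff *skb)
 *	{
 *		if ((n->nud_state & NUD_CONNECTED) && n->hh.hh_len)
 *			return neigh_hh_output(&n->hh, skb);
 *		return n->output(n, skb);
 *	}
 */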

static inline struct neighbour *
__neigh_lookup(struct neigh_table *tbl, const void *pkey,
	       struct net_device *dev, int creat)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n || !creat)
		return n;

	n = neigh_create(tbl, pkey, dev);
	return IS_ERR(n) ? NULL : n;
}

static inline struct neighbour *
__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
		     struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n)
		return n;

	return neigh_create(tbl, pkey, dev);
}

struct neighbour_cb {
	unsigned long sched_next;
	unsigned int flags;
};

#define LOCALLY_ENQUEUED 0x1

#define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)

static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
				     const struct net_device *dev)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&n->ha_lock);
		memcpy(dst, n->ha, dev->addr_len);
	} while (read_seqretry(&n->ha_lock, seq));
}
#endif