#ifndef _NET_NEIGHBOUR_H
#define _NET_NEIGHBOUR_H

#include <linux/neighbour.h>

/*
 *	Generic neighbour manipulation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Changes:
 *
 *	Harald Welte:		<laforge@gnumonks.org>
 *		- Add neighbour cache statistics like rtstat
 */

#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>

#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/rtnetlink.h>

/*
 * NUD stands for "neighbor unreachability detection"
 */

#define NUD_IN_TIMER	(NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
#define NUD_VALID	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
#define NUD_CONNECTED	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)

struct neighbour;

enum {
	NEIGH_VAR_MCAST_PROBES,
	NEIGH_VAR_UCAST_PROBES,
	NEIGH_VAR_APP_PROBES,
	NEIGH_VAR_RETRANS_TIME,
	NEIGH_VAR_BASE_REACHABLE_TIME,
	NEIGH_VAR_DELAY_PROBE_TIME,
	NEIGH_VAR_GC_STALETIME,
	NEIGH_VAR_QUEUE_LEN_BYTES,
	NEIGH_VAR_PROXY_QLEN,
	NEIGH_VAR_ANYCAST_DELAY,
	NEIGH_VAR_PROXY_DELAY,
	NEIGH_VAR_LOCKTIME,
#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
	/* Following are used as a second way to access one of the above */
	NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
	NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */
	NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
	/* Following are used by "default" only */
	NEIGH_VAR_GC_INTERVAL,
	NEIGH_VAR_GC_THRESH1,
	NEIGH_VAR_GC_THRESH2,
	NEIGH_VAR_GC_THRESH3,
	NEIGH_VAR_MAX
};

struct neigh_parms {
#ifdef CONFIG_NET_NS
	struct net *net;
#endif
	struct net_device *dev;
	struct neigh_parms *next;
	int	(*neigh_setup)(struct neighbour *);
	void	(*neigh_cleanup)(struct neighbour *);
	struct neigh_table *tbl;

	void	*sysctl_table;

	int dead;
	atomic_t refcnt;
	struct rcu_head rcu_head;

	int	reachable_time;
	int	data[NEIGH_VAR_DATA_MAX];
	DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};

static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
{
	set_bit(index, p->data_state);
	p->data[index] = val;
}

#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])

/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used.
 * In other cases, NEIGH_VAR_SET should be used.
 */
#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
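/* Example (illustrative sketch only, not part of this header): from a
 * driver's ndo_neigh_setup() hook, per-device defaults are written with
 * NEIGH_VAR_INIT(), which stores the value without touching data_state;
 * later changes go through NEIGH_VAR_SET(), which also sets the matching
 * bit in data_state so the core can tell explicitly configured values from
 * untouched defaults.  The function name and values below are made up for
 * illustration:
 *
 *	static int mydrv_neigh_setup(struct net_device *dev,
 *				     struct neigh_parms *p)
 *	{
 *		NEIGH_VAR_INIT(p, UCAST_PROBES, 5);
 *		NEIGH_VAR_INIT(p, DELAY_PROBE_TIME, 2 * HZ);
 *		return 0;
 *	}
 *
 * and, outside of ndo_neigh_setup():
 *
 *	NEIGH_VAR_SET(p, RETRANS_TIME, HZ / 2);
 *	pr_debug("ucast probes: %d\n", NEIGH_VAR(p, UCAST_PROBES));
 */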
static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
{
	bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
}

static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
{
	bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
}

struct neigh_statistics {
	unsigned long allocs;		/* number of allocated neighs */
	unsigned long destroys;		/* number of destroyed neighs */
	unsigned long hash_grows;	/* number of hash resizes */

	unsigned long res_failed;	/* number of failed resolutions */

	unsigned long lookups;		/* number of lookups */
	unsigned long hits;		/* number of hits (among lookups) */

	unsigned long rcv_probes_mcast;	/* number of received mcast ipv6 */
	unsigned long rcv_probes_ucast; /* number of received ucast ipv6 */

	unsigned long periodic_gc_runs;	/* number of periodic GC runs */
	unsigned long forced_gc_runs;	/* number of forced GC runs */

	unsigned long unres_discards;	/* number of unresolved drops */
};

#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)

struct neighbour {
	struct neighbour __rcu	*next;
	struct neigh_table	*tbl;
	struct neigh_parms	*parms;
	unsigned long		confirmed;
	unsigned long		updated;
	rwlock_t		lock;
	atomic_t		refcnt;
	struct sk_buff_head	arp_queue;
	unsigned int		arp_queue_len_bytes;
	struct timer_list	timer;
	unsigned long		used;
	atomic_t		probes;
	__u8			flags;
	__u8			nud_state;
	__u8			type;
	__u8			dead;
	seqlock_t		ha_lock;
	unsigned char		ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
	struct hh_cache		hh;
	int			(*output)(struct neighbour *, struct sk_buff *);
	const struct neigh_ops	*ops;
	struct rcu_head		rcu;
	struct net_device	*dev;
	u8			primary_key[0];
};

struct neigh_ops {
	int			family;
	void			(*solicit)(struct neighbour *, struct sk_buff *);
	void			(*error_report)(struct neighbour *, struct sk_buff *);
	int			(*output)(struct neighbour *, struct sk_buff *);
	int			(*connected_output)(struct neighbour *, struct sk_buff *);
};
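/* Example (illustrative sketch only): a resolving protocol typically wires
 * its neigh_ops to the generic output helpers declared later in this header
 * and supplies only the address-resolution callbacks itself.  The names
 * myproto_solicit/myproto_error_report are placeholders, not real kernel
 * symbols; IPv4 ARP follows the same pattern:
 *
 *	static const struct neigh_ops myproto_generic_ops = {
 *		.family			= AF_INET,
 *		.solicit		= myproto_solicit,
 *		.error_report		= myproto_error_report,
 *		.output			= neigh_resolve_output,
 *		.connected_output	= neigh_connected_output,
 *	};
 */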
struct pneigh_entry {
	struct pneigh_entry	*next;
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	struct net_device	*dev;
	u8			flags;
	u8			key[0];
};

/*
 *	neighbour table manipulation
 */

#define NEIGH_NUM_HASH_RND	4

struct neigh_hash_table {
	struct neighbour __rcu	**hash_buckets;
	unsigned int		hash_shift;
	__u32			hash_rnd[NEIGH_NUM_HASH_RND];
	struct rcu_head		rcu;
};


struct neigh_table {
	struct neigh_table	*next;
	int			family;
	int			entry_size;
	int			key_len;
	__u32			(*hash)(const void *pkey,
					const struct net_device *dev,
					__u32 *hash_rnd);
	int			(*constructor)(struct neighbour *);
	int			(*pconstructor)(struct pneigh_entry *);
	void			(*pdestructor)(struct pneigh_entry *);
	void			(*proxy_redo)(struct sk_buff *skb);
	char			*id;
	struct neigh_parms	parms;
	int			gc_interval;
	int			gc_thresh1;
	int			gc_thresh2;
	int			gc_thresh3;
	unsigned long		last_flush;
	struct delayed_work	gc_work;
	struct timer_list	proxy_timer;
	struct sk_buff_head	proxy_queue;
	atomic_t		entries;
	rwlock_t		lock;
	unsigned long		last_rand;
	struct neigh_statistics	__percpu *stats;
	struct neigh_hash_table __rcu *nht;
	struct pneigh_entry	**phash_buckets;
};

static inline int neigh_parms_family(struct neigh_parms *p)
{
	return p->tbl->family;
}

#define NEIGH_PRIV_ALIGN	sizeof(long long)
#define NEIGH_ENTRY_SIZE(size)	ALIGN((size), NEIGH_PRIV_ALIGN)

static inline void *neighbour_priv(const struct neighbour *n)
{
	return (char *)n + n->tbl->entry_size;
}

/* flags for neigh_update() */
#define NEIGH_UPDATE_F_OVERRIDE			0x00000001
#define NEIGH_UPDATE_F_WEAK_OVERRIDE		0x00000002
#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER	0x00000004
#define NEIGH_UPDATE_F_ISROUTER			0x40000000
#define NEIGH_UPDATE_F_ADMIN			0x80000000

void neigh_table_init(struct neigh_table *tbl);
int neigh_table_clear(struct neigh_table *tbl);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev);
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey);
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref);
static inline struct neighbour *neigh_create(struct neigh_table *tbl,
					     const void *pkey,
					     struct net_device *dev)
{
	return __neigh_create(tbl, pkey, dev, true);
}
void neigh_destroy(struct neighbour *neigh);
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags);
void __neigh_set_probe_once(struct neighbour *neigh);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev);

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl);
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);

static inline
struct net *neigh_parms_net(const struct neigh_parms *parms)
{
	return read_pnet(&parms->net);
}

unsigned long neigh_rand_reach_time(unsigned long base);

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb);
struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
				   const void *key, struct net_device *dev,
				   int creat);
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
				     const void *key, struct net_device *dev);
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
		  struct net_device *dev);

static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
{
	return read_pnet(&pneigh->net);
}

void neigh_app_ns(struct neighbour *n);
void neigh_for_each(struct neigh_table *tbl,
		    void (*cb)(struct neighbour *, void *), void *cookie);
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *));
void pneigh_for_each(struct neigh_table *tbl,
		     void (*cb)(struct pneigh_entry *));
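/* Example (illustrative sketch only): pinning an IPv4 neighbour entry to a
 * fixed link-layer address, roughly what an administrative netlink request
 * ends up doing.  The bound net_device *dev, the use of arp_tbl and the
 * address values are assumptions made for the sake of the example:
 *
 *	__be32 gw = htonl(0xc0a80001);
 *	u8 lladdr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	struct neighbour *n;
 *
 *	n = neigh_lookup(&arp_tbl, &gw, dev);
 *	if (!n)
 *		n = neigh_create(&arp_tbl, &gw, dev);
 *
 * neigh_lookup() returns a referenced entry or NULL; neigh_create() may
 * return an ERR_PTR() value, hence the IS_ERR_OR_NULL() check below, and the
 * reference is dropped with neigh_release() (defined later in this header):
 *
 *	if (!IS_ERR_OR_NULL(n)) {
 *		neigh_update(n, lladdr, NUD_PERMANENT,
 *			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
 *		neigh_release(n);
 *	}
 */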
struct neigh_seq_state {
	struct seq_net_private p;
	struct neigh_table *tbl;
	struct neigh_hash_table *nht;
	void *(*neigh_sub_iter)(struct neigh_seq_state *state,
				struct neighbour *n, loff_t *pos);
	unsigned int bucket;
	unsigned int flags;
#define NEIGH_SEQ_NEIGH_ONLY	0x00000001
#define NEIGH_SEQ_IS_PNEIGH	0x00000002
#define NEIGH_SEQ_SKIP_NOARP	0x00000004
};
void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
		      unsigned int);
void *neigh_seq_next(struct seq_file *, void *, loff_t *);
void neigh_seq_stop(struct seq_file *, void *);

int neigh_proc_dointvec(struct ctl_table *ctl, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void __user *buffer,
				   size_t *lenp, loff_t *ppos);

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *proc_handler);
void neigh_sysctl_unregister(struct neigh_parms *p);

static inline void __neigh_parms_put(struct neigh_parms *parms)
{
	atomic_dec(&parms->refcnt);
}

static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
{
	atomic_inc(&parms->refcnt);
	return parms;
}

/*
 *	Neighbour references
 */

static inline void neigh_release(struct neighbour *neigh)
{
	if (atomic_dec_and_test(&neigh->refcnt))
		neigh_destroy(neigh);
}

static inline struct neighbour * neigh_clone(struct neighbour *neigh)
{
	if (neigh)
		atomic_inc(&neigh->refcnt);
	return neigh;
}

#define neigh_hold(n)	atomic_inc(&(n)->refcnt)

static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	unsigned long now = jiffies;

	if (neigh->used != now)
		neigh->used = now;
	if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
		return __neigh_event_send(neigh, skb);
	return 0;
}

#ifdef CONFIG_BRIDGE_NETFILTER
static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
{
	unsigned int seq, hh_alen;

	do {
		seq = read_seqbegin(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(ETH_HLEN);
		memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN);
	} while (read_seqretry(&hh->hh_lock, seq));
	return 0;
}
#endif

static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
	unsigned int seq;
	int hh_len;

	do {
		seq = read_seqbegin(&hh->hh_lock);
		hh_len = hh->hh_len;
		if (likely(hh_len <= HH_DATA_MOD)) {
			/* this is inlined by gcc */
			memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
		} else {
			int hh_alen = HH_DATA_ALIGN(hh_len);

			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		}
	} while (read_seqretry(&hh->hh_lock, seq));

	skb_push(skb, hh_len);
	return dev_queue_xmit(skb);
}
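/* Example (illustrative sketch only): transmit paths usually combine the
 * helpers above, using the cached hardware header when the entry is
 * NUD_CONNECTED and the header has been filled in, and otherwise calling
 * the per-state output method, which performs resolution or queueing.
 * The helper name below is not defined by this header; it is a sketch of
 * that dispatch:
 *
 *	static inline int my_neigh_xmit(struct neighbour *n, struct sk_buff *skb)
 *	{
 *		const struct hh_cache *hh = &n->hh;
 *
 *		if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
 *			return neigh_hh_output(hh, skb);
 *		return n->output(n, skb);
 *	}
 */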
static inline struct neighbour *
__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n || !creat)
		return n;

	n = neigh_create(tbl, pkey, dev);
	return IS_ERR(n) ? NULL : n;
}

static inline struct neighbour *
__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
		     struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n)
		return n;

	return neigh_create(tbl, pkey, dev);
}

struct neighbour_cb {
	unsigned long sched_next;
	unsigned int flags;
};

#define LOCALLY_ENQUEUED 0x1

#define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)

static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
				     const struct net_device *dev)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&n->ha_lock);
		memcpy(dst, n->ha, dev->addr_len);
	} while (read_seqretry(&n->ha_lock, seq));
}
#endif
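/* Example (illustrative sketch only): code that needs a stable copy of the
 * link-layer address should not read n->ha directly, since it can be
 * rewritten concurrently under ha_lock; neigh_ha_snapshot() retries until it
 * observes a consistent value.  The local buffer name is an assumption for
 * illustration:
 *
 *	char ha[MAX_ADDR_LEN];
 *
 *	neigh_ha_snapshot(ha, n, n->dev);
 *
 * ha[] then holds a consistent copy of the address, n->dev->addr_len bytes
 * long.
 */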