/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#ifndef _NET_INETPEER_H
#define _NET_INETPEER_H

#include <linux/types.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <net/ipv6.h>
#include <linux/atomic.h>

struct inetpeer_addr_base {
	union {
		__be32		a4;
		__be32		a6[4];
	};
};

struct inetpeer_addr {
	struct inetpeer_addr_base	addr;
	__u16				family;
};

struct inet_peer {
	/* group together avl_left, avl_right and daddr to speed up lookups */
	struct inet_peer __rcu	*avl_left, *avl_right;
	struct inetpeer_addr	daddr;
	__u32			avl_height;

	u32			metrics[RTAX_MAX];
	u32			rate_tokens;	/* rate limiting for ICMP */
	unsigned long		rate_last;
	unsigned long		pmtu_expires;
	u32			pmtu_orig;
	u32			pmtu_learned;
	struct inetpeer_addr_base redirect_learned;
	struct list_head	gc_list;
	/*
	 * Once inet_peer is queued for deletion (refcnt == -1), the following
	 * fields are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp.
	 * We can share memory with rcu_head to help keep inet_peer small.
	 */
	union {
		struct {
			atomic_t	rid;		/* Frag reception counter */
			atomic_t	ip_id_count;	/* IP ID for the next packet */
			__u32		tcp_ts;
			__u32		tcp_ts_stamp;
		};
		struct rcu_head		rcu;
		struct inet_peer	*gc_next;
	};

	/* following fields might be frequently dirtied */
	__u32			dtime;	/* time of last use of unreferenced entries */
	atomic_t		refcnt;
};

void inet_initpeers(void) __init;

#define INETPEER_METRICS_NEW	(~(u32) 0)

static inline bool inet_metrics_new(const struct inet_peer *p)
{
	return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
}

/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create);

static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
{
	struct inetpeer_addr daddr;

	daddr.addr.a4 = v4daddr;
	daddr.family = AF_INET;
	return inet_getpeer(&daddr, create);
}

static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr, int create)
{
	struct inetpeer_addr daddr;

	*(struct in6_addr *)daddr.addr.a6 = *v6daddr;
	daddr.family = AF_INET6;
	return inet_getpeer(&daddr, create);
}

/* can be called from BH context or outside */
extern void inet_putpeer(struct inet_peer *p);
extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);

extern void inetpeer_invalidate_tree(int family);

/*
 * temporary check to make sure we don't access rid, ip_id_count, tcp_ts,
 * tcp_ts_stamp if no refcount is taken on inet_peer
 */
static inline void inet_peer_refcheck(const struct inet_peer *p)
{
	WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
}

/* can be called with or without local BH being disabled */
static inline int inet_getid(struct inet_peer *p, int more)
{
	int old, new;

	more++;
	inet_peer_refcheck(p);
	do {
		old = atomic_read(&p->ip_id_count);
		new = old + more;
		if (!new)
			new = 1;
	} while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
	return new;
}

#endif /* _NET_INETPEER_H */
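
/*
 * Illustrative usage sketch (not part of this header): one way a caller
 * might combine the helpers declared above to rate-limit ICMP replies
 * towards an IPv4 destination. The function name and the default-allow
 * fallback are assumptions made for this example only; the real callers
 * live in the IPv4 ICMP and routing code. The timeout is in jiffies.
 *
 *	static bool example_icmp_ratelimit_ok(__be32 dst, int timeout)
 *	{
 *		struct inet_peer *peer;
 *		bool allow = true;	// default-allow when no peer entry exists
 *
 *		// look up the peer entry, creating it if necessary
 *		peer = inet_getpeer_v4(dst, 1);
 *		if (peer) {
 *			allow = inet_peer_xrlim_allow(peer, timeout);
 *			inet_putpeer(peer);	// drop the reference taken above
 *		}
 *		return allow;
 *	}
 */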