/*
 * INETPEER - A storage for permanent information about peers
 *
 * Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#ifndef _NET_INETPEER_H
#define _NET_INETPEER_H

#include <linux/types.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <net/ipv6.h>
#include <linux/atomic.h>

/* IPv4 address key for cache lookups */
struct ipv4_addr_key {
	__be32	addr;
	int	vif;
};

#define INETPEER_MAXKEYSZ	(sizeof(struct in6_addr) / sizeof(u32))

struct inetpeer_addr {
	union {
		struct ipv4_addr_key	a4;
		struct in6_addr		a6;
		u32			key[INETPEER_MAXKEYSZ];
	};
	__u16	family;
};

struct inet_peer {
	/* group together avl_left, avl_right and daddr to speed up lookups */
	struct inet_peer __rcu	*avl_left, *avl_right;
	struct inetpeer_addr	daddr;
	__u32			avl_height;

	u32			metrics[RTAX_MAX];
	u32			rate_tokens;	/* rate limiting for ICMP */
	unsigned long		rate_last;
	union {
		struct list_head	gc_list;
		struct rcu_head		gc_rcu;
	};
	/*
	 * Once inet_peer is queued for deletion (refcnt == -1), the following
	 * field is no longer available: rid
	 * We can share memory with rcu_head to help keep inet_peer small.
	 */
	union {
		struct {
			atomic_t	rid;	/* Frag reception counter */
		};
		struct rcu_head		rcu;
		struct inet_peer	*gc_next;
	};

	/* following fields might be frequently dirtied */
	__u32			dtime;	/* time of last use of unreferenced entries */
	atomic_t		refcnt;
};

struct inet_peer_base {
	struct inet_peer __rcu	*root;
	seqlock_t		lock;
	int			total;
};

void inet_peer_base_init(struct inet_peer_base *);

void inet_initpeers(void) __init;

#define INETPEER_METRICS_NEW	(~(u32) 0)

static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
{
	iaddr->a4.addr = ip;
	iaddr->family = AF_INET;
}

static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
{
	return iaddr->a4.addr;
}

static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
					struct in6_addr *in6)
{
	iaddr->a6 = *in6;
	iaddr->family = AF_INET6;
}

static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
{
	return &iaddr->a6;
}

/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create);

static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
						__be32 v4daddr,
						int vif, int create)
{
	struct inetpeer_addr daddr;

	daddr.a4.addr = v4daddr;
	daddr.a4.vif = vif;
	daddr.family = AF_INET;
	return inet_getpeer(base, &daddr, create);
}

static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
						const struct in6_addr *v6daddr,
						int create)
{
	struct inetpeer_addr daddr;

	daddr.a6 = *v6daddr;
	daddr.family = AF_INET6;
	return inet_getpeer(base, &daddr, create);
}

static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
				    const struct inetpeer_addr *b)
{
	int i, n;

	if (a->family == AF_INET)
		n = sizeof(a->a4) / sizeof(u32);
	else
		n = sizeof(a->a6) / sizeof(u32);

	for (i = 0; i < n; i++) {
		if (a->key[i] == b->key[i])
			continue;
		if (a->key[i] < b->key[i])
			return -1;
		return 1;
	}

	return 0;
}

/* can be called from BH context or outside */
void inet_putpeer(struct inet_peer *p);
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);

void inetpeer_invalidate_tree(struct inet_peer_base *);
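
/*
 * Illustrative usage sketch (not part of this header): a typical caller looks
 * a peer up with create == 1, consults its per-peer state, and drops the
 * reference when done, e.g. for ICMP rate limiting.  The 'base', 'dst_ip',
 * 'vif' and 'timeout' names below are placeholders, not symbols defined here:
 *
 *	struct inet_peer *peer;
 *	bool allow;
 *
 *	peer = inet_getpeer_v4(base, dst_ip, vif, 1);
 *	allow = inet_peer_xrlim_allow(peer, timeout);
 *	if (peer)
 *		inet_putpeer(peer);
 */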

#endif /* _NET_INETPEER_H */