#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/percpu_counter.h>

struct netns_frags {
	int			nqueues;
	struct list_head	lru_list;
	spinlock_t		lru_lock;

	/* The percpu_counter "mem" needs to be cacheline aligned.
	 *  mem.count must not share a cacheline with other writers.
	 */
	struct percpu_counter   mem ____cacheline_aligned_in_smp;

	/* sysctls */
	int			timeout;
	int			high_thresh;
	int			low_thresh;
};

struct inet_frag_queue {
	spinlock_t		lock;
	struct timer_list	timer;      /* when will this queue expire? */
	struct list_head	lru_list;   /* lru list member */
	struct hlist_node	list;
	atomic_t		refcnt;
	struct sk_buff		*fragments; /* list of received fragments */
	struct sk_buff		*fragments_tail;
	ktime_t			stamp;
	int			len;        /* total length of orig datagram */
	int			meat;       /* bytes received so far */
	__u8			last_in;    /* first/last segment arrived? */

#define INET_FRAG_COMPLETE	4
#define INET_FRAG_FIRST_IN	2
#define INET_FRAG_LAST_IN	1

	u16			max_size;   /* maximum received fragment size */

	struct netns_frags	*net;
};

#define INETFRAGS_HASHSZ	64

struct inet_frags {
	struct hlist_head	hash[INETFRAGS_HASHSZ];
	/* This rwlock is a global lock (separate per IPv4, IPv6 and
	 * netfilter). Important to keep this on a separate cacheline.
	 */
	rwlock_t		lock ____cacheline_aligned_in_smp;
	int			secret_interval;
	struct timer_list	secret_timer;
	u32			rnd;
	int			qsize;

	unsigned int		(*hashfn)(struct inet_frag_queue *);
	bool			(*match)(struct inet_frag_queue *q, void *arg);
	void			(*constructor)(struct inet_frag_queue *q,
					       void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*skb_free)(struct sk_buff *);
	void			(*frag_expire)(unsigned long data);
};

void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);

void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
				struct inet_frags *f, int *work);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock);

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (atomic_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f, NULL);
}
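/* Usage sketch for the lookup/refcount API above. Illustrative only and
 * modeled on the real users (e.g. net/ipv4/ip_fragment.c); "my_frags",
 * "my_frag_queue", "key" and "net->my.frags" are hypothetical names, not
 * part of this header.
 *
 *	struct my_frag_queue {
 *		struct inet_frag_queue q;	// must be first member: the
 *						// core allocates qsize bytes
 *						// and returns &q, so users
 *						// rely on container_of()
 *		// protocol-specific match keys follow here
 *	};
 *
 *	read_lock(&my_frags.lock);
 *	// inet_frag_find() is annotated __releases(&f->lock): it drops the
 *	// read lock on every path, and on success returns a queue with an
 *	// elevated refcnt (NULL on allocation failure).
 *	q = inet_frag_find(&net->my.frags, &my_frags, &key, hash);
 *	if (q) {
 *		mfq = container_of(q, struct my_frag_queue, q);
 *		// ... queue the fragment under mfq->q.lock ...
 *		inet_frag_put(&mfq->q, &my_frags);	// drop _find()'s ref
 *	}
 */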
/* Memory Tracking Functions. */

/* The default percpu_counter batch size is not big enough to scale to
 * fragmentation memory accounting sizes.
 * The mem size of a 64K fragment is approx:
 *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
 */
static unsigned int frag_percpu_counter_batch = 130000;

static inline int frag_mem_limit(struct netns_frags *nf)
{
	return percpu_counter_read(&nf->mem);
}

static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
}

static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
{
	__percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
}

static inline void init_frag_mem_limit(struct netns_frags *nf)
{
	percpu_counter_init(&nf->mem, 0);
}

static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
	int res;

	/* The counter is also updated from softirq context, and
	 * percpu_counter_sum_positive() takes the counter's internal lock;
	 * disable BH so a softirq on this CPU cannot deadlock against us.
	 */
	local_bh_disable();
	res = percpu_counter_sum_positive(&nf->mem);
	local_bh_enable();

	return res;
}

static inline void inet_frag_lru_move(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_move_tail(&q->lru_list, &q->net->lru_list);
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_del(struct inet_frag_queue *q)
{
	spin_lock(&q->net->lru_lock);
	list_del(&q->lru_list);
	spin_unlock(&q->net->lru_lock);
}

static inline void inet_frag_lru_add(struct netns_frags *nf,
				     struct inet_frag_queue *q)
{
	spin_lock(&nf->lru_lock);
	list_add_tail(&q->lru_list, &nf->lru_list);
	spin_unlock(&nf->lru_lock);
}
#endif
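/* Setup and accounting sketch, illustrative only (the authoritative users
 * are the IPv4/IPv6 reassembly paths). Hypothetical names as above:
 *
 *	// Per-protocol setup: fill in the callbacks, then register.
 *	my_frags.hashfn		= my_hashfn;
 *	my_frags.match		= my_match;
 *	my_frags.constructor	= my_constructor;
 *	my_frags.destructor	= my_destructor;
 *	my_frags.skb_free	= NULL;
 *	my_frags.qsize		= sizeof(struct my_frag_queue);
 *	my_frags.frag_expire	= my_expire;
 *	my_frags.secret_interval = 10 * 60 * HZ;
 *	inet_frags_init(&my_frags);
 *
 *	// Per-netns setup: set timeout/high_thresh/low_thresh, then call
 *	// inet_frags_init_net(&net->my.frags), which also initializes the
 *	// "mem" percpu counter and the LRU list/lock.
 *
 *	// Hot path, on every arriving fragment:
 *	inet_frag_evictor(&net->my.frags, &my_frags, false);
 *			// no-op while mem is at or below high_thresh;
 *			// otherwise evicts LRU queues until mem drops
 *			// under low_thresh
 *	...
 *	add_frag_mem_limit(q, skb->truesize);	// fragment queued
 *	inet_frag_lru_move(q);			// queue recently used
 *
 *	// Teardown: inet_frag_kill() unhashes a queue and removes it from
 *	// the LRU; the final inet_frag_put() runs inet_frag_destroy(),
 *	// which frees the queued skbs and sub_frag_mem_limit()s everything
 *	// it freed, including the qsize of the queue itself.
 */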