/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_GEN_STATS_H
#define __NET_GEN_STATS_H

#include <linux/gen_stats.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>

/* Throughput stats.
 * Must be initialized beforehand with gnet_stats_basic_sync_init().
 *
 * If no reads can ever occur parallel to writes (e.g. stack-allocated
 * bstats), then the internal stat values can be written to and read
 * from directly. Otherwise, use _bstats_set/update() for writes and
 * gnet_stats_add_basic() for reads.
 */
struct gnet_stats_basic_sync {
	u64_stats_t bytes;
	u64_stats_t packets;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

struct net_rate_estimator;

struct gnet_dump {
	spinlock_t *      lock;
	struct sk_buff *  skb;
	struct nlattr *   tail;

	/* Backward compatibility */
	int               compat_tc_stats;
	int               compat_xstats;
	int               padattr;
	void *            xstats;
	int               xstats_len;
	struct tc_stats   tc_stats;
};

void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
			  struct gnet_dump *d, int padattr);

int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
				 int tc_stats_type, int xstats_type,
				 spinlock_t *lock, struct gnet_dump *d,
				 int padattr);

int gnet_stats_copy_basic(const seqcount_t *running,
			  struct gnet_dump *d,
			  struct gnet_stats_basic_sync __percpu *cpu,
			  struct gnet_stats_basic_sync *b);
void gnet_stats_add_basic(const seqcount_t *running,
			  struct gnet_stats_basic_sync *bstats,
			  struct gnet_stats_basic_sync __percpu *cpu,
			  struct gnet_stats_basic_sync *b);
int gnet_stats_copy_basic_hw(const seqcount_t *running,
			     struct gnet_dump *d,
			     struct gnet_stats_basic_sync __percpu *cpu,
			     struct gnet_stats_basic_sync *b);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
			     struct net_rate_estimator __rcu **ptr);
int gnet_stats_copy_queue(struct gnet_dump *d,
			  struct gnet_stats_queue __percpu *cpu_q,
			  struct gnet_stats_queue *q, __u32 qlen);
void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
			  const struct gnet_stats_queue __percpu *cpu_q,
			  const struct gnet_stats_queue *q);
int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);

int gnet_stats_finish_copy(struct gnet_dump *d);

int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
		      struct gnet_stats_basic_sync __percpu *cpu_bstats,
		      struct net_rate_estimator __rcu **rate_est,
		      spinlock_t *lock,
		      seqcount_t *running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
			  struct gnet_stats_basic_sync __percpu *cpu_bstats,
			  struct net_rate_estimator __rcu **ptr,
			  spinlock_t *lock,
			  seqcount_t *running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
			struct gnet_stats_rate_est64 *sample);
#endif
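
/*
 * Usage sketch (illustrative only, not part of this header): a writer whose
 * counters may be read in parallel by stats dumps initializes the syncp once
 * and then updates through _bstats_update() from <net/sch_generic.h>
 * (qdisc_pkt_len() also comes from there). The "foo_sched_data" struct and
 * the foo_*() function names below are hypothetical.
 *
 *	struct foo_sched_data {
 *		struct gnet_stats_basic_sync bstats;
 *	};
 *
 *	static void foo_init_stats(struct foo_sched_data *q)
 *	{
 *		// must run before the first update or dump
 *		gnet_stats_basic_sync_init(&q->bstats);
 *	}
 *
 *	static void foo_account_skb(struct foo_sched_data *q,
 *				    struct sk_buff *skb)
 *	{
 *		// bytes/packets updated under the u64_stats syncp,
 *		// so concurrent readers see a consistent pair
 *		_bstats_update(&q->bstats, qdisc_pkt_len(skb), 1);
 *	}
 */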