1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Floating proportions with flexible aging period
4  *
5  *  Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
6  */
7 
8 #ifndef _LINUX_FLEX_PROPORTIONS_H
9 #define _LINUX_FLEX_PROPORTIONS_H
10 
11 #include <linux/percpu_counter.h>
12 #include <linux/spinlock.h>
13 #include <linux/seqlock.h>
14 #include <linux/gfp.h>
15 
16 /*
17  * When maximum proportion of some event type is specified, this is the
18  * precision with which we allow limitting. Note that this creates an upper
19  * bound on the number of events per period like
20  *   ULLONG_MAX >> FPROP_FRAC_SHIFT.
21  */
22 #define FPROP_FRAC_SHIFT 10
23 #define FPROP_FRAC_BASE (1UL << FPROP_FRAC_SHIFT)
24 
25 /*
26  * ---- Global proportion definitions ----
27  */
28 struct fprop_global {
29 	/* Number of events in the current period */
30 	struct percpu_counter events;
31 	/* Current period */
32 	unsigned int period;
33 	/* Synchronization with period transitions */
34 	seqcount_t sequence;
35 };
36 
37 int fprop_global_init(struct fprop_global *p, gfp_t gfp);
38 void fprop_global_destroy(struct fprop_global *p);
39 bool fprop_new_period(struct fprop_global *p, int periods);
40 
41 /*
42  *  ---- SINGLE ----
43  */
44 struct fprop_local_single {
45 	/* the local events counter */
46 	unsigned long events;
47 	/* Period in which we last updated events */
48 	unsigned int period;
49 	raw_spinlock_t lock;	/* Protect period and numerator */
50 };
51 
52 #define INIT_FPROP_LOCAL_SINGLE(name)			\
53 {	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
54 }
55 
56 int fprop_local_init_single(struct fprop_local_single *pl);
57 void fprop_local_destroy_single(struct fprop_local_single *pl);
58 void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
59 void fprop_fraction_single(struct fprop_global *p,
60 	struct fprop_local_single *pl, unsigned long *numerator,
61 	unsigned long *denominator);
62 
63 static inline
fprop_inc_single(struct fprop_global * p,struct fprop_local_single * pl)64 void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
65 {
66 	unsigned long flags;
67 
68 	local_irq_save(flags);
69 	__fprop_inc_single(p, pl);
70 	local_irq_restore(flags);
71 }
72 
73 /*
74  * ---- PERCPU ----
75  */
76 struct fprop_local_percpu {
77 	/* the local events counter */
78 	struct percpu_counter events;
79 	/* Period in which we last updated events */
80 	unsigned int period;
81 	raw_spinlock_t lock;	/* Protect period and numerator */
82 };
83 
84 int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
85 void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
86 void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
87 		long nr);
88 void __fprop_add_percpu_max(struct fprop_global *p,
89 		struct fprop_local_percpu *pl, int max_frac, long nr);
90 void fprop_fraction_percpu(struct fprop_global *p,
91 	struct fprop_local_percpu *pl, unsigned long *numerator,
92 	unsigned long *denominator);
93 
94 static inline
fprop_inc_percpu(struct fprop_global * p,struct fprop_local_percpu * pl)95 void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
96 {
97 	unsigned long flags;
98 
99 	local_irq_save(flags);
100 	__fprop_add_percpu(p, pl, 1);
101 	local_irq_restore(flags);
102 }
103 
104 #endif
105