xref: /openbmc/linux/net/smc/smc_stats.h (revision e0e4b8fa)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Macros for SMC statistics
 *
 * Copyright IBM Corp. 2021
 *
 * Author(s):  Guvenc Gulce
 */

#ifndef NET_SMC_SMC_STATS_H_
#define NET_SMC_SMC_STATS_H_
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/smc.h>

#include "smc_clc.h"

#define SMC_MAX_FBACK_RSN_CNT 30

extern struct smc_stats __percpu *smc_stats;	/* per cpu counters for SMC */
extern struct smc_stats_reason fback_rsn;	/* fallback reason statistics */
extern struct mutex smc_stat_fback_rsn;		/* protects fback_rsn */

enum {
	SMC_BUF_8K,
	SMC_BUF_16K,
	SMC_BUF_32K,
	SMC_BUF_64K,
	SMC_BUF_128K,
	SMC_BUF_256K,
	SMC_BUF_512K,
	SMC_BUF_1024K,
	SMC_BUF_G_1024K,
	SMC_BUF_MAX,
};
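/*
 * The values above index size-histogram buckets.  As used by the
 * SMC_STAT_*_SUB() macros below, each bucket roughly covers sizes up to
 * its named power of two (SMC_BUF_8K for sizes up to 8KB, SMC_BUF_16K for
 * the next range, and so on), with SMC_BUF_G_1024K as the catch-all for
 * the largest buffers and payloads.
 */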

struct smc_stats_fback {
	int	fback_code;	/* reason code for the fallback to TCP */
	u16	count;		/* occurrences of this reason code */
};

struct smc_stats_reason {
	struct	smc_stats_fback srv[SMC_MAX_FBACK_RSN_CNT];
	struct	smc_stats_fback clnt[SMC_MAX_FBACK_RSN_CNT];
	u64			srv_fback_cnt;
	u64			clnt_fback_cnt;
};
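/*
 * Fallback reason bookkeeping: up to SMC_MAX_FBACK_RSN_CNT distinct
 * (fback_code, count) pairs are kept per role, together with the total
 * number of fallbacks seen as server and as client.  fback_rsn above is
 * the single global instance of this struct.
 */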

struct smc_stats_rmbcnt {
	u64	buf_size_small_peer_cnt;	/* peer buffer too small */
	u64	buf_size_small_cnt;		/* local buffer too small */
	u64	buf_full_peer_cnt;		/* peer buffer ran full */
	u64	buf_full_cnt;			/* local buffer ran full */
	u64	reuse_cnt;			/* buffer reuse events */
	u64	alloc_cnt;			/* buffer allocations */
	u64	dgrade_cnt;			/* downgraded to a smaller buffer */
};

struct smc_stats_memsize {
	u64	buf[SMC_BUF_MAX];	/* one counter per SMC_BUF_* bucket */
};

struct smc_stats_tech {
	struct smc_stats_memsize tx_rmbsize;	/* histogram of tx buffer sizes */
	struct smc_stats_memsize rx_rmbsize;	/* histogram of rx buffer sizes */
	struct smc_stats_memsize tx_pd;		/* histogram of tx payload sizes */
	struct smc_stats_memsize rx_pd;		/* histogram of rx payload sizes */
	struct smc_stats_rmbcnt rmb_tx;		/* tx buffer event counters */
	struct smc_stats_rmbcnt rmb_rx;		/* rx buffer event counters */
	u64			clnt_v1_succ_cnt;	/* SMCv1 connects as client */
	u64			clnt_v2_succ_cnt;	/* SMCv2 connects as client */
	u64			srv_v1_succ_cnt;	/* SMCv1 connects as server */
	u64			srv_v2_succ_cnt;	/* SMCv2 connects as server */
	u64			sendpage_cnt;	/* sendpage() calls */
	u64			urg_data_cnt;	/* urgent (MSG_OOB) data events */
	u64			splice_cnt;	/* splice() calls */
	u64			cork_cnt;	/* corked sends */
	u64			ndly_cnt;	/* TCP_NODELAY sends */
	u64			rx_bytes;	/* bytes received */
	u64			tx_bytes;	/* bytes sent */
	u64			rx_cnt;		/* receive calls */
	u64			tx_cnt;		/* send calls */
};

struct smc_stats {
	struct smc_stats_tech	smc[2];
	u64			clnt_hshake_err_cnt;
	u64			srv_hshake_err_cnt;
};
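/*
 * One smc_stats_tech block per transport: smc[] is indexed by SMC_TYPE_D
 * (SMC-D/ISM) and SMC_TYPE_R (SMC-R/RDMA) from smc_clc.h, which is how the
 * macros below select the right set of counters.
 */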

#define SMC_STAT_PAYLOAD_SUB(_tech, key, _len, _rc) \
do { \
	typeof(_tech) t = (_tech); \
	typeof(_len) l = (_len); \
	int _pos = fls64((l) >> 13); \
	typeof(_rc) r = (_rc); \
	int m = SMC_BUF_MAX - 1; \
	this_cpu_inc((*smc_stats).smc[t].key ## _cnt); \
	if (r <= 0) \
		break; \
	_pos = (_pos < m) ? ((l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
	this_cpu_inc((*smc_stats).smc[t].key ## _pd.buf[_pos]); \
	this_cpu_add((*smc_stats).smc[t].key ## _bytes, r); \
} \
while (0)
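/*
 * Worked example of the bucket computation above (a sketch, not taken from
 * a call site): for l = 20000, fls64(20000 >> 13) = 2 and 20000 is not a
 * power of two, so the count lands in buf[2] (SMC_BUF_32K); for l = 16384,
 * the exact power of two is shifted down one slot into buf[1] (SMC_BUF_16K);
 * anything above 1MB is clamped to buf[SMC_BUF_MAX - 1] (SMC_BUF_G_1024K).
 */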

#define SMC_STAT_TX_PAYLOAD(_smc, length, rcode) \
do { \
	typeof(_smc) __smc = _smc; \
	typeof(length) _len = (length); \
	typeof(rcode) _rc = (rcode); \
	bool is_smcd = !__smc->conn.lnk; \
	if (is_smcd) \
		SMC_STAT_PAYLOAD_SUB(SMC_TYPE_D, tx, _len, _rc); \
	else \
		SMC_STAT_PAYLOAD_SUB(SMC_TYPE_R, tx, _len, _rc); \
} \
while (0)

#define SMC_STAT_RX_PAYLOAD(_smc, length, rcode) \
do { \
	typeof(_smc) __smc = _smc; \
	typeof(length) _len = (length); \
	typeof(rcode) _rc = (rcode); \
	bool is_smcd = !__smc->conn.lnk; \
	if (is_smcd) \
		SMC_STAT_PAYLOAD_SUB(SMC_TYPE_D, rx, _len, _rc); \
	else \
		SMC_STAT_PAYLOAD_SUB(SMC_TYPE_R, rx, _len, _rc); \
} \
while (0)
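/*
 * Illustrative call-site sketch (hypothetical, not copied from the SMC
 * send/receive paths): after a send attempt that tried to queue `len'
 * bytes and returned `rc', the transmit side would account it with
 *
 *	SMC_STAT_TX_PAYLOAD(smc, len, rc);
 *
 * where `smc' is the connection's struct smc_sock.  The macro picks the
 * SMC-D or SMC-R counter set by checking whether the connection has an
 * RDMA link (conn.lnk).
 */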

#define SMC_STAT_RMB_SIZE_SUB(_tech, k, _len) \
do { \
	typeof(_len) _l = (_len); \
	typeof(_tech) t = (_tech); \
	int _pos = fls((_l) >> 13); \
	int m = SMC_BUF_MAX - 1; \
	_pos = (_pos < m) ? ((_l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
	this_cpu_inc((*smc_stats).smc[t].k ## _rmbsize.buf[_pos]); \
} \
while (0)

#define SMC_STAT_RMB_SUB(type, t, key) \
	this_cpu_inc((*smc_stats).smc[t].rmb ## _ ## key.type ## _cnt)

#define SMC_STAT_RMB_SIZE(_is_smcd, _is_rx, _len) \
do { \
	typeof(_is_smcd) is_d = (_is_smcd); \
	typeof(_is_rx) is_r = (_is_rx); \
	typeof(_len) l = (_len); \
	if ((is_d) && (is_r)) \
		SMC_STAT_RMB_SIZE_SUB(SMC_TYPE_D, rx, l); \
	if ((is_d) && !(is_r)) \
		SMC_STAT_RMB_SIZE_SUB(SMC_TYPE_D, tx, l); \
	if (!(is_d) && (is_r)) \
		SMC_STAT_RMB_SIZE_SUB(SMC_TYPE_R, rx, l); \
	if (!(is_d) && !(is_r)) \
		SMC_STAT_RMB_SIZE_SUB(SMC_TYPE_R, tx, l); \
} \
while (0)

#define SMC_STAT_RMB(type, _is_smcd, _is_rx) \
do { \
	typeof(_is_smcd) is_d = (_is_smcd); \
	typeof(_is_rx) is_r = (_is_rx); \
	if ((is_d) && (is_r)) \
		SMC_STAT_RMB_SUB(type, SMC_TYPE_D, rx); \
	if ((is_d) && !(is_r)) \
		SMC_STAT_RMB_SUB(type, SMC_TYPE_D, tx); \
	if (!(is_d) && (is_r)) \
		SMC_STAT_RMB_SUB(type, SMC_TYPE_R, rx); \
	if (!(is_d) && !(is_r)) \
		SMC_STAT_RMB_SUB(type, SMC_TYPE_R, tx); \
} \
while (0)

#define SMC_STAT_BUF_REUSE(is_smcd, is_rx) \
	SMC_STAT_RMB(reuse, is_smcd, is_rx)

#define SMC_STAT_RMB_ALLOC(is_smcd, is_rx) \
	SMC_STAT_RMB(alloc, is_smcd, is_rx)

#define SMC_STAT_RMB_DOWNGRADED(is_smcd, is_rx) \
	SMC_STAT_RMB(dgrade, is_smcd, is_rx)

#define SMC_STAT_RMB_TX_PEER_FULL(is_smcd) \
	SMC_STAT_RMB(buf_full_peer, is_smcd, false)

#define SMC_STAT_RMB_TX_FULL(is_smcd) \
	SMC_STAT_RMB(buf_full, is_smcd, false)

#define SMC_STAT_RMB_TX_PEER_SIZE_SMALL(is_smcd) \
	SMC_STAT_RMB(buf_size_small_peer, is_smcd, false)

#define SMC_STAT_RMB_TX_SIZE_SMALL(is_smcd) \
	SMC_STAT_RMB(buf_size_small, is_smcd, false)

#define SMC_STAT_RMB_RX_SIZE_SMALL(is_smcd) \
	SMC_STAT_RMB(buf_size_small, is_smcd, true)

#define SMC_STAT_RMB_RX_FULL(is_smcd) \
	SMC_STAT_RMB(buf_full, is_smcd, true)

#define SMC_STAT_INC(is_smcd, type) \
do { \
	if ((is_smcd)) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].type); \
	else \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].type); \
} \
while (0)
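/*
 * `type' is substituted directly as the member name, so callers pass the
 * full counter name from struct smc_stats_tech.  A hypothetical example
 * (not a real call site):
 *
 *	SMC_STAT_INC(!smc->conn.lnk, cork_cnt);
 */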

#define SMC_STAT_CLNT_SUCC_INC(_aclc) \
do { \
	typeof(_aclc) acl = (_aclc); \
	bool is_v2 = (acl->hdr.version == SMC_V2); \
	bool is_smcd = (acl->hdr.typev1 == SMC_TYPE_D); \
	if (is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v2_succ_cnt); \
	else if (is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v2_succ_cnt); \
	else if (!is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v1_succ_cnt); \
	else if (!is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v1_succ_cnt); \
} \
while (0)

#define SMC_STAT_SERV_SUCC_INC(_ini) \
do { \
	typeof(_ini) i = (_ini); \
	bool is_v2 = (i->smcd_version & SMC_V2); \
	bool is_smcd = (i->is_smcd); \
	if (is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
	else if (is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v2_succ_cnt); \
	else if (!is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v1_succ_cnt); \
	else if (!is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v1_succ_cnt); \
} \
while (0)

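/*
 * Assumption (see the matching smc_stats.c): smc_stats_init() allocates the
 * per-CPU smc_stats counters declared above and smc_stats_exit() frees them
 * again; __init marks the init routine as discardable after initialization.
 */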
int smc_stats_init(void) __init;
void smc_stats_exit(void);

#endif /* NET_SMC_SMC_STATS_H_ */