xref: /openbmc/linux/fs/ceph/metric.c (revision 405db98b)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/ceph/ceph_debug.h>

#include <linux/types.h>
#include <linux/percpu_counter.h>
#include <linux/math64.h>

#include "metric.h"
#include "mds_client.h"

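/*
 * Encode one record for each metric type (cap hit/miss, read/write/metadata
 * latency, dentry leases, opened files, pinned caps, opened inodes and
 * read/write I/O sizes) into a CEPH_MSG_CLIENT_METRICS message and send it
 * over the given MDS session.  Returns false only if the message cannot be
 * allocated.
 */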
static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *s)
{
	struct ceph_metric_head *head;
	struct ceph_metric_cap *cap;
	struct ceph_metric_read_latency *read;
	struct ceph_metric_write_latency *write;
	struct ceph_metric_metadata_latency *meta;
	struct ceph_metric_dlease *dlease;
	struct ceph_opened_files *files;
	struct ceph_pinned_icaps *icaps;
	struct ceph_opened_inodes *inodes;
	struct ceph_read_io_size *rsize;
	struct ceph_write_io_size *wsize;
	struct ceph_client_metric *m = &mdsc->metric;
	u64 nr_caps = atomic64_read(&m->total_caps);
	u32 header_len = sizeof(struct ceph_metric_header);
	struct ceph_msg *msg;
	struct timespec64 ts;
	s64 sum;
	s32 items = 0;
	s32 len;

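	/*
	 * The message front carries the head plus one fixed-size record of
	 * each metric type encoded below.
	 */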
	len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
	      + sizeof(*meta) + sizeof(*dlease) + sizeof(*files)
	      + sizeof(*icaps) + sizeof(*inodes) + sizeof(*rsize)
	      + sizeof(*wsize);

	msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
	if (!msg) {
		pr_err("send metrics to mds%d, failed to allocate message\n",
		       s->s_mds);
		return false;
	}

	head = msg->front.iov_base;

	/* encode the cap metric */
	cap = (struct ceph_metric_cap *)(head + 1);
	cap->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO);
	cap->header.ver = 1;
	cap->header.compat = 1;
	cap->header.data_len = cpu_to_le32(sizeof(*cap) - header_len);
	cap->hit = cpu_to_le64(percpu_counter_sum(&m->i_caps_hit));
	cap->mis = cpu_to_le64(percpu_counter_sum(&m->i_caps_mis));
	cap->total = cpu_to_le64(nr_caps);
	items++;

	/* encode the read latency metric */
	read = (struct ceph_metric_read_latency *)(cap + 1);
	read->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY);
	read->header.ver = 1;
	read->header.compat = 1;
	read->header.data_len = cpu_to_le32(sizeof(*read) - header_len);
	sum = m->metric[METRIC_READ].latency_sum;
	jiffies_to_timespec64(sum, &ts);
	read->sec = cpu_to_le32(ts.tv_sec);
	read->nsec = cpu_to_le32(ts.tv_nsec);
	items++;

	/* encode the write latency metric */
	write = (struct ceph_metric_write_latency *)(read + 1);
	write->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY);
	write->header.ver = 1;
	write->header.compat = 1;
	write->header.data_len = cpu_to_le32(sizeof(*write) - header_len);
	sum = m->metric[METRIC_WRITE].latency_sum;
	jiffies_to_timespec64(sum, &ts);
	write->sec = cpu_to_le32(ts.tv_sec);
	write->nsec = cpu_to_le32(ts.tv_nsec);
	items++;

	/* encode the metadata latency metric */
	meta = (struct ceph_metric_metadata_latency *)(write + 1);
	meta->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY);
	meta->header.ver = 1;
	meta->header.compat = 1;
	meta->header.data_len = cpu_to_le32(sizeof(*meta) - header_len);
	sum = m->metric[METRIC_METADATA].latency_sum;
	jiffies_to_timespec64(sum, &ts);
	meta->sec = cpu_to_le32(ts.tv_sec);
	meta->nsec = cpu_to_le32(ts.tv_nsec);
	items++;

	/* encode the dentry lease metric */
	dlease = (struct ceph_metric_dlease *)(meta + 1);
	dlease->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_DENTRY_LEASE);
	dlease->header.ver = 1;
	dlease->header.compat = 1;
	dlease->header.data_len = cpu_to_le32(sizeof(*dlease) - header_len);
	dlease->hit = cpu_to_le64(percpu_counter_sum(&m->d_lease_hit));
	dlease->mis = cpu_to_le64(percpu_counter_sum(&m->d_lease_mis));
	dlease->total = cpu_to_le64(atomic64_read(&m->total_dentries));
	items++;

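	/*
	 * The inode total sampled here is reported in the opened files,
	 * pinned icaps and opened inodes records below.
	 */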
	sum = percpu_counter_sum(&m->total_inodes);

	/* encode the opened files metric */
	files = (struct ceph_opened_files *)(dlease + 1);
	files->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_FILES);
	files->header.ver = 1;
	files->header.compat = 1;
	files->header.data_len = cpu_to_le32(sizeof(*files) - header_len);
	files->opened_files = cpu_to_le64(atomic64_read(&m->opened_files));
	files->total = cpu_to_le64(sum);
	items++;

	/* encode the pinned icaps metric */
	icaps = (struct ceph_pinned_icaps *)(files + 1);
	icaps->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_PINNED_ICAPS);
	icaps->header.ver = 1;
	icaps->header.compat = 1;
	icaps->header.data_len = cpu_to_le32(sizeof(*icaps) - header_len);
	icaps->pinned_icaps = cpu_to_le64(nr_caps);
	icaps->total = cpu_to_le64(sum);
	items++;

	/* encode the opened inodes metric */
	inodes = (struct ceph_opened_inodes *)(icaps + 1);
	inodes->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_INODES);
	inodes->header.ver = 1;
	inodes->header.compat = 1;
	inodes->header.data_len = cpu_to_le32(sizeof(*inodes) - header_len);
	inodes->opened_inodes = cpu_to_le64(percpu_counter_sum(&m->opened_inodes));
	inodes->total = cpu_to_le64(sum);
	items++;

	/* encode the read io size metric */
	rsize = (struct ceph_read_io_size *)(inodes + 1);
	rsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_IO_SIZES);
	rsize->header.ver = 1;
	rsize->header.compat = 1;
	rsize->header.data_len = cpu_to_le32(sizeof(*rsize) - header_len);
	rsize->total_ops = cpu_to_le64(m->metric[METRIC_READ].total);
	rsize->total_size = cpu_to_le64(m->metric[METRIC_READ].size_sum);
	items++;

	/* encode the write io size metric */
	wsize = (struct ceph_write_io_size *)(rsize + 1);
	wsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_IO_SIZES);
	wsize->header.ver = 1;
	wsize->header.compat = 1;
	wsize->header.data_len = cpu_to_le32(sizeof(*wsize) - header_len);
	wsize->total_ops = cpu_to_le64(m->metric[METRIC_WRITE].total);
	wsize->total_size = cpu_to_le64(m->metric[METRIC_WRITE].size_sum);
	items++;

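	/* record how many items were encoded and finalize the message header */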
	put_unaligned_le32(items, &head->num);
	msg->front.iov_len = len;
	msg->hdr.version = cpu_to_le16(1);
	msg->hdr.compat_version = cpu_to_le16(1);
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	dout("client%llu send metrics to mds%d\n",
	     ceph_client_gid(mdsc->fsc->client), s->s_mds);
	ceph_con_send(&s->s_con, msg);

	return true;
}

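/*
 * Find an MDS session that metrics can be sent to and cache it (with a
 * reference held) in mdsc->metric.session.  Only sessions in a healthy
 * state that advertise CEPHFS_FEATURE_METRIC_COLLECT are considered.
 */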
static void metric_get_session(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *s;
	int i;

	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;

		/*
		 * Skip it if the MDS doesn't support metric collection;
		 * otherwise the MDS will close the session's socket
		 * connection directly when it gets this message.
		 */
		if (check_session_state(s) &&
		    test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &s->s_features)) {
			mdsc->metric.session = s;
			break;
		}

		ceph_put_mds_session(s);
	}
	mutex_unlock(&mdsc->mutex);
}

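/*
 * Periodic worker: make sure a usable metric-capable session is cached
 * (re-selecting one if the old session went away), send the current
 * metrics to it and re-arm the timer.  Stops rescheduling once the client
 * is shutting down or no suitable session exists.
 */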
static void metric_delayed_work(struct work_struct *work)
{
	struct ceph_client_metric *m =
		container_of(work, struct ceph_client_metric, delayed_work.work);
	struct ceph_mds_client *mdsc =
		container_of(m, struct ceph_mds_client, metric);

	if (mdsc->stopping)
		return;

	if (!m->session || !check_session_state(m->session)) {
		if (m->session) {
			ceph_put_mds_session(m->session);
			m->session = NULL;
		}
		metric_get_session(mdsc);
	}
	if (m->session) {
		ceph_mdsc_send_metrics(mdsc, m->session);
		metric_schedule_delayed(m);
	}
}

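/*
 * Initialize all metric counters and the per-type latency/size state and
 * set up the delayed worker that periodically reports metrics to the MDS.
 * Returns 0 on success or a negative errno, unwinding any percpu counters
 * that were already initialized on failure.
 */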
int ceph_metric_init(struct ceph_client_metric *m)
{
	struct ceph_metric *metric;
	int ret, i;

	if (!m)
		return -EINVAL;

	atomic64_set(&m->total_dentries, 0);
	ret = percpu_counter_init(&m->d_lease_hit, 0, GFP_KERNEL);
	if (ret)
		return ret;

	ret = percpu_counter_init(&m->d_lease_mis, 0, GFP_KERNEL);
	if (ret)
		goto err_d_lease_mis;

	atomic64_set(&m->total_caps, 0);
	ret = percpu_counter_init(&m->i_caps_hit, 0, GFP_KERNEL);
	if (ret)
		goto err_i_caps_hit;

	ret = percpu_counter_init(&m->i_caps_mis, 0, GFP_KERNEL);
	if (ret)
		goto err_i_caps_mis;

	for (i = 0; i < METRIC_MAX; i++) {
		metric = &m->metric[i];
		spin_lock_init(&metric->lock);
		metric->size_sum = 0;
		metric->size_min = U64_MAX;
		metric->size_max = 0;
		metric->total = 0;
		metric->latency_sum = 0;
		metric->latency_sq_sum = 0;
		metric->latency_min = KTIME_MAX;
		metric->latency_max = 0;
	}

	atomic64_set(&m->opened_files, 0);
	ret = percpu_counter_init(&m->opened_inodes, 0, GFP_KERNEL);
	if (ret)
		goto err_opened_inodes;
	ret = percpu_counter_init(&m->total_inodes, 0, GFP_KERNEL);
	if (ret)
		goto err_total_inodes;

	m->session = NULL;
	INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work);

	return 0;

err_total_inodes:
	percpu_counter_destroy(&m->opened_inodes);
err_opened_inodes:
	percpu_counter_destroy(&m->i_caps_mis);
err_i_caps_mis:
	percpu_counter_destroy(&m->i_caps_hit);
err_i_caps_hit:
	percpu_counter_destroy(&m->d_lease_mis);
err_d_lease_mis:
	percpu_counter_destroy(&m->d_lease_hit);

	return ret;
}

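/*
 * Tear everything down: stop the delayed worker, destroy the percpu
 * counters and drop the cached MDS session reference (if any).
 */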
void ceph_metric_destroy(struct ceph_client_metric *m)
{
	if (!m)
		return;

	cancel_delayed_work_sync(&m->delayed_work);

	percpu_counter_destroy(&m->total_inodes);
	percpu_counter_destroy(&m->opened_inodes);
	percpu_counter_destroy(&m->i_caps_mis);
	percpu_counter_destroy(&m->i_caps_hit);
	percpu_counter_destroy(&m->d_lease_mis);
	percpu_counter_destroy(&m->d_lease_hit);

	ceph_put_mds_session(m->session);
}

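/* update a running min/max pair with a new sample value */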
#define METRIC_UPDATE_MIN_MAX(min, max, new)	\
{						\
	if (unlikely(new < min))		\
		min = new;			\
	if (unlikely(new > max))		\
		max = new;			\
}

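/*
 * Welford-style running update of the sum of squared deviations:
 * accumulate (lat - old_avg) * (lat - new_avg) into *sq_sump so that a
 * standard deviation can later be derived from it.  Nothing is
 * accumulated for the very first sample, since there is no deviation yet.
 */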
static inline void __update_stdev(ktime_t total, ktime_t lsum,
				  ktime_t *sq_sump, ktime_t lat)
{
	ktime_t avg, sq;

	if (unlikely(total == 1))
		return;

	/* the sq is (lat - old_avg) * (lat - new_avg) */
	avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
	sq = lat - avg;
	avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
	sq = sq * (lat - avg);
	*sq_sump += sq;
}

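/*
 * Fold one completed request into the given metric bucket: bump the op
 * count, accumulate the I/O size and latency sums, track the min/max and
 * update the running squared-deviation sum.  Requests that failed with
 * anything other than -ENOENT or -ETIMEDOUT are not counted.
 */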
void ceph_update_metrics(struct ceph_metric *m,
			 ktime_t r_start, ktime_t r_end,
			 unsigned int size, int rc)
{
	ktime_t lat = ktime_sub(r_end, r_start);
	ktime_t total;

	if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
		return;

	spin_lock(&m->lock);
	total = ++m->total;
	m->size_sum += size;
	METRIC_UPDATE_MIN_MAX(m->size_min, m->size_max, size);
	m->latency_sum += lat;
	METRIC_UPDATE_MIN_MAX(m->latency_min, m->latency_max, lat);
	__update_stdev(total, m->latency_sum, &m->latency_sq_sum, lat);
	spin_unlock(&m->lock);
}