xref: /openbmc/linux/fs/ceph/metric.c (revision 9eda7c1f)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/ceph/ceph_debug.h>

#include <linux/types.h>
#include <linux/percpu_counter.h>
#include <linux/math64.h>

#include "metric.h"
#include "mds_client.h"

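/*
 * Build one CEPH_MSG_CLIENT_METRICS message for the given session and hand
 * it to the messenger.  The payload is a struct ceph_metric_head followed by
 * one fixed-size record per metric: cap hit/miss counts and the accumulated
 * read, write and metadata latencies.  Returns false only when the message
 * allocation fails.
 */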
static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *s)
{
	struct ceph_metric_head *head;
	struct ceph_metric_cap *cap;
	struct ceph_metric_read_latency *read;
	struct ceph_metric_write_latency *write;
	struct ceph_metric_metadata_latency *meta;
	struct ceph_client_metric *m = &mdsc->metric;
	u64 nr_caps = atomic64_read(&m->total_caps);
	struct ceph_msg *msg;
	struct timespec64 ts;
	s64 sum;
	s32 items = 0;
	s32 len;

	len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
	      + sizeof(*meta);

	msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
	if (!msg) {
		pr_err("send metrics to mds%d, failed to allocate message\n",
		       s->s_mds);
		return false;
	}

	head = msg->front.iov_base;

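	/*
	 * Each record below carries its own small header: a __le32 type, a
	 * one-byte ver, a one-byte compat and a __le32 data_len, i.e. 10
	 * bytes in total.  data_len therefore advertises only the payload
	 * that follows it, which is why sizeof(*rec) - 10 is used throughout.
	 */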
	/* encode the cap metric */
	cap = (struct ceph_metric_cap *)(head + 1);
	cap->type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO);
	cap->ver = 1;
	cap->compat = 1;
	cap->data_len = cpu_to_le32(sizeof(*cap) - 10);
	cap->hit = cpu_to_le64(percpu_counter_sum(&mdsc->metric.i_caps_hit));
	cap->mis = cpu_to_le64(percpu_counter_sum(&mdsc->metric.i_caps_mis));
	cap->total = cpu_to_le64(nr_caps);
	items++;

	/* encode the read latency metric */
	read = (struct ceph_metric_read_latency *)(cap + 1);
	read->type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY);
	read->ver = 1;
	read->compat = 1;
	read->data_len = cpu_to_le32(sizeof(*read) - 10);
	/* the latency sums are ktime_t values, i.e. nanoseconds */
	sum = m->read_latency_sum;
	ts = ktime_to_timespec64(sum);
	read->sec = cpu_to_le32(ts.tv_sec);
	read->nsec = cpu_to_le32(ts.tv_nsec);
	items++;

	/* encode the write latency metric */
	write = (struct ceph_metric_write_latency *)(read + 1);
	write->type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY);
	write->ver = 1;
	write->compat = 1;
	write->data_len = cpu_to_le32(sizeof(*write) - 10);
	sum = m->write_latency_sum;
	ts = ktime_to_timespec64(sum);
	write->sec = cpu_to_le32(ts.tv_sec);
	write->nsec = cpu_to_le32(ts.tv_nsec);
	items++;

	/* encode the metadata latency metric */
	meta = (struct ceph_metric_metadata_latency *)(write + 1);
	meta->type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY);
	meta->ver = 1;
	meta->compat = 1;
	meta->data_len = cpu_to_le32(sizeof(*meta) - 10);
	sum = m->metadata_latency_sum;
	ts = ktime_to_timespec64(sum);
	meta->sec = cpu_to_le32(ts.tv_sec);
	meta->nsec = cpu_to_le32(ts.tv_nsec);
	items++;

	put_unaligned_le32(items, &head->num);
	msg->front.iov_len = len;
	msg->hdr.version = cpu_to_le16(1);
	msg->hdr.compat_version = cpu_to_le16(1);
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	dout("client%llu send metrics to mds%d\n",
	     ceph_client_gid(mdsc->fsc->client), s->s_mds);
	ceph_con_send(&s->s_con, msg);

	return true;
}

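/*
 * Pick a session to report metrics to: walk the session array and cache the
 * first live session whose MDS advertises CEPHFS_FEATURE_METRIC_COLLECT in
 * mdsc->metric.session, keeping the reference obtained from the lookup.
 * References to unsuitable sessions are dropped again.
 */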
static void metric_get_session(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *s;
	int i;

	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;

		/*
		 * Skip the session if the MDS doesn't support metric
		 * collection; otherwise the MDS will close the session's
		 * socket connection directly when it receives this message.
		 */
		if (check_session_state(s) &&
		    test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &s->s_features)) {
			mdsc->metric.session = s;
			break;
		}

		ceph_put_mds_session(s);
	}
	mutex_unlock(&mdsc->mutex);
}

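/*
 * Periodic worker: (re)acquire a metric-capable session if the cached one is
 * gone or no longer healthy, send the current metrics to it, and re-arm the
 * delayed work.  The work stops re-arming itself once the mds_client is
 * stopping or no suitable session can be found.
 */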
static void metric_delayed_work(struct work_struct *work)
{
	struct ceph_client_metric *m =
		container_of(work, struct ceph_client_metric, delayed_work.work);
	struct ceph_mds_client *mdsc =
		container_of(m, struct ceph_mds_client, metric);

	if (mdsc->stopping)
		return;

	if (!m->session || !check_session_state(m->session)) {
		if (m->session) {
			ceph_put_mds_session(m->session);
			m->session = NULL;
		}
		metric_get_session(mdsc);
	}
	if (m->session) {
		ceph_mdsc_send_metrics(mdsc, m->session);
		metric_schedule_delayed(m);
	}
}

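/*
 * Set up the client metric counters and latency state.  Callers pair this
 * with ceph_metric_destroy(); a rough sketch of the expected usage in the
 * mds_client setup/teardown paths (illustrative, not verbatim):
 *
 *	ret = ceph_metric_init(&mdsc->metric);
 *	if (ret)
 *		goto err_out;
 *	...
 *	ceph_metric_destroy(&mdsc->metric);
 */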
int ceph_metric_init(struct ceph_client_metric *m)
{
	int ret;

	if (!m)
		return -EINVAL;

	atomic64_set(&m->total_dentries, 0);
	ret = percpu_counter_init(&m->d_lease_hit, 0, GFP_KERNEL);
	if (ret)
		return ret;

	ret = percpu_counter_init(&m->d_lease_mis, 0, GFP_KERNEL);
	if (ret)
		goto err_d_lease_mis;

	atomic64_set(&m->total_caps, 0);
	ret = percpu_counter_init(&m->i_caps_hit, 0, GFP_KERNEL);
	if (ret)
		goto err_i_caps_hit;

	ret = percpu_counter_init(&m->i_caps_mis, 0, GFP_KERNEL);
	if (ret)
		goto err_i_caps_mis;

	spin_lock_init(&m->read_latency_lock);
	m->read_latency_sq_sum = 0;
	m->read_latency_min = KTIME_MAX;
	m->read_latency_max = 0;
	m->total_reads = 0;
	m->read_latency_sum = 0;

	spin_lock_init(&m->write_latency_lock);
	m->write_latency_sq_sum = 0;
	m->write_latency_min = KTIME_MAX;
	m->write_latency_max = 0;
	m->total_writes = 0;
	m->write_latency_sum = 0;

	spin_lock_init(&m->metadata_latency_lock);
	m->metadata_latency_sq_sum = 0;
	m->metadata_latency_min = KTIME_MAX;
	m->metadata_latency_max = 0;
	m->total_metadatas = 0;
	m->metadata_latency_sum = 0;

	m->session = NULL;
	INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work);

	return 0;

err_i_caps_mis:
	percpu_counter_destroy(&m->i_caps_hit);
err_i_caps_hit:
	percpu_counter_destroy(&m->d_lease_mis);
err_d_lease_mis:
	percpu_counter_destroy(&m->d_lease_hit);

	return ret;
}

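/*
 * Tear down everything set up by ceph_metric_init(): stop the delayed work,
 * release the percpu counters and drop any cached session reference.
 */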
void ceph_metric_destroy(struct ceph_client_metric *m)
{
	if (!m)
		return;

	/*
	 * Stop the delayed work first so it cannot run
	 * ceph_mdsc_send_metrics() against counters that are being
	 * destroyed below.
	 */
	cancel_delayed_work_sync(&m->delayed_work);

	percpu_counter_destroy(&m->i_caps_mis);
	percpu_counter_destroy(&m->i_caps_hit);
	percpu_counter_destroy(&m->d_lease_mis);
	percpu_counter_destroy(&m->d_lease_hit);

	if (m->session)
		ceph_put_mds_session(m->session);
}

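/*
 * Welford-style online update of the running statistics: besides the plain
 * min/max/sum bookkeeping, sq_sum accumulates the sum of squared deviations
 * using the identity
 *
 *	S_n = S_(n-1) + (x_n - avg_(n-1)) * (x_n - avg_n)
 *
 * so the variance of the samples seen so far is roughly sq_sum / total
 * without a second pass over the data.  The averages are computed with
 * rounded 64-bit division, so the result is an integer approximation.
 */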
static inline void __update_latency(ktime_t *totalp, ktime_t *lsump,
				    ktime_t *min, ktime_t *max,
				    ktime_t *sq_sump, ktime_t lat)
{
	ktime_t total, avg, sq, lsum;

	total = ++(*totalp);
	lsum = (*lsump += lat);

	if (unlikely(lat < *min))
		*min = lat;
	if (unlikely(lat > *max))
		*max = lat;

	if (unlikely(total == 1))
		return;

	/* the sq is (lat - old_avg) * (lat - new_avg) */
	avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
	sq = lat - avg;
	avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
	sq = sq * (lat - avg);
	*sq_sump += sq;
}

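/*
 * The helpers below are meant to be called from the read/write/metadata
 * completion paths.  A typical caller looks roughly like this (illustrative
 * sketch only, not copied from any one caller):
 *
 *	ktime_t start = ktime_get();
 *
 *	ret = <issue and wait for the OSD/MDS request>;
 *	ceph_update_read_latency(&mdsc->metric, start, ktime_get(), ret);
 */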
void ceph_update_read_latency(struct ceph_client_metric *m,
			      ktime_t r_start, ktime_t r_end,
			      int rc)
{
	ktime_t lat = ktime_sub(r_end, r_start);

	if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
		return;

	spin_lock(&m->read_latency_lock);
	__update_latency(&m->total_reads, &m->read_latency_sum,
			 &m->read_latency_min, &m->read_latency_max,
			 &m->read_latency_sq_sum, lat);
	spin_unlock(&m->read_latency_lock);
}

void ceph_update_write_latency(struct ceph_client_metric *m,
			       ktime_t r_start, ktime_t r_end,
			       int rc)
{
	ktime_t lat = ktime_sub(r_end, r_start);

	if (unlikely(rc && rc != -ETIMEDOUT))
		return;

	spin_lock(&m->write_latency_lock);
	__update_latency(&m->total_writes, &m->write_latency_sum,
			 &m->write_latency_min, &m->write_latency_max,
			 &m->write_latency_sq_sum, lat);
	spin_unlock(&m->write_latency_lock);
}

void ceph_update_metadata_latency(struct ceph_client_metric *m,
				  ktime_t r_start, ktime_t r_end,
				  int rc)
{
	ktime_t lat = ktime_sub(r_end, r_start);

	if (unlikely(rc && rc != -ENOENT))
		return;

	spin_lock(&m->metadata_latency_lock);
	__update_latency(&m->total_metadatas, &m->metadata_latency_sum,
			 &m->metadata_latency_min, &m->metadata_latency_max,
			 &m->metadata_latency_sq_sum, lat);
	spin_unlock(&m->metadata_latency_lock);
}