/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/ceph/ceph_debug.h>

#include <linux/types.h>
#include <linux/percpu_counter.h>
#include <linux/math64.h>

#include "metric.h"
#include "mds_client.h"

static void ktime_to_ceph_timespec(struct ceph_timespec *ts, ktime_t val)
{
	struct timespec64 t = ktime_to_timespec64(val);
	ceph_encode_timespec64(ts, &t);
}

static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *s)
{
	struct ceph_metric_head *head;
	struct ceph_metric_cap *cap;
	struct ceph_metric_read_latency *read;
	struct ceph_metric_write_latency *write;
	struct ceph_metric_metadata_latency *meta;
	struct ceph_metric_dlease *dlease;
	struct ceph_opened_files *files;
	struct ceph_pinned_icaps *icaps;
	struct ceph_opened_inodes *inodes;
	struct ceph_read_io_size *rsize;
	struct ceph_write_io_size *wsize;
	struct ceph_client_metric *m = &mdsc->metric;
	u64 nr_caps = atomic64_read(&m->total_caps);
	u32 header_len = sizeof(struct ceph_metric_header);
	struct ceph_msg *msg;
	s64 sum;
	s32 items = 0;
	s32 len;

	len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
	      + sizeof(*meta) + sizeof(*dlease) + sizeof(*files)
	      + sizeof(*icaps) + sizeof(*inodes) + sizeof(*rsize)
	      + sizeof(*wsize);

	msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
	if (!msg) {
		pr_err("send metrics to mds%d, failed to allocate message\n",
		       s->s_mds);
		return false;
	}

	head = msg->front.iov_base;

	/* encode the cap metric */
	cap = (struct ceph_metric_cap *)(head + 1);
	cap->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO);
	cap->header.ver = 1;
	cap->header.compat = 1;
	cap->header.data_len = cpu_to_le32(sizeof(*cap) - header_len);
	cap->hit = cpu_to_le64(percpu_counter_sum(&m->i_caps_hit));
	cap->mis = cpu_to_le64(percpu_counter_sum(&m->i_caps_mis));
	cap->total = cpu_to_le64(nr_caps);
	items++;

	/* encode the read latency metric */
	read = (struct ceph_metric_read_latency *)(cap + 1);
	read->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY);
	read->header.ver = 2;
	read->header.compat = 1;
	read->header.data_len = cpu_to_le32(sizeof(*read) - header_len);
	sum = m->metric[METRIC_READ].latency_sum;
	ktime_to_ceph_timespec(&read->lat, sum);
	ktime_to_ceph_timespec(&read->avg, m->metric[METRIC_READ].latency_avg);
	read->sq_sum = cpu_to_le64(m->metric[METRIC_READ].latency_sq_sum);
	read->count = cpu_to_le64(m->metric[METRIC_READ].total);
	items++;

	/* encode the write latency metric */
	write = (struct ceph_metric_write_latency *)(read + 1);
	write->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY);
	write->header.ver = 2;
	write->header.compat = 1;
	write->header.data_len = cpu_to_le32(sizeof(*write) - header_len);
	sum = m->metric[METRIC_WRITE].latency_sum;
	ktime_to_ceph_timespec(&write->lat, sum);
	ktime_to_ceph_timespec(&write->avg, m->metric[METRIC_WRITE].latency_avg);
	write->sq_sum = cpu_to_le64(m->metric[METRIC_WRITE].latency_sq_sum);
	write->count = cpu_to_le64(m->metric[METRIC_WRITE].total);
	items++;

	/* encode the metadata latency metric */
	meta = (struct ceph_metric_metadata_latency *)(write + 1);
	meta->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY);
	meta->header.ver = 2;
	meta->header.compat = 1;
	meta->header.data_len = cpu_to_le32(sizeof(*meta) - header_len);
	sum = m->metric[METRIC_METADATA].latency_sum;
	ktime_to_ceph_timespec(&meta->lat, sum);
	ktime_to_ceph_timespec(&meta->avg, m->metric[METRIC_METADATA].latency_avg);
	meta->sq_sum = cpu_to_le64(m->metric[METRIC_METADATA].latency_sq_sum);
	meta->count = cpu_to_le64(m->metric[METRIC_METADATA].total);
	items++;

	/* encode the dentry lease metric */
	dlease = (struct ceph_metric_dlease *)(meta + 1);
	dlease->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_DENTRY_LEASE);
	dlease->header.ver = 1;
	dlease->header.compat = 1;
	dlease->header.data_len = cpu_to_le32(sizeof(*dlease) - header_len);
	dlease->hit = cpu_to_le64(percpu_counter_sum(&m->d_lease_hit));
	dlease->mis = cpu_to_le64(percpu_counter_sum(&m->d_lease_mis));
	dlease->total = cpu_to_le64(atomic64_read(&m->total_dentries));
	items++;

	sum = percpu_counter_sum(&m->total_inodes);

	/* encode the opened files metric */
	files = (struct ceph_opened_files *)(dlease + 1);
	files->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_FILES);
	files->header.ver = 1;
	files->header.compat = 1;
	files->header.data_len = cpu_to_le32(sizeof(*files) - header_len);
	files->opened_files = cpu_to_le64(atomic64_read(&m->opened_files));
	files->total = cpu_to_le64(sum);
	items++;

	/* encode the pinned icaps metric */
	icaps = (struct ceph_pinned_icaps *)(files + 1);
	icaps->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_PINNED_ICAPS);
	icaps->header.ver = 1;
	icaps->header.compat = 1;
	icaps->header.data_len = cpu_to_le32(sizeof(*icaps) - header_len);
	icaps->pinned_icaps = cpu_to_le64(nr_caps);
	icaps->total = cpu_to_le64(sum);
	items++;

	/* encode the opened inodes metric */
	inodes = (struct ceph_opened_inodes *)(icaps + 1);
	inodes->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_INODES);
	inodes->header.ver = 1;
	inodes->header.compat = 1;
	inodes->header.data_len = cpu_to_le32(sizeof(*inodes) - header_len);
	inodes->opened_inodes = cpu_to_le64(percpu_counter_sum(&m->opened_inodes));
	inodes->total = cpu_to_le64(sum);
	items++;

	/* encode the read io size metric */
	rsize = (struct ceph_read_io_size *)(inodes + 1);
	rsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_IO_SIZES);
	rsize->header.ver = 1;
	rsize->header.compat = 1;
	rsize->header.data_len = cpu_to_le32(sizeof(*rsize) - header_len);
	rsize->total_ops = cpu_to_le64(m->metric[METRIC_READ].total);
	rsize->total_size = cpu_to_le64(m->metric[METRIC_READ].size_sum);
	items++;

	/* encode the write io size metric */
	wsize = (struct ceph_write_io_size *)(rsize + 1);
	wsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_IO_SIZES);
	wsize->header.ver = 1;
	wsize->header.compat = 1;
	wsize->header.data_len = cpu_to_le32(sizeof(*wsize) - header_len);
	wsize->total_ops = cpu_to_le64(m->metric[METRIC_WRITE].total);
	wsize->total_size = cpu_to_le64(m->metric[METRIC_WRITE].size_sum);
	items++;

	put_unaligned_le32(items, &head->num);
	msg->front.iov_len = len;
	msg->hdr.version = cpu_to_le16(1);
	msg->hdr.compat_version = cpu_to_le16(1);
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	ceph_con_send(&s->s_con, msg);

	return true;
}

static void metric_get_session(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *s;
	int i;

	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
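		/*
		 * __ceph_lookup_mds_session() returns a referenced session
		 * (or NULL); the reference is either transferred to
		 * mdsc->metric.session below or dropped again.
		 */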
		s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;

		/*
		 * Skip it if the MDS doesn't support the metric collection,
		 * otherwise the MDS will close the session's socket
		 * connection directly when it receives this message.
		 */
		if (check_session_state(s) &&
		    test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &s->s_features)) {
			mdsc->metric.session = s;
			break;
		}

		ceph_put_mds_session(s);
	}
	mutex_unlock(&mdsc->mutex);
}

static void metric_delayed_work(struct work_struct *work)
{
	struct ceph_client_metric *m =
		container_of(work, struct ceph_client_metric, delayed_work.work);
	struct ceph_mds_client *mdsc =
		container_of(m, struct ceph_mds_client, metric);

	if (mdsc->stopping)
		return;

	if (!m->session || !check_session_state(m->session)) {
		if (m->session) {
			ceph_put_mds_session(m->session);
			m->session = NULL;
		}
		metric_get_session(mdsc);
	}
	if (m->session) {
		ceph_mdsc_send_metrics(mdsc, m->session);
		metric_schedule_delayed(m);
	}
}

int ceph_metric_init(struct ceph_client_metric *m)
{
	struct ceph_metric *metric;
	int ret, i;

	if (!m)
		return -EINVAL;

	atomic64_set(&m->total_dentries, 0);
	ret = percpu_counter_init(&m->d_lease_hit, 0, GFP_KERNEL);
	if (ret)
		return ret;

	ret = percpu_counter_init(&m->d_lease_mis, 0, GFP_KERNEL);
	if (ret)
		goto err_d_lease_mis;

	atomic64_set(&m->total_caps, 0);
	ret = percpu_counter_init(&m->i_caps_hit, 0, GFP_KERNEL);
	if (ret)
		goto err_i_caps_hit;

	ret = percpu_counter_init(&m->i_caps_mis, 0, GFP_KERNEL);
	if (ret)
		goto err_i_caps_mis;

	for (i = 0; i < METRIC_MAX; i++) {
		metric = &m->metric[i];
		spin_lock_init(&metric->lock);
		metric->size_sum = 0;
		metric->size_min = U64_MAX;
		metric->size_max = 0;
		metric->total = 0;
		metric->latency_sum = 0;
		metric->latency_avg = 0;
		metric->latency_sq_sum = 0;
		metric->latency_min = KTIME_MAX;
		metric->latency_max = 0;
	}

	atomic64_set(&m->opened_files, 0);
	ret = percpu_counter_init(&m->opened_inodes, 0, GFP_KERNEL);
	if (ret)
		goto err_opened_inodes;
	ret = percpu_counter_init(&m->total_inodes, 0, GFP_KERNEL);
	if (ret)
		goto err_total_inodes;

	m->session = NULL;
	INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work);

	return 0;

err_total_inodes:
	percpu_counter_destroy(&m->opened_inodes);
err_opened_inodes:
	percpu_counter_destroy(&m->i_caps_mis);
err_i_caps_mis:
	percpu_counter_destroy(&m->i_caps_hit);
err_i_caps_hit:
	percpu_counter_destroy(&m->d_lease_mis);
err_d_lease_mis:
	percpu_counter_destroy(&m->d_lease_hit);

	return ret;
}

void ceph_metric_destroy(struct ceph_client_metric *m)
{
	if (!m)
		return;

	cancel_delayed_work_sync(&m->delayed_work);

	percpu_counter_destroy(&m->total_inodes);
	percpu_counter_destroy(&m->opened_inodes);
	percpu_counter_destroy(&m->i_caps_mis);
	percpu_counter_destroy(&m->i_caps_hit);
	percpu_counter_destroy(&m->d_lease_mis);
	percpu_counter_destroy(&m->d_lease_hit);

	ceph_put_mds_session(m->session);
}

#define METRIC_UPDATE_MIN_MAX(min, max, new)	\
{						\
	if (unlikely(new < min))		\
		min = new;			\
	if (unlikely(new > max))		\
		max = new;			\
}

static inline void __update_mean_and_stdev(ktime_t total, ktime_t *lavg,
					   ktime_t *sq_sump, ktime_t lat)
{
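	/*
	 * Incremental (online) update of the running mean and of the sum of
	 * squared deviations, in the style of Welford's algorithm:
	 *
	 *   new_avg  = old_avg + (lat - old_avg) / total
	 *   sq_sum  += (lat - old_avg) * (lat - new_avg)
	 *
	 * The standard deviation can later be derived from sq_sum and the
	 * sample count.
	 */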
	ktime_t avg;

	if (unlikely(total == 1)) {
		*lavg = lat;
	} else {
		/* the sq is (lat - old_avg) * (lat - new_avg) */
		avg = *lavg + div64_s64(lat - *lavg, total);
		*sq_sump += (lat - *lavg) * (lat - avg);
		*lavg = avg;
	}
}

void ceph_update_metrics(struct ceph_metric *m,
			 ktime_t r_start, ktime_t r_end,
			 unsigned int size, int rc)
{
	ktime_t lat = ktime_sub(r_end, r_start);
	ktime_t total;

	if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
		return;

	spin_lock(&m->lock);
	total = ++m->total;
	m->size_sum += size;
	METRIC_UPDATE_MIN_MAX(m->size_min, m->size_max, size);
	m->latency_sum += lat;
	METRIC_UPDATE_MIN_MAX(m->latency_min, m->latency_max, lat);
	__update_mean_and_stdev(total, &m->latency_avg, &m->latency_sq_sum,
				lat);
	spin_unlock(&m->lock);
}