// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include "rtrs-clt.h"

void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_clt_stats *stats = sess->stats;
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	cpu = raw_smp_processor_id();
	s = get_cpu_ptr(stats->pcpu_stats);
	if (con->cpu != cpu) {
		s->cpu_migr.to++;

		/*
		 * Careful here: we override the s pointer so that the
		 * 'from' counter is bumped on the connection's own CPU.
		 */
		s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
		atomic_inc(&s->cpu_migr.from);
	}
	put_cpu_ptr(stats->pcpu_stats);
}

void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
{
	struct rtrs_clt_stats_pcpu *s;

	s = get_cpu_ptr(stats->pcpu_stats);
	s->rdma.failover_cnt++;
	put_cpu_ptr(stats->pcpu_stats);
}

int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
{
	struct rtrs_clt_stats_pcpu *s;
	size_t used;
	int cpu;

	used = 0;
	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		used += sysfs_emit_at(buf, used, "%d ",
				      atomic_read(&s->cpu_migr.from));
	}

	used += sysfs_emit_at(buf, used, "\n");

	return used;
}

int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
{
	struct rtrs_clt_stats_pcpu *s;
	size_t used;
	int cpu;

	used = 0;
	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		used += sysfs_emit_at(buf, used, "%d ", s->cpu_migr.to);
	}

	used += sysfs_emit_at(buf, used, "\n");

	return used;
}

int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf)
{
	return sysfs_emit(buf, "%d %d\n", stats->reconnects.successful_cnt,
			  stats->reconnects.fail_cnt);
}

ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, char *page)
{
	struct rtrs_clt_stats_rdma sum;
	struct rtrs_clt_stats_rdma *r;
	int cpu;

	memset(&sum, 0, sizeof(sum));

	for_each_possible_cpu(cpu) {
		r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;

		sum.dir[READ].cnt += r->dir[READ].cnt;
		sum.dir[READ].size_total += r->dir[READ].size_total;
		sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
		sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
		sum.failover_cnt += r->failover_cnt;
	}

	return sysfs_emit(page, "%llu %llu %llu %llu %u %llu\n",
			  sum.dir[READ].cnt, sum.dir[READ].size_total,
			  sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
			  atomic_read(&stats->inflight), sum.failover_cnt);
}

ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s, char *page)
{
	return sysfs_emit(page, "echo 1 to reset all statistics\n");
}

int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
{
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		memset(&s->rdma, 0, sizeof(s->rdma));
	}

	return 0;
}
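/*
 * Note: the reset handlers here all follow the same sysfs convention:
 * a write of "1" (enable == true) zeroes the counters, and any other
 * value is rejected with -EINVAL (cf. rtrs_clt_reset_all_help() above).
 */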
int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
{
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		memset(&s->cpu_migr, 0, sizeof(s->cpu_migr));
	}

	return 0;
}

int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
{
	if (!enable)
		return -EINVAL;

	memset(&stats->reconnects, 0, sizeof(stats->reconnects));

	return 0;
}

int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
{
	if (enable) {
		rtrs_clt_reset_rdma_stats(s, enable);
		rtrs_clt_reset_cpu_migr_stats(s, enable);
		rtrs_clt_reset_reconnects_stat(s, enable);
		atomic_set(&s->inflight, 0);
		return 0;
	}

	return -EINVAL;
}

static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
					      size_t size, int d)
{
	struct rtrs_clt_stats_pcpu *s;

	s = get_cpu_ptr(stats->pcpu_stats);
	s->rdma.dir[d].cnt++;
	s->rdma.dir[d].size_total += size;
	put_cpu_ptr(stats->pcpu_stats);
}

void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
{
	struct rtrs_clt_con *con = req->con;
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_clt_stats *stats = sess->stats;
	unsigned int len;

	len = req->usr_len + req->data_len;
	rtrs_clt_update_rdma_stats(stats, len, dir);
	if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
		atomic_inc(&stats->inflight);
}

int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
{
	stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
	if (!stats->pcpu_stats)
		return -ENOMEM;

	/*
	 * successful_cnt will be set to 0 after the session
	 * is established for the first time.
	 */
	stats->reconnects.successful_cnt = -1;

	return 0;
}
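/*
 * A minimal sketch of the matching teardown, assuming the caller owns
 * stats: rtrs_clt_init_stats() allocates the percpu storage above, so
 * the session destruction path must release it with free_percpu().
 * The helper name below is hypothetical, for illustration only; in the
 * driver the free_percpu() call lives in the session teardown code.
 */
static inline void rtrs_clt_free_stats_sketch(struct rtrs_clt_stats *stats)
{
	free_percpu(stats->pcpu_stats);
	stats->pcpu_stats = NULL;
}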