// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include "rtrs-clt.h"

void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
{
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_clt_stats *stats = sess->stats;
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	cpu = raw_smp_processor_id();
	s = this_cpu_ptr(stats->pcpu_stats);
	if (con->cpu != cpu) {
		/*
		 * The completion ran on a different CPU than the one the
		 * connection is bound to: count the migration on both sides.
		 * "to" lives in the current CPU's counters; "from" lives in
		 * the connection CPU's counters and is atomic because it is
		 * updated from a remote CPU.
		 */
		s->cpu_migr.to++;

		/* Careful here, we override the s pointer */
		s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
		atomic_inc(&s->cpu_migr.from);
	}
}

void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
{
	struct rtrs_clt_stats_pcpu *s;

	s = this_cpu_ptr(stats->pcpu_stats);
	s->rdma.failover_cnt++;
}

int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
{
	struct rtrs_clt_stats_pcpu *s;
	size_t used;
	int cpu;

	/* Header row, padded to line up with the "from:"/"to  :" labels */
	used = sysfs_emit(buf, "    ");
	for_each_possible_cpu(cpu)
		used += sysfs_emit_at(buf, used, " CPU%u", cpu);

	used += sysfs_emit_at(buf, used, "\nfrom:");
	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		used += sysfs_emit_at(buf, used, " %d",
				      atomic_read(&s->cpu_migr.from));
	}

	used += sysfs_emit_at(buf, used, "\nto  :");
	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		used += sysfs_emit_at(buf, used, " %d", s->cpu_migr.to);
	}
	used += sysfs_emit_at(buf, used, "\n");

	return used;
}

int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf)
{
	return sysfs_emit(buf, "%d %d\n", stats->reconnects.successful_cnt,
			  stats->reconnects.fail_cnt);
}

ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, char *page)
{
	struct rtrs_clt_stats_rdma sum;
	struct rtrs_clt_stats_rdma *r;
	int cpu;

	memset(&sum, 0, sizeof(sum));

	/* Fold the per-CPU counters into a single total */
	for_each_possible_cpu(cpu) {
		r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;

		sum.dir[READ].cnt += r->dir[READ].cnt;
		sum.dir[READ].size_total += r->dir[READ].size_total;
		sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
		sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
		sum.failover_cnt += r->failover_cnt;
	}

	return sysfs_emit(page, "%llu %llu %llu %llu %u %llu\n",
			  sum.dir[READ].cnt, sum.dir[READ].size_total,
			  sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
			  atomic_read(&stats->inflight), sum.failover_cnt);
}

ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s, char *page)
{
	return sysfs_emit(page, "echo 1 to reset all statistics\n");
}

int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
{
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		memset(&s->rdma, 0, sizeof(s->rdma));
	}

	return 0;
}

int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
{
	struct rtrs_clt_stats_pcpu *s;
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		s = per_cpu_ptr(stats->pcpu_stats, cpu);
		memset(&s->cpu_migr, 0, sizeof(s->cpu_migr));
	}

	return 0;
}

int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
{
	if (!enable)
		return -EINVAL;

	memset(&stats->reconnects, 0, sizeof(stats->reconnects));

	return 0;
}

int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
{
	if (enable) {
		rtrs_clt_reset_rdma_stats(s, enable);
		rtrs_clt_reset_cpu_migr_stats(s, enable);
		rtrs_clt_reset_reconnects_stat(s, enable);
		atomic_set(&s->inflight, 0);
		return 0;
	}

	return -EINVAL;
}

static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
					      size_t size, int d)
{
	struct rtrs_clt_stats_pcpu *s;

	s = this_cpu_ptr(stats->pcpu_stats);
	s->rdma.dir[d].cnt++;
	s->rdma.dir[d].size_total += size;
}

void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
{
	struct rtrs_clt_con *con = req->con;
	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
	struct rtrs_clt_stats *stats = sess->stats;
	unsigned int len;

	len = req->usr_len + req->data_len;
	rtrs_clt_update_rdma_stats(stats, len, dir);
	/* inflight is only consulted by the min-inflight multipath policy */
	if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
		atomic_inc(&stats->inflight);
}

int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
{
	stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
	if (!stats->pcpu_stats)
		return -ENOMEM;

	/*
	 * successful_cnt will be set to 0 after the session
	 * is established for the first time
	 */
	stats->reconnects.successful_cnt = -1;

	return 0;
}
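
/*
 * Illustrative usage sketch (not compiled): the *_to_str() and *_reset_*()
 * helpers above back the read and write sides of the per-path sysfs stats
 * files. Assuming the layout documented in
 * Documentation/ABI/testing/sysfs-class-rtrs-client, with <session> and
 * <path> standing in for a concrete session name and path entry, the
 * aggregated RDMA counters can be inspected and cleared from userspace:
 *
 *   $ cat /sys/class/rtrs-client/<session>/paths/<path>/stats/rdma
 *   <read-cnt> <read-bytes> <write-cnt> <write-bytes> <inflight> <failovers>
 *
 *   $ echo 1 > /sys/class/rtrs-client/<session>/paths/<path>/stats/reset_all
 */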