// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#ifndef __XFS_SCRUB_STATS_H__
#define __XFS_SCRUB_STATS_H__

struct xchk_stats_run {
	u64			scrub_ns;
	u64			repair_ns;
	unsigned int		retries;
	bool			repair_attempted;
	bool			repair_succeeded;
};

#ifdef CONFIG_XFS_ONLINE_SCRUB_STATS
struct xchk_stats;

int __init xchk_global_stats_setup(struct dentry *parent);
void xchk_global_stats_teardown(void);

int xchk_mount_stats_alloc(struct xfs_mount *mp);
void xchk_mount_stats_free(struct xfs_mount *mp);

void xchk_stats_register(struct xchk_stats *cs, struct dentry *parent);
void xchk_stats_unregister(struct xchk_stats *cs);

void xchk_stats_merge(struct xfs_mount *mp, const struct xfs_scrub_metadata *sm,
		const struct xchk_stats_run *run);

static inline u64 xchk_stats_now(void) { return ktime_get_ns(); }

static inline u64 xchk_stats_elapsed_ns(u64 since)
{
	u64 now = xchk_stats_now();

	/*
	 * If the system doesn't have a high enough resolution clock, charge at
	 * least one nanosecond so that our stats don't report instantaneous
	 * runtimes.
	 */
	if (now == since)
		return 1;

	return now - since;
}
#else
# define xchk_global_stats_setup(parent)	(0)
# define xchk_global_stats_teardown()		((void)0)
# define xchk_mount_stats_alloc(mp)		(0)
# define xchk_mount_stats_free(mp)		((void)0)
# define xchk_stats_register(cs, parent)	((void)0)
# define xchk_stats_unregister(cs)		((void)0)
# define xchk_stats_now()			(0)
# define xchk_stats_elapsed_ns(x)		(0 * (x))
# define xchk_stats_merge(mp, sm, run)		((void)0)
#endif /* CONFIG_XFS_ONLINE_SCRUB_STATS */

#endif /* __XFS_SCRUB_STATS_H__ */
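
/*
 * Illustrative usage sketch (not part of the kernel sources): one plausible
 * way a scrub caller could fill in a struct xchk_stats_run with the timing
 * helpers declared above and hand the result to xchk_stats_merge().  The
 * xchk_scrub_one() and xchk_repair_one() calls below are hypothetical names
 * standing in for the caller's actual scrub and repair paths; everything
 * else uses only the interfaces declared in this header.
 *
 *	struct xchk_stats_run	run = { };
 *	u64			start;
 *	int			error;
 *
 *	start = xchk_stats_now();
 *	error = xchk_scrub_one(sc);		// hypothetical scrub call
 *	run.scrub_ns += xchk_stats_elapsed_ns(start);
 *
 *	if (needs_repair) {
 *		run.repair_attempted = true;
 *		start = xchk_stats_now();
 *		error = xchk_repair_one(sc);	// hypothetical repair call
 *		run.repair_ns += xchk_stats_elapsed_ns(start);
 *		run.repair_succeeded = (error == 0);
 *	}
 *
 *	xchk_stats_merge(mp, sm, &run);
 *
 * When CONFIG_XFS_ONLINE_SCRUB_STATS is disabled, the macro stubs above make
 * this same sequence compile away to (nearly) nothing.
 */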