Lines matching the identifier "cs" in kernel/time/clocksource.c (the number at the start of each entry is its line number in that file)

23 static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
25 u64 delta = clocksource_delta(end, start, cs->mask);
27 if (likely(delta < cs->max_cycles))
28 return clocksource_cyc2ns(delta, cs->mult, cs->shift);
30 return mul_u64_u32_shr(delta, cs->mult, cs->shift);
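
The fragment above is the core clocksource arithmetic: an elapsed cycle count becomes nanoseconds as (delta * mult) >> shift, and once delta exceeds cs->max_cycles the plain 64-bit multiply could overflow, so the widening mul_u64_u32_shr() path is used instead. A minimal userspace sketch of the same math, assuming a compiler with __int128 support and purely illustrative mult/shift values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative mult/shift for a hypothetical 1 MHz counter: 1 cycle = 1000 ns. */
#define DEMO_MULT	1000u
#define DEMO_SHIFT	0u

/* Fast path, analogous to clocksource_cyc2ns(): 64-bit multiply, then shift. */
static uint64_t demo_cyc2ns(uint64_t delta, uint32_t mult, uint32_t shift)
{
	return (delta * (uint64_t)mult) >> shift;
}

/* Overflow-safe path, analogous to mul_u64_u32_shr(): widen to 128 bits first. */
static uint64_t demo_cyc2ns_safe(uint64_t delta, uint32_t mult, uint32_t shift)
{
	return (uint64_t)(((unsigned __int128)delta * mult) >> shift);
}

int main(void)
{
	uint64_t delta = 123456;	/* elapsed cycles */

	printf("fast: %llu ns\n", (unsigned long long)demo_cyc2ns(delta, DEMO_MULT, DEMO_SHIFT));
	printf("safe: %llu ns\n", (unsigned long long)demo_cyc2ns_safe(delta, DEMO_MULT, DEMO_SHIFT));
	return 0;
}
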
116 * Also a default for cs->uncertainty_margin when registering clocks.
124 * a lower bound for cs->uncertainty_margin values when registering clocks.
161 static void __clocksource_change_rating(struct clocksource *cs, int rating);
181 static void __clocksource_unstable(struct clocksource *cs)
183 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
184 cs->flags |= CLOCK_SOURCE_UNSTABLE;
190 if (list_empty(&cs->list)) {
191 cs->rating = 0;
195 if (cs->mark_unstable)
196 cs->mark_unstable(cs);
205 * @cs: clocksource to be marked unstable
210 void clocksource_mark_unstable(struct clocksource *cs)
215 if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
216 if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
217 list_add(&cs->wd_list, &watchdog_list);
218 __clocksource_unstable(cs);
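
clocksource_mark_unstable() is the entry point that arch or driver code can use when it already knows its counter has misbehaved; the helper above adds the clocksource to the watchdog list if needed and calls __clocksource_unstable(), which clears the HRES/WATCHDOG flags and lets the core pick a replacement. A hedged kernel-context sketch, where my_cs and the error path are hypothetical:

#include <linux/clocksource.h>

static struct clocksource my_cs;	/* assumed to be registered elsewhere */

/* Hypothetical error path: hardware reported that its counter stopped. */
static void my_counter_error(void)
{
	/*
	 * Demote the clocksource: the core clears CLOCK_SOURCE_VALID_FOR_HRES,
	 * sets CLOCK_SOURCE_UNSTABLE and schedules selection of another source.
	 */
	clocksource_mark_unstable(&my_cs);
}
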
232 static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
242 *csnow = cs->read(cs);
271 smp_processor_id(), cs->name, wd_delay, WATCHDOG_MAX_SKEW, wd_seq_delay, nretries, cs->name);
278 cs->name, wd_delay);
337 struct clocksource *cs = (struct clocksource *)csin;
339 csnow_mid = cs->read(cs);
342 void clocksource_verify_percpu(struct clocksource *cs)
359 pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
364 cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
369 csnow_begin = cs->read(cs);
370 smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
371 csnow_end = cs->read(cs);
372 delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
375 delta = (csnow_end - csnow_mid) & cs->mask;
378 cs_nsec = cycles_to_nsec_safe(cs, csnow_begin, csnow_end);
389 cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
392 cpumask_pr_args(&cpus_behind), testcpu, cs->name);
395 testcpu, cs_nsec_min, cs_nsec_max, cs->name);
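
clocksource_verify_percpu() reads the clocksource on the reporting CPU, then on every other chosen CPU, and classifies CPUs as ahead or behind using mask-based subtraction, which stays correct across a counter wrap as long as the true elapsed count is below the mask. A self-contained sketch of that wrap-safe delta, using a hypothetical 32-bit counter:

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe elapsed count, analogous to "(now - then) & cs->mask". */
static uint64_t masked_delta(uint64_t now, uint64_t then, uint64_t mask)
{
	return (now - then) & mask;
}

int main(void)
{
	uint64_t mask   = 0xffffffffULL;	/* hypothetical 32-bit counter */
	uint64_t before = 0xfffffff0ULL;	/* just before the wrap */
	uint64_t after  = 0x00000010ULL;	/* just after the wrap */

	/* Prints 32: the wrap is absorbed by the mask. */
	printf("delta = %llu\n", (unsigned long long)masked_delta(after, before, mask));
	return 0;
}

In the fragments above, a negative signed result for (csnow_mid - csnow_begin) marks the CPU as behind, and a negative result for (csnow_end - csnow_mid) marks it as ahead.
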
401 struct clocksource *cs;
403 list_for_each_entry(cs, &watchdog_list, wd_list)
404 cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
413 struct clocksource *cs;
424 list_for_each_entry(cs, &watchdog_list, wd_list) {
427 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
433 read_ret = cs_watchdog_read(cs, &csnow, &wdnow);
437 __clocksource_unstable(cs);
451 * cs->last could keep unchanged for 5 minutes, reset
460 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
462 cs->flags |= CLOCK_SOURCE_WATCHDOG;
463 cs->wd_last = wdnow;
464 cs->cs_last = csnow;
468 wd_nsec = cycles_to_nsec_safe(watchdog, cs->wd_last, wdnow);
469 cs_nsec = cycles_to_nsec_safe(cs, cs->cs_last, csnow);
470 wdlast = cs->wd_last; /* save these in case we print them */
471 cslast = cs->cs_last;
472 cs->cs_last = csnow;
473 cs->wd_last = wdnow;
500 md = cs->uncertainty_margin + watchdog->uncertainty_margin;
507 smp_processor_id(), cs->name);
511 cs->name, cs_nsec, csnow, cslast, cs->mask);
515 cs->name, cs_nsec - wd_nsec, cs_wd_msec, watchdog->name, wd_nsec, wd_msec);
516 if (curr_clocksource == cs)
517 pr_warn(" '%s' is current clocksource.\n", cs->name);
519 pr_warn(" '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
522 __clocksource_unstable(cs);
526 if (cs == curr_clocksource && cs->tick_stable)
527 cs->tick_stable(cs);
529 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
530 (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
533 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
550 if (cs != curr_clocksource) {
551 cs->flags |= CLOCK_SOURCE_RESELECT;
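
The heart of the watchdog check is a comparison between the interval measured by the clocksource under test (cs_nsec) and the same interval measured by the watchdog (wd_nsec): if they disagree by more than the combined uncertainty margins (md), the clocksource is marked unstable. A simplified userspace sketch of that decision, with all numbers illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Analogous to: md = cs->uncertainty_margin + watchdog->uncertainty_margin;
 * mark unstable when |cs_nsec - wd_nsec| exceeds md. */
static bool skew_exceeds_margin(int64_t cs_nsec, int64_t wd_nsec, int64_t margin)
{
	int64_t skew = cs_nsec - wd_nsec;

	if (skew < 0)
		skew = -skew;
	return skew > margin;
}

int main(void)
{
	/* Hypothetical half-second watchdog interval with 30 us of disagreement. */
	int64_t wd_nsec = 500000000;
	int64_t cs_nsec = wd_nsec + 30000;
	int64_t margin  = 100000;	/* e.g. two 50 us uncertainty margins */

	printf("unstable: %s\n",
	       skew_exceeds_margin(cs_nsec, wd_nsec, margin) ? "yes" : "no");
	return 0;
}
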
609 static void clocksource_enqueue_watchdog(struct clocksource *cs)
611 INIT_LIST_HEAD(&cs->wd_list);
613 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
614 /* cs is a clocksource to be watched. */
615 list_add(&cs->wd_list, &watchdog_list);
616 cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
618 /* cs is a watchdog. */
619 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
620 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
626 struct clocksource *cs, *old_wd;
635 list_for_each_entry(cs, &clocksource_list, list) {
636 /* cs is a clocksource to be watched. */
637 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
641 if (fallback && cs == old_wd)
645 if (!watchdog || cs->rating > watchdog->rating)
646 watchdog = cs;
661 static void clocksource_dequeue_watchdog(struct clocksource *cs)
663 if (cs != watchdog) {
664 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
665 /* cs is a watched clocksource. */
666 list_del_init(&cs->wd_list);
675 struct clocksource *cs, *tmp;
686 list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
687 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
688 list_del_init(&cs->wd_list);
689 __clocksource_change_rating(cs, 0);
692 if (cs->flags & CLOCK_SOURCE_RESELECT) {
693 cs->flags &= ~CLOCK_SOURCE_RESELECT;
713 static bool clocksource_is_watchdog(struct clocksource *cs)
715 return cs == watchdog;
720 static void clocksource_enqueue_watchdog(struct clocksource *cs)
722 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
723 cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
727 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
730 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
731 void clocksource_mark_unstable(struct clocksource *cs) { }
738 static bool clocksource_is_suspend(struct clocksource *cs)
740 return cs == suspend_clocksource;
743 static void __clocksource_suspend_select(struct clocksource *cs)
748 if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
756 if (cs->suspend || cs->resume) {
758 cs->name);
762 if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
763 suspend_clocksource = cs;
772 struct clocksource *cs, *old_suspend;
778 list_for_each_entry(cs, &clocksource_list, list) {
780 if (fallback && cs == old_suspend)
783 __clocksource_suspend_select(cs);
789 * @cs: current clocksource from timekeeping
800 void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
810 if (clocksource_is_suspend(cs)) {
826 * @cs: current clocksource from timekeeping
838 u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
850 if (clocksource_is_suspend(cs))
862 if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
873 struct clocksource *cs;
875 list_for_each_entry_reverse(cs, &clocksource_list, list)
876 if (cs->suspend)
877 cs->suspend(cs);
885 struct clocksource *cs;
887 list_for_each_entry(cs, &clocksource_list, list)
888 if (cs->resume)
889 cs->resume(cs);
908 * @cs: Pointer to clocksource
911 static u32 clocksource_max_adjustment(struct clocksource *cs)
917 ret = (u64)cs->mult * 11;
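
clocksource_max_adjustment() caps the adjustment NTP may apply to mult at roughly 11%: the fragment above computes mult * 11 and the function then divides by 100. A tiny sketch of the same arithmetic with an assumed mult value:

#include <stdint.h>
#include <stdio.h>

/* Roughly 11% of mult, analogous to clocksource_max_adjustment(). */
static uint32_t demo_max_adjustment(uint32_t mult)
{
	return (uint32_t)(((uint64_t)mult * 11) / 100);
}

int main(void)
{
	uint32_t mult = 4194304;	/* hypothetical mult value */

	/* Prints maxadj = 461373, the largest NTP-driven tweak allowed on mult. */
	printf("maxadj = %u\n", demo_max_adjustment(mult));
	return 0;
}
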
969 * @cs: Pointer to clocksource to be updated
972 static inline void clocksource_update_max_deferment(struct clocksource *cs)
974 cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
975 cs->maxadj, cs->mask,
976 &cs->max_cycles);
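
clocksource_update_max_deferment() asks clocks_calc_max_nsecs() for the longest interval timekeeping may defer an update: the elapsed cycle count must neither wrap the counter (cs->mask) nor overflow the 64-bit multiply by mult plus its maximum adjustment. A simplified userspace sketch of that idea, illustrative rather than the kernel's exact helper:

#include <stdint.h>
#include <stdio.h>

/*
 * Rough estimate of the maximum deferment: bound the delta by both the
 * counter mask and the largest value whose multiply by (mult + maxadj)
 * fits in 64 bits, convert conservatively, and keep a 50% safety margin.
 */
static uint64_t demo_max_nsecs(uint32_t mult, uint32_t shift, uint32_t maxadj,
			       uint64_t mask, uint64_t *max_cycles)
{
	uint64_t cycles = UINT64_MAX / (mult + maxadj);

	if (cycles > mask)
		cycles = mask;
	if (max_cycles)
		*max_cycles = cycles;

	return ((cycles * (uint64_t)(mult - maxadj)) >> shift) / 2;
}

int main(void)
{
	uint64_t max_cycles;
	/* Hypothetical 1 GHz, 32-bit clocksource; mult/shift/maxadj are made up. */
	uint64_t ns = demo_max_nsecs(4194304, 22, 461373, 0xffffffffULL, &max_cycles);

	printf("max_cycles = %llu, max_idle_ns = %llu\n",
	       (unsigned long long)max_cycles, (unsigned long long)ns);
	return 0;
}
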
981 struct clocksource *cs;
991 list_for_each_entry(cs, &clocksource_list, list) {
992 if (skipcur && cs == curr_clocksource)
994 if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
996 return cs;
1004 struct clocksource *best, *cs;
1015 list_for_each_entry(cs, &clocksource_list, list) {
1016 if (skipcur && cs == curr_clocksource)
1018 if (strcmp(cs->name, override_name) != 0)
1025 if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
1027 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
1029 cs->name);
1037 cs->name);
1041 best = cs;
1095 static void clocksource_enqueue(struct clocksource *cs)
1102 if (tmp->rating < cs->rating)
1106 list_add(&cs->list, entry);
1111 * @cs: clocksource to be registered
1121 void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
1139 sec = cs->mask;
1144 else if (sec > 600 && cs->mask > UINT_MAX)
1147 clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
1161 if (scale && freq && !cs->uncertainty_margin) {
1162 cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
1163 if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW)
1164 cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW;
1165 } else if (!cs->uncertainty_margin) {
1166 cs->uncertainty_margin = WATCHDOG_THRESHOLD;
1168 WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW);
1174 cs->maxadj = clocksource_max_adjustment(cs);
1175 while (freq && ((cs->mult + cs->maxadj < cs->mult)
1176 || (cs->mult - cs->maxadj > cs->mult))) {
1177 cs->mult >>= 1;
1178 cs->shift--;
1179 cs->maxadj = clocksource_max_adjustment(cs);
1186 WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
1188 cs->name);
1190 clocksource_update_max_deferment(cs);
1193 cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
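
The uncertainty_margin logic above gives every clocksource a default tolerance for the watchdog comparison: when a frequency is supplied it is one period of the (scale * freq) rate, floored at twice the per-read skew allowance (WATCHDOG_MAX_SKEW); without a frequency it falls back to WATCHDOG_THRESHOLD. A small numeric sketch, where the 100 us skew allowance is assumed purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC		1000000000ULL
#define DEMO_MAX_SKEW_NS	100000ULL	/* assumed skew allowance, illustration only */

/* Default margin, analogous to the logic in __clocksource_update_freq_scale():
 * one period of (scale * freq), but never less than twice the skew allowance. */
static uint64_t demo_uncertainty_margin(uint32_t scale, uint32_t freq)
{
	uint64_t margin = NSEC_PER_SEC / ((uint64_t)scale * freq);

	if (margin < 2 * DEMO_MAX_SKEW_NS)
		margin = 2 * DEMO_MAX_SKEW_NS;
	return margin;
}

int main(void)
{
	/* A hypothetical 1 kHz counter registered in Hz (scale = 1): 1 ms margin. */
	printf("1 kHz margin: %llu ns\n",
	       (unsigned long long)demo_uncertainty_margin(1, 1000));
	/* A hypothetical 1 GHz counter: the period is tiny, so the floor applies. */
	printf("1 GHz margin: %llu ns\n",
	       (unsigned long long)demo_uncertainty_margin(1, 1000000000));
	return 0;
}
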
1199 * @cs: clocksource to be registered
1208 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
1212 clocksource_arch_init(cs);
1214 if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX))
1215 cs->id = CSID_GENERIC;
1216 if (cs->vdso_clock_mode < 0 ||
1217 cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
1219 cs->name, cs->vdso_clock_mode);
1220 cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
1224 __clocksource_update_freq_scale(cs, scale, freq);
1230 clocksource_enqueue(cs);
1231 clocksource_enqueue_watchdog(cs);
1236 __clocksource_suspend_select(cs);
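
__clocksource_register_scale() is normally reached through the wrappers in include/linux/clocksource.h: a driver fills in struct clocksource and calls clocksource_register_hz() (scale = 1) or clocksource_register_khz() (scale = 1000). A hedged kernel-context sketch of a minimal registration, where the device, its MMIO base and the 24 MHz rate are hypothetical:

#include <linux/clocksource.h>
#include <linux/io.h>

static void __iomem *my_counter_base;	/* hypothetical, mapped elsewhere */

static u64 my_counter_read(struct clocksource *cs)
{
	return readl(my_counter_base);	/* free-running 32-bit up-counter */
}

static struct clocksource my_cs = {
	.name	= "my-counter",
	.rating	= 200,			/* mid-range: usable, but not the best possible */
	.read	= my_counter_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int my_counter_init(void)
{
	/* Computes mult/shift, maxadj and max_idle_ns, then enqueues the clocksource. */
	return clocksource_register_hz(&my_cs, 24000000);
}
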
1242 static void __clocksource_change_rating(struct clocksource *cs, int rating)
1244 list_del(&cs->list);
1245 cs->rating = rating;
1246 clocksource_enqueue(cs);
1251 * @cs: clocksource to be changed
1254 void clocksource_change_rating(struct clocksource *cs, int rating)
1260 __clocksource_change_rating(cs, rating);
1271 * Unbind clocksource @cs. Called with clocksource_mutex held
1273 static int clocksource_unbind(struct clocksource *cs)
1277 if (clocksource_is_watchdog(cs)) {
1280 if (clocksource_is_watchdog(cs))
1284 if (cs == curr_clocksource) {
1287 if (curr_clocksource == cs)
1291 if (clocksource_is_suspend(cs)) {
1301 clocksource_dequeue_watchdog(cs);
1302 list_del_init(&cs->list);
1310 * @cs: clocksource to be unregistered
1312 int clocksource_unregister(struct clocksource *cs)
1317 if (!list_empty(&cs->list))
1318 ret = clocksource_unbind(cs);
1404 struct clocksource *cs;
1414 list_for_each_entry(cs, &clocksource_list, list) {
1415 if (strcmp(cs->name, name))
1417 ret = clocksource_unbind(cs);