// SPDX-License-Identifier: GPL-2.0
/*
 * Idle functions for s390.
 *
 * Copyright IBM Corp. 2014
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/sched/cputime.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"

/* Per-CPU idle statistics, protected by the embedded seqcount. */
static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

void enabled_wait(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	unsigned long long idle_time;
	unsigned long psw_mask;

	trace_hardirqs_on();

	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	clear_cpu_flag(CIF_NOHZ_DELAY);

	/* Call the assembler magic in entry.S */
	psw_idle(idle, psw_mask);

	trace_hardirqs_off();

	/* Account time spent with enabled wait psw loaded as idle time. */
	write_seqcount_begin(&idle->seqcount);
	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
	idle->idle_time += idle_time;
	idle->idle_count++;
	account_idle_time(cputime_to_nsecs(idle_time));
	write_seqcount_end(&idle->seqcount);
}
NOKPROBE_SYMBOL(enabled_wait);
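
/*
 * The idle statistics above are updated under the seqcount only in
 * enabled_wait(); the sysfs show functions and arch_cpu_idle_time()
 * below read them locklessly and retry if they race with such an
 * update. enabled_wait() is marked NOKPROBE_SYMBOL() so that no kprobe
 * can be placed on this low-level idle path.
 */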

/*
 * Number of completed idle periods of this CPU, exported as the per-cpu
 * sysfs attribute "idle_count". A period that is currently in progress
 * is counted as well.
 */
static ssize_t show_idle_count(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long idle_count;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_count = READ_ONCE(idle->idle_count);
		if (READ_ONCE(idle->clock_idle_enter))
			idle_count++;
	} while (read_seqcount_retry(&idle->seqcount, seq));
	return sprintf(buf, "%llu\n", idle_count);
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

/*
 * Accumulated idle time of this CPU in microseconds, exported as the
 * per-cpu sysfs attribute "idle_time_us". The idle period currently in
 * progress, if any, is included.
 */
static ssize_t show_idle_time(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_time = READ_ONCE(idle->idle_time);
		idle_enter = READ_ONCE(idle->clock_idle_enter);
		idle_exit = READ_ONCE(idle->clock_idle_exit);
	} while (read_seqcount_retry(&idle->seqcount, seq));
	in_idle = 0;
	now = get_tod_clock();
	if (idle_enter) {
		if (idle_exit) {
			in_idle = idle_exit - idle_enter;
		} else if (now > idle_enter) {
			in_idle = now - idle_enter;
		}
	}
	idle_time += in_idle;
	/* TOD clock units are 1/4096 microsecond, hence the shift by 12. */
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
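
/*
 * Illustrative only: a minimal user-space sketch (not part of this file)
 * that reads the two attributes defined above. It assumes they are
 * registered with the per-cpu devices elsewhere in the architecture code
 * and therefore appear under /sys/devices/system/cpu/cpuN/.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long long count = 0, time_us = 0;
 *		FILE *f;
 *
 *		f = fopen("/sys/devices/system/cpu/cpu0/idle_count", "r");
 *		if (!f || fscanf(f, "%llu", &count) != 1)
 *			return 1;
 *		fclose(f);
 *
 *		f = fopen("/sys/devices/system/cpu/cpu0/idle_time_us", "r");
 *		if (!f || fscanf(f, "%llu", &time_us) != 1)
 *			return 1;
 *		fclose(f);
 *
 *		printf("cpu0: %llu idle periods, %llu us idle\n", count, time_us);
 *		return 0;
 *	}
 */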

/*
 * Return the time in nanoseconds of the current idle period that has not
 * yet been accounted in idle_time, or 0 if there is no such period.
 */
u64 arch_cpu_idle_time(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long long now, idle_enter, idle_exit, in_idle;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		idle_enter = READ_ONCE(idle->clock_idle_enter);
		idle_exit = READ_ONCE(idle->clock_idle_exit);
	} while (read_seqcount_retry(&idle->seqcount, seq));
	in_idle = 0;
	now = get_tod_clock();
	if (idle_enter) {
		if (idle_exit) {
			in_idle = idle_exit - idle_enter;
		} else if (now > idle_enter) {
			in_idle = now - idle_enter;
		}
	}
	return cputime_to_nsecs(in_idle);
}

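/*
 * The generic idle loop brackets each idle period with
 * arch_cpu_idle_enter()/arch_cpu_idle_exit() and calls arch_cpu_idle()
 * in between to halt the CPU. Machine checks are disabled across that
 * window, so the CIF_MCCK_PENDING test in arch_cpu_idle() is stable;
 * the wait PSW re-enables machine checks (PSW_MASK_MCHECK), so a machine
 * check still terminates the enabled wait, and arch_cpu_idle_exit()
 * handles whatever is pending afterwards.
 */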
void arch_cpu_idle_enter(void)
{
	local_mcck_disable();
}

void arch_cpu_idle(void)
{
	if (!test_cpu_flag(CIF_MCCK_PENDING))
		/* Halt the cpu and keep track of cpu time accounting. */
		enabled_wait();
	/* The generic idle loop expects interrupts to be enabled on return. */
	local_irq_enable();
}

void arch_cpu_idle_exit(void)
{
	local_mcck_enable();
	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();
}

/* Called on the idle thread after this CPU has been taken offline. */
void arch_cpu_idle_dead(void)
{
	cpu_die();
}