/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *
 *  Derived from "include/asm-i386/timex.h"
 *    Copyright (C) 1992, Linus Torvalds
 */

#ifndef _ASM_S390_TIMEX_H
#define _ASM_S390_TIMEX_H

#include <linux/preempt.h>
#include <linux/time64.h>
#include <asm/lowcore.h>

/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
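
/*
 * Illustrative sketch (not part of the original header): because
 * TOD_UNIX_EPOCH is the TOD clock value for 1970-01-01 00:00 UTC, the
 * raw TOD units elapsed since the Unix epoch can be approximated as
 *
 *	u64 since_1970 = get_tod_clock() - TOD_UNIX_EPOCH;
 *	unsigned long ns = tod_to_ns(since_1970);
 *
 * get_tod_clock() and tod_to_ns() are defined further down in this
 * header; leap seconds and clock steering are ignored here.
 */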

extern u64 clock_comparator_max;

union tod_clock {
	__uint128_t val;
	struct {
		__uint128_t ei	:  8; /* epoch index */
		__uint128_t tod : 64; /* bits 0-63 of tod clock */
		__uint128_t	: 40;
		__uint128_t pf	: 16; /* programmable field */
	};
	struct {
		__uint128_t eitod : 72; /* epoch index + bits 0-63 tod clock */
		__uint128_t	  : 56;
	};
	struct {
		__uint128_t us	: 60; /* micro-seconds */
		__uint128_t sus	: 12; /* sub-microseconds */
		__uint128_t	: 56;
	};
} __packed;

/* Inline functions for clock register access. */
static inline int set_tod_clock(__u64 time)
{
	int cc;

	asm volatile(
		"   sck   %1\n"
		"   ipm   %0\n"
		"   srl   %0,28\n"
		: "=d" (cc) : "Q" (time) : "cc");
	return cc;
}

static inline int store_tod_clock_ext_cc(union tod_clock *clk)
{
	int cc;

	asm volatile(
		"   stcke  %1\n"
		"   ipm   %0\n"
		"   srl   %0,28\n"
		: "=d" (cc), "=Q" (*clk) : : "cc");
	return cc;
}

static __always_inline void store_tod_clock_ext(union tod_clock *tod)
{
	asm volatile("stcke %0" : "=Q" (*tod) : : "cc");
}
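
/*
 * Usage sketch (illustrative only, not part of the original header):
 * read the extended TOD clock once and pick the fields apart via
 * union tod_clock; the variable name is made up for the example.
 *
 *	union tod_clock clk;
 *
 *	store_tod_clock_ext(&clk);
 *
 * Afterwards clk.tod holds bits 0-63 of the TOD clock, clk.pf the
 * programmable field, and clk.us/clk.sus the microsecond and
 * sub-microsecond parts of the extended value.
 */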

static inline void set_clock_comparator(__u64 time)
{
	asm volatile("sckc %0" : : "Q" (time));
}

static inline void set_tod_programmable_field(u16 val)
{
	asm volatile(
		"	lgr	0,%[val]\n"
		"	sckpf\n"
		:
		: [val] "d" ((unsigned long)val)
		: "0");
}

void clock_comparator_work(void);

void __init time_early_init(void);

extern unsigned char ptff_function_mask[16];

/* Function codes for the ptff instruction. */
#define PTFF_QAF	0x00	/* query available functions */
#define PTFF_QTO	0x01	/* query tod offset */
#define PTFF_QSI	0x02	/* query steering information */
#define PTFF_QUI	0x04	/* query UTC information */
#define PTFF_ATO	0x40	/* adjust tod offset */
#define PTFF_STO	0x41	/* set tod offset */
#define PTFF_SFS	0x42	/* set fine steering rate */
#define PTFF_SGS	0x43	/* set gross steering rate */

/* Query TOD offset result */
struct ptff_qto {
	unsigned long physical_clock;
	unsigned long tod_offset;
	unsigned long logical_tod_offset;
	unsigned long tod_epoch_difference;
} __packed;

static inline int ptff_query(unsigned int nr)
{
	unsigned char *ptr;

	ptr = ptff_function_mask + (nr >> 3);
	return (*ptr & (0x80 >> (nr & 7))) != 0;
}

/* Query UTC information result */
struct ptff_qui {
	unsigned int tm : 2;
	unsigned int ts : 2;
	unsigned int : 28;
	unsigned int pad_0x04;
	unsigned long leap_event;
	short old_leap;
	short new_leap;
	unsigned int pad_0x14;
	unsigned long prt[5];
	unsigned long cst[3];
	unsigned int skew;
	unsigned int pad_0x5c[41];
} __packed;

/*
 * ptff - Perform timing facility function
 * @ptff_block: Pointer to ptff parameter block
 * @len: Length of parameter block
 * @func: Function code
 * Returns: Condition code (0 on success)
 */
#define ptff(ptff_block, len, func)					\
({									\
	struct addrtype { char _[len]; };				\
	unsigned int reg0 = func;					\
	unsigned long reg1 = (unsigned long)(ptff_block);		\
	int rc;								\
									\
	asm volatile(							\
		"	lgr	0,%[reg0]\n"				\
		"	lgr	1,%[reg1]\n"				\
		"	ptff\n"						\
		"	ipm	%[rc]\n"				\
		"	srl	%[rc],28\n"				\
		: [rc] "=&d" (rc), "+m" (*(struct addrtype *)reg1)	\
		: [reg0] "d" (reg0), [reg1] "d" (reg1)			\
		: "cc", "0", "1");					\
	rc;								\
})
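
/*
 * Usage sketch (illustrative only, not part of the original header):
 * query the current TOD offset via PTFF. The function mask is checked
 * first, then the parameter block is filled by the ptff() macro above;
 * error handling is reduced to the bare minimum.
 *
 *	struct ptff_qto qto;
 *
 *	if (ptff_query(PTFF_QTO) &&
 *	    ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
 *		pr_debug("tod offset: %lx\n", qto.tod_offset);
 */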

static inline unsigned long local_tick_disable(void)
{
	unsigned long old;

	old = S390_lowcore.clock_comparator;
	S390_lowcore.clock_comparator = clock_comparator_max;
	set_clock_comparator(S390_lowcore.clock_comparator);
	return old;
}

static inline void local_tick_enable(unsigned long comp)
{
	S390_lowcore.clock_comparator = comp;
	set_clock_comparator(S390_lowcore.clock_comparator);
}
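
/*
 * Usage sketch (illustrative only): the two helpers above are meant to
 * be used as a pair, e.g. to keep clock comparator interrupts away
 * from a critical section:
 *
 *	unsigned long old;
 *
 *	old = local_tick_disable();
 *	... section that must not be disturbed by the tick ...
 *	local_tick_enable(old);
 */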

#define CLOCK_TICK_RATE		1193180 /* Underlying HZ */

typedef unsigned long cycles_t;

static __always_inline unsigned long get_tod_clock(void)
{
	union tod_clock clk;

	store_tod_clock_ext(&clk);
	return clk.tod;
}

static inline unsigned long get_tod_clock_fast(void)
{
	unsigned long clk;

	asm volatile("stckf %0" : "=Q" (clk) : : "cc");
	return clk;
}

static inline cycles_t get_cycles(void)
{
	return (cycles_t) get_tod_clock() >> 2;
}
#define get_cycles get_cycles

int get_phys_clock(unsigned long *clock);
void init_cpu_timer(void);

extern union tod_clock tod_clock_base;

static __always_inline unsigned long __get_tod_clock_monotonic(void)
{
	return get_tod_clock() - tod_clock_base.tod;
}

/**
 * get_tod_clock_monotonic - returns current time in clock rate units
 *
 * The clock and tod_clock_base get changed via stop_machine.
 * Therefore preemption must be disabled, otherwise the returned
 * value is not guaranteed to be monotonic.
 */
static inline unsigned long get_tod_clock_monotonic(void)
{
	unsigned long tod;

	preempt_disable_notrace();
	tod = __get_tod_clock_monotonic();
	preempt_enable_notrace();
	return tod;
}
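
/*
 * Illustrative sketch: a monotonic nanosecond timestamp can be derived
 * by combining the helper above with tod_to_ns() below, e.g.
 *
 *	unsigned long ns = tod_to_ns(get_tod_clock_monotonic());
 */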

/**
 * tod_to_ns - convert a TOD format value to nanoseconds
 * @todval: to be converted TOD format value
 * Returns: number of nanoseconds that correspond to the TOD format value
 *
 * Converting a 64 Bit TOD format value to nanoseconds means that the value
 * must be divided by 4.096. In order to achieve that we multiply with 125
 * and divide by 512:
 *
 *    ns = (todval * 125) >> 9;
 *
 * In order to avoid an overflow with the multiplication we can rewrite this.
 * With a split todval == 2^9 * th + tl (th upper 55 bits, tl lower 9 bits)
 * we end up with
 *
 *    ns = ((2^9 * th + tl) * 125 ) >> 9;
 * -> ns = (th * 125) + ((tl * 125) >> 9);
 *
 */
static __always_inline unsigned long tod_to_ns(unsigned long todval)
{
	return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}
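
/*
 * Worked example: 4096 TOD units correspond to one microsecond, and
 * indeed tod_to_ns(4096) == (4096 >> 9) * 125 == 8 * 125 == 1000 ns.
 */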

/**
 * tod_after - compare two 64 bit TOD values
 * @a: first 64 bit TOD timestamp
 * @b: second 64 bit TOD timestamp
 *
 * Returns: true if a is later than b
 */
static inline int tod_after(unsigned long a, unsigned long b)
{
	if (MACHINE_HAS_SCC)
		return (long) a > (long) b;
	return a > b;
}

/**
 * tod_after_eq - compare two 64 bit TOD values
 * @a: first 64 bit TOD timestamp
 * @b: second 64 bit TOD timestamp
 *
 * Returns: true if a is later than or equal to b
 */
static inline int tod_after_eq(unsigned long a, unsigned long b)
{
	if (MACHINE_HAS_SCC)
		return (long) a >= (long) b;
	return a >= b;
}

#endif