xref: /openbmc/linux/arch/x86/platform/uv/uv_time.c (revision 0c6dfa75)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SGI RTC clock/timer routines.
 *
 *  (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 *  Copyright (c) 2009-2013 Silicon Graphics, Inc.  All Rights Reserved.
 *  Copyright (c) Dimitri Sivanich
 */
#include <linux/clockchips.h>
#include <linux/slab.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/cpu.h>

#define RTC_NAME		"sgi_rtc"

static u64 uv_read_rtc(struct clocksource *cs);
static int uv_rtc_next_event(unsigned long delta, struct clock_event_device *ced);
static int uv_rtc_shutdown(struct clock_event_device *evt);

static struct clocksource clocksource_uv = {
	.name		= RTC_NAME,
	.rating		= 299,
	.read		= uv_read_rtc,
	.mask		= (u64)UVH_RTC_REAL_TIME_CLOCK_MASK,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct clock_event_device clock_event_device_uv = {
	.name			= RTC_NAME,
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.shift			= 20,
	.rating			= 400,
	.irq			= -1,
	.set_next_event		= uv_rtc_next_event,
	.set_state_shutdown	= uv_rtc_shutdown,
	.event_handler		= NULL,
};

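/*
 * Per-cpu clock event device; a copy of clock_event_device_uv is
 * installed on each cpu at registration time.
 */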
static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);

/*
 * There is one of these allocated per node.  The node's single RTC
 * comparator is shared by all of its cpus: software tracks each cpu's
 * next expiration and arms the hardware for the earliest one.
 */
struct uv_rtc_timer_head {
	spinlock_t	lock;
	/* next cpu waiting for timer, local node relative: */
	int		next_cpu;
	/* number of cpus on this node: */
	int		ncpus;
	struct {
		int	lcpu;		/* systemwide logical cpu number */
		u64	expires;	/* next timer expiration for this cpu */
	} cpu[];
};

/*
 * Access to uv_rtc_timer_head via blade id.
 */
static struct uv_rtc_timer_head		**blade_info __read_mostly;

static int				uv_rtc_evt_enable;

/*
 * Hardware interface routines
 */

/* Send an RTC IPI to a cpu, possibly on another node */
static void uv_rtc_send_IPI(int cpu)
{
	unsigned long apicid, val;
	int pnode;

	apicid = cpu_physical_id(cpu);
	pnode = uv_apicid_to_pnode(apicid);
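	/* Compose the IPI command: send bit, destination APIC ID, IPI vector */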
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);

	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
	return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED2) &
		UVH_EVENT_OCCURRED2_RTC_1_MASK;
}

/* Set up the interrupt and return non-zero if early expiration occurred. */
static int uv_setup_intr(int cpu, u64 expires)
{
	u64 val;
	unsigned long apicid = cpu_physical_id(cpu);
	int pnode = uv_cpu_to_pnode(cpu);

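	/* Mask the RTC1 interrupt and park the comparator while reprogramming */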
	uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
		UVH_RTC1_INT_CONFIG_M_MASK);
	uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);

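	/* Clear any RTC_1 event that has already latched */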
	uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED2_ALIAS,
			      UVH_EVENT_OCCURRED2_RTC_1_MASK);

	val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
		((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);

	/* Set configuration */
	uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
	/* Initialize comparator value */
	uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);

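	/*
	 * If the RTC has not yet reached the expiration time, the comparator
	 * was armed in time.  Otherwise the interrupt must already have
	 * latched for this not to count as a miss.
	 */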
	if (uv_read_rtc(NULL) <= expires)
		return 0;

	return !uv_intr_pending(pnode);
}

/*
 * Per-cpu timer tracking routines
 */

static __init void uv_rtc_deallocate_timers(void)
{
	int bid;

	for_each_possible_blade(bid) {
		kfree(blade_info[bid]);
	}
	kfree(blade_info);
}

/* Allocate per-node list of cpu timer expiration times. */
static __init int uv_rtc_allocate_timers(void)
{
	int cpu;

	blade_info = kcalloc(uv_possible_blades, sizeof(void *), GFP_KERNEL);
	if (!blade_info)
		return -ENOMEM;

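	/*
	 * Find (or allocate, on the blade's own node) each blade's timer
	 * head and mark every cpu's slot idle (ULLONG_MAX).
	 */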
	for_each_present_cpu(cpu) {
		int nid = cpu_to_node(cpu);
		int bid = uv_cpu_to_blade_id(cpu);
		int bcpu = uv_cpu_blade_processor_id(cpu);
		struct uv_rtc_timer_head *head = blade_info[bid];

		if (!head) {
			head = kmalloc_node(struct_size(head, cpu,
				uv_blade_nr_possible_cpus(bid)),
				GFP_KERNEL, nid);
			if (!head) {
				uv_rtc_deallocate_timers();
				return -ENOMEM;
			}
			spin_lock_init(&head->lock);
			head->ncpus = uv_blade_nr_possible_cpus(bid);
			head->next_cpu = -1;
			blade_info[bid] = head;
		}

		head->cpu[bcpu].lcpu = cpu;
		head->cpu[bcpu].expires = ULLONG_MAX;
	}

	return 0;
}

/* Find and set the next expiring timer. */
static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
{
	u64 lowest = ULLONG_MAX;
	int c, bcpu = -1;

	head->next_cpu = -1;
	for (c = 0; c < head->ncpus; c++) {
		u64 exp = head->cpu[c].expires;
		if (exp < lowest) {
			bcpu = c;
			lowest = exp;
		}
	}
	if (bcpu >= 0) {
		head->next_cpu = bcpu;
		c = head->cpu[bcpu].lcpu;
		if (uv_setup_intr(c, lowest))
			/* If we didn't set it up in time, trigger */
			uv_rtc_send_IPI(c);
	} else {
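		/* No timers armed on this blade: mask the RTC1 interrupt */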
		uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
			UVH_RTC1_INT_CONFIG_M_MASK);
	}
}

/*
 * Set expiration time for current cpu.
 *
 * Returns -ETIME if we missed the expiration time, otherwise 0.
 */
static int uv_rtc_set_timer(int cpu, u64 expires)
{
	int pnode = uv_cpu_to_pnode(cpu);
	int bid = uv_cpu_to_blade_id(cpu);
	struct uv_rtc_timer_head *head = blade_info[bid];
	int bcpu = uv_cpu_blade_processor_id(cpu);
	u64 *t = &head->cpu[bcpu].expires;
	unsigned long flags;
	int next_cpu;

	spin_lock_irqsave(&head->lock, flags);

	next_cpu = head->next_cpu;
	*t = expires;

	/* Will this one be next to go off? */
	if (next_cpu < 0 || bcpu == next_cpu ||
			expires < head->cpu[next_cpu].expires) {
		head->next_cpu = bcpu;
		if (uv_setup_intr(cpu, expires)) {
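			/*
			 * Raced with the RTC: the expiry already passed,
			 * so drop this timer and re-arm for the next one.
			 */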
			*t = ULLONG_MAX;
			uv_rtc_find_next_timer(head, pnode);
			spin_unlock_irqrestore(&head->lock, flags);
			return -ETIME;
		}
	}

	spin_unlock_irqrestore(&head->lock, flags);
	return 0;
}

/*
 * Unset expiration time for current cpu.
 *
 * Returns 1 if this timer was pending.
 */
static int uv_rtc_unset_timer(int cpu, int force)
{
	int pnode = uv_cpu_to_pnode(cpu);
	int bid = uv_cpu_to_blade_id(cpu);
	struct uv_rtc_timer_head *head = blade_info[bid];
	int bcpu = uv_cpu_blade_processor_id(cpu);
	u64 *t = &head->cpu[bcpu].expires;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&head->lock, flags);

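	/*
	 * The timer is pending if the hardware was armed for this cpu and
	 * its time has passed, or if the caller forces removal.
	 */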
	if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
		rc = 1;

	if (rc) {
		*t = ULLONG_MAX;
		/* Was the hardware set up for this timer? */
		if (head->next_cpu == bcpu)
			uv_rtc_find_next_timer(head, pnode);
	}

	spin_unlock_irqrestore(&head->lock, flags);

	return rc;
}


/*
 * Kernel interface routines.
 */

/*
 * Read the RTC.
 *
 * Starting with HUB rev 2.0, the UV RTC register is replicated across all
 * cachelines of its own page.  This allows faster simultaneous reads
 * from a given socket.
 */
static u64 uv_read_rtc(struct clocksource *cs)
{
	unsigned long offset;

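	/*
	 * Hub rev 1 has a single copy of the register; later revisions let
	 * each cpu read its own cacheline-spaced replica.
	 */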
	if (uv_get_min_hub_revision_id() == 1)
		offset = 0;
	else
		offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;

	return (u64)uv_read_local_mmr(UVH_RTC | offset);
}

/*
 * Program the next event, relative to now
 */
static int uv_rtc_next_event(unsigned long delta,
			     struct clock_event_device *ced)
{
	int ced_cpu = cpumask_first(ced->cpumask);

	return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
}

/*
 * Shut down the RTC timer
 */
static int uv_rtc_shutdown(struct clock_event_device *evt)
{
	int ced_cpu = cpumask_first(evt->cpumask);

	uv_rtc_unset_timer(ced_cpu, 1);
	return 0;
}

static void uv_rtc_interrupt(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ced = &per_cpu(cpu_ced, cpu);

	if (!ced->event_handler)
		return;

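	/* Only invoke the handler if this cpu's timer was actually pending */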
	if (uv_rtc_unset_timer(cpu, 0) != 1)
		return;

	ced->event_handler(ced);
}

static int __init uv_enable_evt_rtc(char *str)
{
	uv_rtc_evt_enable = 1;

	return 1;
}
__setup("uvrtcevt", uv_enable_evt_rtc);

static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
{
	struct clock_event_device *ced = this_cpu_ptr(&cpu_ced);

	*ced = clock_event_device_uv;
	ced->cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(ced);
}

static __init int uv_rtc_setup_clock(void)
{
	int rc;

	if (!is_uv_system())
		return -ENODEV;

	rc = clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second);
	if (rc)
		printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc);
	else
		printk(KERN_INFO "UV RTC clocksource registered freq %lu MHz\n",
			sn_rtc_cycles_per_second / 1000000UL);

	if (rc || !uv_rtc_evt_enable || x86_platform_ipi_callback)
		return rc;

	/* Set up and register clockevents */
	rc = uv_rtc_allocate_timers();
	if (rc)
		goto error;

	x86_platform_ipi_callback = uv_rtc_interrupt;

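	/*
	 * Scale the RTC frequency into the mult/shift pair used for
	 * tick <-> nanosecond conversions.
	 */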
	clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
				NSEC_PER_SEC, clock_event_device_uv.shift);

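	/*
	 * Programmable range: at least one RTC tick, at most the full
	 * width of the comparator.
	 */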
	clock_event_device_uv.min_delta_ns = NSEC_PER_SEC /
						sn_rtc_cycles_per_second;
	clock_event_device_uv.min_delta_ticks = 1;

	clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
				(NSEC_PER_SEC / sn_rtc_cycles_per_second);
	clock_event_device_uv.max_delta_ticks = clocksource_uv.mask;

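	/* Install and register a clock event device on every online cpu */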
	rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
	if (rc) {
		x86_platform_ipi_callback = NULL;
		uv_rtc_deallocate_timers();
		goto error;
	}

	printk(KERN_INFO "UV RTC clockevents registered\n");

	return 0;

error:
	clocksource_unregister(&clocksource_uv);
	printk(KERN_INFO "UV RTC clockevents failed rc %d\n", rc);

	return rc;
}
arch_initcall(uv_rtc_setup_clock);