xref: /openbmc/linux/arch/x86/kernel/hpet.c (revision ae213c44)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/clocksource.h>
3 #include <linux/clockchips.h>
4 #include <linux/interrupt.h>
5 #include <linux/irq.h>
6 #include <linux/export.h>
7 #include <linux/delay.h>
8 #include <linux/errno.h>
9 #include <linux/i8253.h>
10 #include <linux/slab.h>
11 #include <linux/hpet.h>
12 #include <linux/init.h>
13 #include <linux/cpu.h>
14 #include <linux/pm.h>
15 #include <linux/io.h>
16 
17 #include <asm/cpufeature.h>
18 #include <asm/irqdomain.h>
19 #include <asm/fixmap.h>
20 #include <asm/hpet.h>
21 #include <asm/time.h>
22 
23 #define HPET_MASK			CLOCKSOURCE_MASK(32)
24 
25 #define HPET_DEV_USED_BIT		2
26 #define HPET_DEV_USED			(1 << HPET_DEV_USED_BIT)
27 #define HPET_DEV_VALID			0x8
28 #define HPET_DEV_FSB_CAP		0x1000
29 #define HPET_DEV_PERI_CAP		0x2000
30 
31 #define HPET_MIN_CYCLES			128
32 #define HPET_MIN_PROG_DELTA		(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
33 
34 /*
35  * HPET address is set in acpi/boot.c, when an ACPI entry exists
36  */
unsigned long				hpet_address;	/* physical MMIO base, set by ACPI parsing */
u8					hpet_blockid; /* OS timer block num */
bool					hpet_msi_disable;	/* "hpet=nomsi"-style opt-out; set elsewhere */

#ifdef CONFIG_PCI_MSI
static unsigned int			hpet_num_timers;	/* channels discovered for MSI use */
#endif
static void __iomem			*hpet_virt_address;	/* ioremapped MMIO base; NULL if unmapped */
45 
/*
 * Per-channel state for HPET comparators which are used as per-CPU
 * clock event devices (MSI mode).
 */
struct hpet_dev {
	struct clock_event_device	evt;		/* embedded; see EVT_TO_HPET_DEV() */
	unsigned int			num;		/* HPET comparator (channel) index */
	int				cpu;		/* CPU this channel serves */
	unsigned int			irq;		/* Linux irq number for the channel */
	unsigned int			flags;		/* HPET_DEV_* bits */
	char				name[10];	/* "hpet%d" */
};
54 
/* Map an embedded clock_event_device back to its containing hpet_dev. */
static inline struct hpet_dev *EVT_TO_HPET_DEV(struct clock_event_device *evtdev)
{
	return container_of(evtdev, struct hpet_dev, evt);
}

/* 32bit MMIO read of the HPET register at offset @a. */
inline unsigned int hpet_readl(unsigned int a)
{
	return readl(hpet_virt_address + a);
}

/* 32bit MMIO write of @d to the HPET register at offset @a. */
static inline void hpet_writel(unsigned int d, unsigned int a)
{
	writel(d, hpet_virt_address + a);
}
69 
70 #ifdef CONFIG_X86_64
71 #include <asm/pgtable.h>
72 #endif
73 
/*
 * Map the HPET MMIO block uncached. On failure hpet_virt_address stays
 * NULL; callers (e.g. hpet_enable()) must check it before any access.
 */
static inline void hpet_set_mapping(void)
{
	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
}

/* Tear down the MMIO mapping; register accessors are invalid afterwards. */
static inline void hpet_clear_mapping(void)
{
	iounmap(hpet_virt_address);
	hpet_virt_address = NULL;
}
84 
/*
 * HPET command line enable / disable
 */
bool boot_hpet_disable;		/* "nohpet" or "hpet=disable" */
bool hpet_force_user;		/* "hpet=force": use HPET despite quirks */
static bool hpet_verbose;	/* "hpet=verbose": dump registers via hpet_print_config() */
91 
92 static int __init hpet_setup(char *str)
93 {
94 	while (str) {
95 		char *next = strchr(str, ',');
96 
97 		if (next)
98 			*next++ = 0;
99 		if (!strncmp("disable", str, 7))
100 			boot_hpet_disable = true;
101 		if (!strncmp("force", str, 5))
102 			hpet_force_user = true;
103 		if (!strncmp("verbose", str, 7))
104 			hpet_verbose = true;
105 		str = next;
106 	}
107 	return 1;
108 }
109 __setup("hpet=", hpet_setup);
110 
/* "nohpet" on the command line: disable HPET usage entirely. */
static int __init disable_hpet(char *str)
{
	boot_hpet_disable = true;
	return 1;
}
__setup("nohpet", disable_hpet);

/* HPET is usable: ACPI provided an address and it was not disabled on the command line. */
static inline int is_hpet_capable(void)
{
	return !boot_hpet_disable && hpet_address;
}
122 
/*
 * HPET timer interrupt enable / disable
 */
static bool hpet_legacy_int_enabled;	/* set once LegacyReplacement routing is on */

/**
 * is_hpet_enabled - check whether the hpet timer interrupt is enabled
 *
 * Return: non-zero when the HPET is usable and legacy interrupt routing
 * has been switched on via hpet_enable_legacy_int().
 */
int is_hpet_enabled(void)
{
	return is_hpet_capable() && hpet_legacy_int_enabled;
}
EXPORT_SYMBOL_GPL(is_hpet_enabled);
136 
137 static void _hpet_print_config(const char *function, int line)
138 {
139 	u32 i, timers, l, h;
140 	printk(KERN_INFO "hpet: %s(%d):\n", function, line);
141 	l = hpet_readl(HPET_ID);
142 	h = hpet_readl(HPET_PERIOD);
143 	timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
144 	printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
145 	l = hpet_readl(HPET_CFG);
146 	h = hpet_readl(HPET_STATUS);
147 	printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
148 	l = hpet_readl(HPET_COUNTER);
149 	h = hpet_readl(HPET_COUNTER+4);
150 	printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);
151 
152 	for (i = 0; i < timers; i++) {
153 		l = hpet_readl(HPET_Tn_CFG(i));
154 		h = hpet_readl(HPET_Tn_CFG(i)+4);
155 		printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n",
156 		       i, l, h);
157 		l = hpet_readl(HPET_Tn_CMP(i));
158 		h = hpet_readl(HPET_Tn_CMP(i)+4);
159 		printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n",
160 		       i, l, h);
161 		l = hpet_readl(HPET_Tn_ROUTE(i));
162 		h = hpet_readl(HPET_Tn_ROUTE(i)+4);
163 		printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n",
164 		       i, l, h);
165 	}
166 }
167 
/* Dump the full HPET register state, but only when "hpet=verbose" is set. */
#define hpet_print_config()					\
do {								\
	if (hpet_verbose)					\
		_hpet_print_config(__func__, __LINE__);	\
} while (0)
173 
174 /*
175  * When the hpet driver (/dev/hpet) is enabled, we need to reserve
176  * timer 0 and timer 1 in case of RTC emulation.
177  */
178 #ifdef CONFIG_HPET
179 
180 static void hpet_reserve_msi_timers(struct hpet_data *hd);
181 
/*
 * Hand the HPET block to the /dev/hpet char driver, reserving the
 * kernel-owned channels: timer 0 (tick) and, with RTC emulation,
 * timer 1 — userspace cannot claim those.
 */
static void hpet_reserve_platform_timers(unsigned int id)
{
	struct hpet __iomem *hpet = hpet_virt_address;
	struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
	unsigned int nrtimers, i;
	struct hpet_data hd;

	nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	memset(&hd, 0, sizeof(hd));
	hd.hd_phys_address	= hpet_address;
	hd.hd_address		= hpet;
	hd.hd_nirqs		= nrtimers;
	hpet_reserve_timer(&hd, 0);

#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif

	/*
	 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
	 * is wrong for i8259!) not the output IRQ.  Many BIOS writers
	 * don't bother configuring *any* comparator interrupts.
	 */
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

	/* Remaining channels report their routing from the hardware. */
	for (i = 2; i < nrtimers; timer++, i++) {
		hd.hd_irq[i] = (readl(&timer->hpet_config) &
			Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_reserve_msi_timers(&hd);

	hpet_alloc(&hd);

}
219 #else
220 static void hpet_reserve_platform_timers(unsigned int id) { }
221 #endif
222 
223 /*
224  * Common hpet info
225  */
static unsigned long hpet_freq;	/* counter frequency in Hz, derived from HPET_PERIOD */

static struct clock_event_device hpet_clockevent;	/* defined below */
229 
230 static void hpet_stop_counter(void)
231 {
232 	u32 cfg = hpet_readl(HPET_CFG);
233 	cfg &= ~HPET_CFG_ENABLE;
234 	hpet_writel(cfg, HPET_CFG);
235 }
236 
/* Zero both 32bit halves of the main counter (counter must be stopped). */
static void hpet_reset_counter(void)
{
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);
}
242 
243 static void hpet_start_counter(void)
244 {
245 	unsigned int cfg = hpet_readl(HPET_CFG);
246 	cfg |= HPET_CFG_ENABLE;
247 	hpet_writel(cfg, HPET_CFG);
248 }
249 
/* Stop, zero and restart the main counter. */
static void hpet_restart_counter(void)
{
	hpet_stop_counter();
	hpet_reset_counter();
	hpet_start_counter();
}

/*
 * NOTE(review): force_hpet_resume() is defined in the chipset quirk
 * code — presumably it re-enables an HPET that firmware turned off
 * across suspend; confirm there.
 */
static void hpet_resume_device(void)
{
	force_hpet_resume();
}

/* Clocksource resume callback: bring the device and counter back up. */
static void hpet_resume_counter(struct clocksource *cs)
{
	hpet_resume_device();
	hpet_restart_counter();
}
267 
/*
 * Switch on LegacyReplacement routing: timer 0/1 take over the legacy
 * timer and RTC interrupt lines (see the RTC emulation notes below).
 */
static void hpet_enable_legacy_int(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);
	hpet_legacy_int_enabled = true;
}
276 
/* Register timer 0 as the global tick clockevent (legacy routing). */
static void hpet_legacy_clockevent_register(void)
{
	/* Start HPET legacy interrupts */
	hpet_enable_legacy_int();

	/*
	 * Start hpet with the boot cpu mask and make it
	 * global after the IO_APIC has been initialized.
	 */
	hpet_clockevent.cpumask = cpumask_of(boot_cpu_data.cpu_index);
	clockevents_config_and_register(&hpet_clockevent, hpet_freq,
					HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
	global_clock_event = &hpet_clockevent;
	printk(KERN_DEBUG "hpet clockevent registered\n");
}
292 
/*
 * Put channel @timer into periodic mode with a HZ tick period.
 * The counter is stopped around the reprogramming; the write sequence
 * (SETVAL, CMP, then CMP again) is order-sensitive — see below.
 */
static int hpet_set_periodic(struct clock_event_device *evt, int timer)
{
	unsigned int cfg, cmp, now;
	uint64_t delta;

	hpet_stop_counter();
	/* Tick period in HPET cycles, via the clockevent mult/shift factors. */
	delta = ((uint64_t)(NSEC_PER_SEC / HZ)) * evt->mult;
	delta >>= evt->shift;
	now = hpet_readl(HPET_COUNTER);
	cmp = now + (unsigned int)delta;
	cfg = hpet_readl(HPET_Tn_CFG(timer));
	cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
	       HPET_TN_32BIT;
	hpet_writel(cfg, HPET_Tn_CFG(timer));
	hpet_writel(cmp, HPET_Tn_CMP(timer));
	udelay(1);
	/*
	 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
	 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
	 * bit is automatically cleared after the first write.
	 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
	 * Publication # 24674)
	 */
	hpet_writel((unsigned int)delta, HPET_Tn_CMP(timer));
	hpet_start_counter();
	hpet_print_config();

	return 0;
}
322 
323 static int hpet_set_oneshot(struct clock_event_device *evt, int timer)
324 {
325 	unsigned int cfg;
326 
327 	cfg = hpet_readl(HPET_Tn_CFG(timer));
328 	cfg &= ~HPET_TN_PERIODIC;
329 	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
330 	hpet_writel(cfg, HPET_Tn_CFG(timer));
331 
332 	return 0;
333 }
334 
335 static int hpet_shutdown(struct clock_event_device *evt, int timer)
336 {
337 	unsigned int cfg;
338 
339 	cfg = hpet_readl(HPET_Tn_CFG(timer));
340 	cfg &= ~HPET_TN_ENABLE;
341 	hpet_writel(cfg, HPET_Tn_CFG(timer));
342 
343 	return 0;
344 }
345 
/* Tick-resume: re-enable legacy routing lost across suspend. */
static int hpet_resume(struct clock_event_device *evt)
{
	hpet_enable_legacy_int();
	hpet_print_config();
	return 0;
}
352 
/*
 * Program channel @timer for a oneshot event @delta counter cycles from
 * now. Returns -ETIME when the event may already have been missed, so
 * the clockevents core retries.
 */
static int hpet_next_event(unsigned long delta,
			   struct clock_event_device *evt, int timer)
{
	u32 cnt;
	s32 res;

	cnt = hpet_readl(HPET_COUNTER);
	cnt += (u32) delta;
	hpet_writel(cnt, HPET_Tn_CMP(timer));

	/*
	 * HPETs are a complete disaster. The compare register is
	 * based on a equal comparison and neither provides a less
	 * than or equal functionality (which would require to take
	 * the wraparound into account) nor a simple count down event
	 * mode. Further the write to the comparator register is
	 * delayed internally up to two HPET clock cycles in certain
	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
	 * longer delays. We worked around that by reading back the
	 * compare register, but that required another workaround for
	 * ICH9,10 chips where the first readout after write can
	 * return the old stale value. We already had a minimum
	 * programming delta of 5us enforced, but a NMI or SMI hitting
	 * between the counter readout and the comparator write can
	 * move us behind that point easily. Now instead of reading
	 * the compare register back several times, we make the ETIME
	 * decision based on the following: Return ETIME if the
	 * counter value after the write is less than HPET_MIN_CYCLES
	 * away from the event or if the counter is already ahead of
	 * the event. The minimum programming delta for the generic
	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
	 */
	res = (s32)(cnt - hpet_readl(HPET_COUNTER));

	return res < HPET_MIN_CYCLES ? -ETIME : 0;
}
389 
/* Clockevent callbacks for the legacy tick: all operate on channel 0. */
static int hpet_legacy_shutdown(struct clock_event_device *evt)
{
	return hpet_shutdown(evt, 0);
}

static int hpet_legacy_set_oneshot(struct clock_event_device *evt)
{
	return hpet_set_oneshot(evt, 0);
}

static int hpet_legacy_set_periodic(struct clock_event_device *evt)
{
	return hpet_set_periodic(evt, 0);
}

static int hpet_legacy_resume(struct clock_event_device *evt)
{
	return hpet_resume(evt);
}

static int hpet_legacy_next_event(unsigned long delta,
			struct clock_event_device *evt)
{
	return hpet_next_event(delta, evt, 0);
}
415 
/*
 * The hpet clock event device (legacy channel 0)
 */
static struct clock_event_device hpet_clockevent = {
	.name			= "hpet",
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_periodic	= hpet_legacy_set_periodic,
	.set_state_oneshot	= hpet_legacy_set_oneshot,
	.set_state_shutdown	= hpet_legacy_shutdown,
	.tick_resume		= hpet_legacy_resume,
	.set_next_event		= hpet_legacy_next_event,
	.irq			= 0,
	/* Low rating; the per-cpu MSI channels below register with 110. */
	.rating			= 50,
};
431 
432 /*
433  * HPET MSI Support
434  */
435 #ifdef CONFIG_PCI_MSI
436 
static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);	/* channel owned by each CPU */
static struct hpet_dev	*hpet_devs;		/* array of hpet_num_timers entries */
static struct irq_domain *hpet_domain;		/* MSI irq domain for this HPET block */
440 
441 void hpet_msi_unmask(struct irq_data *data)
442 {
443 	struct hpet_dev *hdev = irq_data_get_irq_handler_data(data);
444 	unsigned int cfg;
445 
446 	/* unmask it */
447 	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
448 	cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
449 	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
450 }
451 
452 void hpet_msi_mask(struct irq_data *data)
453 {
454 	struct hpet_dev *hdev = irq_data_get_irq_handler_data(data);
455 	unsigned int cfg;
456 
457 	/* mask it */
458 	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
459 	cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
460 	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
461 }
462 
/*
 * Program the FSB route register: low dword holds the MSI data,
 * high dword the MSI address.
 */
void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg)
{
	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
}

/* Read back the FSB route register; the HPET route has no high address. */
void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg)
{
	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
	msg->address_hi = 0;
}
475 
/* Clockevent callbacks for MSI channels: dispatch on the device's channel number. */
static int hpet_msi_shutdown(struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

	return hpet_shutdown(evt, hdev->num);
}

static int hpet_msi_set_oneshot(struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

	return hpet_set_oneshot(evt, hdev->num);
}

static int hpet_msi_set_periodic(struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

	return hpet_set_periodic(evt, hdev->num);
}
496 
/* Tick-resume for an MSI channel: reprogram the FSB route lost across suspend. */
static int hpet_msi_resume(struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
	struct irq_data *data = irq_get_irq_data(hdev->irq);
	struct msi_msg msg;

	/* Restore the MSI msg and unmask the interrupt */
	irq_chip_compose_msi_msg(data, &msg);
	hpet_msi_write(hdev, &msg);
	hpet_msi_unmask(data);
	return 0;
}
509 
/* Oneshot programming for an MSI channel. */
static int hpet_msi_next_event(unsigned long delta,
				struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

	return hpet_next_event(delta, evt, hdev->num);
}
516 
517 static irqreturn_t hpet_interrupt_handler(int irq, void *data)
518 {
519 	struct hpet_dev *dev = (struct hpet_dev *)data;
520 	struct clock_event_device *hevt = &dev->evt;
521 
522 	if (!hevt->event_handler) {
523 		printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
524 				dev->num);
525 		return IRQ_HANDLED;
526 	}
527 
528 	hevt->event_handler(hevt);
529 	return IRQ_HANDLED;
530 }
531 
/*
 * Request the channel's irq and pin it to the target CPU.
 *
 * NOTE(review): returns -1 rather than a proper -errno on failure, and
 * the only visible caller (init_one_hpet_msi_clockevent()) ignores the
 * return value entirely — a failed request_irq() goes unnoticed there.
 */
static int hpet_setup_irq(struct hpet_dev *dev)
{

	if (request_irq(dev->irq, hpet_interrupt_handler,
			IRQF_TIMER | IRQF_NOBALANCING,
			dev->name, dev))
		return -1;

	/* Affinity can only be changed while the irq is disabled. */
	disable_irq(dev->irq);
	irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
	enable_irq(dev->irq);

	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
			 dev->name, dev->irq);

	return 0;
}
549 
/*
 * Set up one MSI channel as the clockevent device for @cpu.
 * This should be called on the target @cpu itself (see the WARN_ON).
 */
static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
{
	struct clock_event_device *evt = &hdev->evt;

	WARN_ON(cpu != smp_processor_id());
	if (!(hdev->flags & HPET_DEV_VALID))
		return;

	hdev->cpu = cpu;
	per_cpu(cpu_hpet_dev, cpu) = hdev;
	evt->name = hdev->name;
	/* NOTE(review): hpet_setup_irq() failure is silently ignored here. */
	hpet_setup_irq(hdev);
	evt->irq = hdev->irq;

	/* Higher rating than the legacy channel (50). */
	evt->rating = 110;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	if (hdev->flags & HPET_DEV_PERI_CAP) {
		evt->features |= CLOCK_EVT_FEAT_PERIODIC;
		evt->set_state_periodic = hpet_msi_set_periodic;
	}

	evt->set_state_shutdown = hpet_msi_shutdown;
	evt->set_state_oneshot = hpet_msi_set_oneshot;
	evt->tick_resume = hpet_msi_resume;
	evt->set_next_event = hpet_msi_next_event;
	evt->cpumask = cpumask_of(hdev->cpu);

	clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA,
					0x7FFFFFFF);
}
581 
582 #ifdef CONFIG_HPET
583 /* Reserve at least one timer for userspace (/dev/hpet) */
584 #define RESERVE_TIMERS 1
585 #else
586 #define RESERVE_TIMERS 0
587 #endif
588 
/*
 * Scan channels >= @start_timer for FSB/MSI capability and prepare them
 * as candidate per-cpu clockevents. Skipped entirely when MSI use is
 * disabled or the CPU has ARAT (the APIC timer suffices then).
 */
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	unsigned int id;
	unsigned int num_timers;
	unsigned int num_timers_used = 0;
	int i, irq;

	if (hpet_msi_disable)
		return;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return;
	id = hpet_readl(HPET_ID);

	num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
	num_timers++; /* Value read out starts from 0 */
	hpet_print_config();

	hpet_domain = hpet_create_irq_domain(hpet_blockid);
	if (!hpet_domain)
		return;

	hpet_devs = kcalloc(num_timers, sizeof(struct hpet_dev), GFP_KERNEL);
	if (!hpet_devs)
		return;

	hpet_num_timers = num_timers;

	/* Keep RESERVE_TIMERS channels at the top free for /dev/hpet. */
	for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
		struct hpet_dev *hdev = &hpet_devs[num_timers_used];
		unsigned int cfg = hpet_readl(HPET_Tn_CFG(i));

		/* Only consider HPET timer with MSI support */
		if (!(cfg & HPET_TN_FSB_CAP))
			continue;

		hdev->flags = 0;
		if (cfg & HPET_TN_PERIODIC_CAP)
			hdev->flags |= HPET_DEV_PERI_CAP;
		sprintf(hdev->name, "hpet%d", i);
		hdev->num = i;

		irq = hpet_assign_irq(hpet_domain, hdev, hdev->num);
		if (irq <= 0)
			continue;

		hdev->irq = irq;
		hdev->flags |= HPET_DEV_FSB_CAP;
		hdev->flags |= HPET_DEV_VALID;
		num_timers_used++;
		/* One channel per possible CPU is enough. */
		if (num_timers_used == num_possible_cpus())
			break;
	}

	printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
		num_timers, num_timers_used);
}
646 
647 #ifdef CONFIG_HPET
/* Mark all valid MSI channels as reserved so /dev/hpet cannot hand them out. */
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	int i;

	if (!hpet_devs)
		return;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;

		hd->hd_irq[hdev->num] = hdev->irq;
		hpet_reserve_timer(hd, hdev->num);
	}
}
665 #endif
666 
/*
 * Atomically claim a free, valid MSI channel via HPET_DEV_USED_BIT.
 * Returns NULL when none is available.
 */
static struct hpet_dev *hpet_get_unused_timer(void)
{
	int i;

	if (!hpet_devs)
		return NULL;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;
		/* test_and_set_bit() makes the claim race-free across CPUs. */
		if (test_and_set_bit(HPET_DEV_USED_BIT,
			(unsigned long *)&hdev->flags))
			continue;
		return hdev;
	}
	return NULL;
}
686 
/* On-stack work item used to run hpet_work() on a specific CPU. */
struct hpet_work_struct {
	struct delayed_work work;
	struct completion complete;	/* signalled when the work ran */
};

/* Runs on the target CPU: claim a free channel and register it there. */
static void hpet_work(struct work_struct *w)
{
	struct hpet_dev *hdev;
	int cpu = smp_processor_id();
	struct hpet_work_struct *hpet_work;

	hpet_work = container_of(w, struct hpet_work_struct, work.work);

	hdev = hpet_get_unused_timer();
	if (hdev)
		init_one_hpet_msi_clockevent(hdev, cpu);

	complete(&hpet_work->complete);
}
706 
/*
 * CPU hotplug online callback: synchronously run hpet_work() on the new
 * CPU so it gets its own HPET MSI clockevent (if a channel is free).
 */
static int hpet_cpuhp_online(unsigned int cpu)
{
	struct hpet_work_struct work;

	INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
	init_completion(&work.complete);
	/* FIXME: add schedule_work_on() */
	schedule_delayed_work_on(cpu, &work.work, 0);
	wait_for_completion(&work.complete);
	destroy_delayed_work_on_stack(&work.work);
	return 0;
}
719 
/* CPU hotplug teardown: release the dead CPU's channel for reuse. */
static int hpet_cpuhp_dead(unsigned int cpu)
{
	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);

	if (!hdev)
		return 0;
	free_irq(hdev->irq, hdev);
	hdev->flags &= ~HPET_DEV_USED;
	per_cpu(cpu_hpet_dev, cpu) = NULL;
	return 0;
}
731 #else
732 
/* Stub: without CONFIG_PCI_MSI there are no MSI channels to scan. */
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
}

#ifdef CONFIG_HPET
/* Stub: no MSI channels exist, so nothing to reserve for /dev/hpet. */
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
}
#endif
744 
745 #define hpet_cpuhp_online	NULL
746 #define hpet_cpuhp_dead		NULL
747 
748 #endif
749 
750 /*
751  * Clock source related code
752  */
753 #if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
754 /*
755  * Reading the HPET counter is a very slow operation. If a large number of
756  * CPUs are trying to access the HPET counter simultaneously, it can cause
757  * massive delay and slow down system performance dramatically. This may
758  * happen when HPET is the default clock source instead of TSC. For a
759  * really large system with hundreds of CPUs, the slowdown may be so
760  * severe that it may actually crash the system because of a NMI watchdog
761  * soft lockup, for example.
762  *
763  * If multiple CPUs are trying to access the HPET counter at the same time,
764  * we don't actually need to read the counter multiple times. Instead, the
765  * other CPUs can use the counter value read by the first CPU in the group.
766  *
767  * This special feature is only enabled on x86-64 systems. It is unlikely
768  * that 32-bit x86 systems will have enough CPUs to require this feature
769  * with its associated locking overhead. And we also need 64-bit atomic
770  * read.
771  *
772  * The lock and the hpet value are stored together and can be read in a
773  * single atomic 64-bit read. It is explicitly assumed that arch_spinlock_t
774  * is 32 bits in size.
775  */
union hpet_lock {
	struct {
		arch_spinlock_t lock;	/* assumed 32bit — see BUILD_BUG_ON in read_hpet() */
		u32 value;		/* most recently read counter value */
	};
	u64 lockval;			/* both fields in one atomic 64bit load */
};

static union hpet_lock hpet __cacheline_aligned = {
	{ .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
};
787 
/*
 * Clocksource read: only one CPU in a contention group performs the
 * (slow) MMIO read; the others reuse its cached value (see the big
 * comment above). Lock-free readers rely on the 64bit atomic load of
 * hpet.lockval.
 */
static u64 read_hpet(struct clocksource *cs)
{
	unsigned long flags;
	union hpet_lock old, new;

	BUILD_BUG_ON(sizeof(union hpet_lock) != 8);

	/*
	 * Read HPET directly if in NMI.
	 */
	if (in_nmi())
		return (u64)hpet_readl(HPET_COUNTER);

	/*
	 * Read the current state of the lock and HPET value atomically.
	 */
	old.lockval = READ_ONCE(hpet.lockval);

	if (arch_spin_is_locked(&old.lock))
		goto contended;

	local_irq_save(flags);
	if (arch_spin_trylock(&hpet.lock)) {
		new.value = hpet_readl(HPET_COUNTER);
		/*
		 * Use WRITE_ONCE() to prevent store tearing.
		 */
		WRITE_ONCE(hpet.value, new.value);
		arch_spin_unlock(&hpet.lock);
		local_irq_restore(flags);
		return (u64)new.value;
	}
	local_irq_restore(flags);

contended:
	/*
	 * Contended case
	 * --------------
	 * Wait until the HPET value change or the lock is free to indicate
	 * its value is up-to-date.
	 *
	 * It is possible that old.value has already contained the latest
	 * HPET value while the lock holder was in the process of releasing
	 * the lock. Checking for lock state change will enable us to return
	 * the value immediately instead of waiting for the next HPET reader
	 * to come along.
	 */
	do {
		cpu_relax();
		new.lockval = READ_ONCE(hpet.lockval);
	} while ((new.value == old.value) && arch_spin_is_locked(&new.lock));

	return (u64)new.value;
}
842 #else
/*
 * For UP or 32-bit: no caching needed, read the counter directly.
 */
static u64 read_hpet(struct clocksource *cs)
{
	return (u64)hpet_readl(HPET_COUNTER);
}
850 #endif
851 
static struct clocksource clocksource_hpet = {
	.name		= "hpet",
	.rating		= 250,			/* below TSC, above PIT-class sources — TODO confirm policy */
	.read		= read_hpet,
	.mask		= HPET_MASK,		/* only the low 32 bits are used */
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= hpet_resume_counter,
};
860 
861 static int hpet_clocksource_register(void)
862 {
863 	u64 start, now;
864 	u64 t1;
865 
866 	/* Start the counter */
867 	hpet_restart_counter();
868 
869 	/* Verify whether hpet counter works */
870 	t1 = hpet_readl(HPET_COUNTER);
871 	start = rdtsc();
872 
873 	/*
874 	 * We don't know the TSC frequency yet, but waiting for
875 	 * 200000 TSC cycles is safe:
876 	 * 4 GHz == 50us
877 	 * 1 GHz == 200us
878 	 */
879 	do {
880 		rep_nop();
881 		now = rdtsc();
882 	} while ((now - start) < 200000UL);
883 
884 	if (t1 == hpet_readl(HPET_COUNTER)) {
885 		printk(KERN_WARNING
886 		       "HPET counter not counting. HPET disabled\n");
887 		return -ENODEV;
888 	}
889 
890 	clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
891 	return 0;
892 }
893 
static u32 *hpet_boot_cfg;	/* [0]=global CFG, [1..]=per-channel CFG saved at boot; restored in hpet_disable() */
895 
/**
 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 *
 * Maps the HPET, sanity checks the period, saves the firmware state,
 * disables all channels, registers the clocksource and - when the
 * hardware supports legacy replacement - the tick clockevent.
 *
 * Return: 1 when the legacy clockevent was registered, 0 otherwise
 * (including all failure paths).
 */
int __init hpet_enable(void)
{
	u32 hpet_period, cfg, id;
	u64 freq;
	unsigned int i, last;

	if (!is_hpet_capable())
		return 0;

	hpet_set_mapping();
	if (!hpet_virt_address)
		return 0;

	/*
	 * Read the period and check for a sane value:
	 */
	hpet_period = hpet_readl(HPET_PERIOD);

	/*
	 * AMD SB700 based systems with spread spectrum enabled use a
	 * SMM based HPET emulation to provide proper frequency
	 * setting. The SMM code is initialized with the first HPET
	 * register access and takes some time to complete. During
	 * this time the config register reads 0xffffffff. We check
	 * for max. 1000 loops whether the config register reads a non
	 * 0xffffffff value to make sure that HPET is up and running
	 * before we go further. A counting loop is safe, as the HPET
	 * access takes thousands of CPU cycles. On non SB700 based
	 * machines this check is only done once and has no side
	 * effects.
	 */
	for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
		if (i == 1000) {
			printk(KERN_WARNING
			       "HPET config register value = 0xFFFFFFFF. "
			       "Disabling HPET\n");
			goto out_nohpet;
		}
	}

	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
		goto out_nohpet;

	/*
	 * The period is a femto seconds value. Convert it to a
	 * frequency.
	 */
	freq = FSEC_PER_SEC;
	do_div(freq, hpet_period);
	hpet_freq = freq;

	/*
	 * Read the HPET ID register to retrieve the IRQ routing
	 * information and the number of channels
	 */
	id = hpet_readl(HPET_ID);
	hpet_print_config();

	/* "last" is the highest channel index, i.e. channel count - 1. */
	last = (id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;

#ifdef CONFIG_HPET_EMULATE_RTC
	/*
	 * The legacy routing mode needs at least two channels, tick timer
	 * and the rtc emulation channel.
	 */
	if (!last)
		goto out_nohpet;
#endif

	/* Save firmware state (global cfg + last+1 channel cfgs). */
	cfg = hpet_readl(HPET_CFG);
	hpet_boot_cfg = kmalloc_array(last + 2, sizeof(*hpet_boot_cfg),
				      GFP_KERNEL);
	if (hpet_boot_cfg)
		*hpet_boot_cfg = cfg;
	else
		pr_warn("HPET initial state will not be saved\n");
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	if (cfg)
		pr_warn("Unrecognized bits %#x set in global cfg\n", cfg);

	/* Quiesce every channel and complain about unknown config bits. */
	for (i = 0; i <= last; ++i) {
		cfg = hpet_readl(HPET_Tn_CFG(i));
		if (hpet_boot_cfg)
			hpet_boot_cfg[i + 1] = cfg;
		cfg &= ~(HPET_TN_ENABLE | HPET_TN_LEVEL | HPET_TN_FSB);
		hpet_writel(cfg, HPET_Tn_CFG(i));
		cfg &= ~(HPET_TN_PERIODIC | HPET_TN_PERIODIC_CAP
			 | HPET_TN_64BIT_CAP | HPET_TN_32BIT | HPET_TN_ROUTE
			 | HPET_TN_FSB | HPET_TN_FSB_CAP);
		if (cfg)
			pr_warn("Unrecognized bits %#x set in cfg#%u\n",
				cfg, i);
	}
	hpet_print_config();

	if (hpet_clocksource_register())
		goto out_nohpet;

	if (id & HPET_ID_LEGSUP) {
		hpet_legacy_clockevent_register();
		return 1;
	}
	return 0;

out_nohpet:
	hpet_clear_mapping();
	hpet_address = 0;
	return 0;
}
1009 
1010 /*
 * Needs to be late, as the reserve_timer code calls kmalloc !
1012  *
1013  * Not a problem on i386 as hpet_enable is called from late_time_init,
1014  * but on x86_64 it is necessary !
1015  */
/*
 * Late init: pick up a quirk-forced HPET address if ACPI had none, scan
 * for MSI-capable channels, reserve the platform timers for /dev/hpet
 * and install the CPU hotplug callbacks for the per-cpu clockevents.
 */
static __init int hpet_late_init(void)
{
	int ret;

	if (boot_hpet_disable)
		return -ENODEV;

	if (!hpet_address) {
		if (!force_hpet_address)
			return -ENODEV;

		hpet_address = force_hpet_address;
		hpet_enable();
	}

	if (!hpet_virt_address)
		return -ENODEV;

	/* With legacy routing, channels 0/1 are taken; start scanning at 2. */
	if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP)
		hpet_msi_capability_lookup(2);
	else
		hpet_msi_capability_lookup(0);

	hpet_reserve_platform_timers(hpet_readl(HPET_ID));
	hpet_print_config();

	if (hpet_msi_disable)
		return 0;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return 0;

	/* This notifier should be called after workqueue is ready */
	ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "x86/hpet:online",
				hpet_cpuhp_online, NULL);
	if (ret)
		return ret;
	ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "x86/hpet:dead", NULL,
				hpet_cpuhp_dead);
	if (ret)
		goto err_cpuhp;
	return 0;

err_cpuhp:
	cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE);
	return ret;
}
fs_initcall(hpet_late_init);
1064 
/*
 * Shut the HPET down, restoring the firmware configuration saved in
 * hpet_boot_cfg when available (e.g. for kexec/reboot paths).
 */
void hpet_disable(void)
{
	if (is_hpet_capable() && hpet_virt_address) {
		unsigned int cfg = hpet_readl(HPET_CFG), id, last;

		if (hpet_boot_cfg)
			cfg = *hpet_boot_cfg;
		else if (hpet_legacy_int_enabled) {
			cfg &= ~HPET_CFG_LEGACY;
			hpet_legacy_int_enabled = false;
		}
		cfg &= ~HPET_CFG_ENABLE;
		hpet_writel(cfg, HPET_CFG);

		/* Without a saved boot config there is nothing to restore. */
		if (!hpet_boot_cfg)
			return;

		id = hpet_readl(HPET_ID);
		last = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);

		/* Restore each channel's boot-time configuration. */
		for (id = 0; id <= last; ++id)
			hpet_writel(hpet_boot_cfg[id + 1], HPET_Tn_CFG(id));

		/* Re-enable the counter only if firmware had it running. */
		if (*hpet_boot_cfg & HPET_CFG_ENABLE)
			hpet_writel(*hpet_boot_cfg, HPET_CFG);
	}
}
1092 
1093 #ifdef CONFIG_HPET_EMULATE_RTC
1094 
/* HPET in LegacyReplacement Mode eats up RTC interrupt line. When HPET
1096  * is enabled, we support RTC interrupt functionality in software.
1097  * RTC has 3 kinds of interrupts:
1098  * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
1099  *    is updated
1100  * 2) Alarm Interrupt - generate an interrupt at a specific time of day
1101  * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
1102  *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
1103  * (1) and (2) above are implemented using polling at a frequency of
1104  * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
1105  * overhead. (DEFAULT_RTC_INT_FREQ)
1106  * For (3), we use interrupts at 64Hz or user specified periodic
1107  * frequency, whichever is higher.
1108  */
1109 #include <linux/mc146818rtc.h>
1110 #include <linux/rtc.h>
1111 
#define DEFAULT_RTC_INT_FREQ	64	/* polling frequency for update/alarm emulation */
#define DEFAULT_RTC_SHIFT	6	/* log2(DEFAULT_RTC_INT_FREQ) */
#define RTC_NUM_INTS		1

static unsigned long hpet_rtc_flags;		/* emulated RTC interrupt enable bits (e.g. RTC_PIE) */
static int hpet_prev_update_sec;		/* last second an update interrupt was delivered */
static struct rtc_time hpet_alarm_time;		/* alarm time for AIE emulation */
static unsigned long hpet_pie_count;		/* periodic interrupts since last reprogram */
static u32 hpet_t1_cmp;				/* last comparator value written to timer 1 */
static u32 hpet_default_delta;			/* timer 1 delta for the default 64Hz rate */
static u32 hpet_pie_delta;			/* timer 1 delta for the user periodic rate */
static unsigned long hpet_pie_limit;		/* non-zero caps PIE at the default rate */

static rtc_irq_handler irq_handler;		/* registered RTC interrupt callback */
1126 
1127 /*
1128  * Check that the hpet counter c1 is ahead of the c2
1129  */
1130 static inline int hpet_cnt_ahead(u32 c1, u32 c2)
1131 {
1132 	return (s32)(c2 - c1) < 0;
1133 }
1134 
1135 /*
1136  * Registers a IRQ handler.
1137  */
1138 int hpet_register_irq_handler(rtc_irq_handler handler)
1139 {
1140 	if (!is_hpet_enabled())
1141 		return -ENODEV;
1142 	if (irq_handler)
1143 		return -EBUSY;
1144 
1145 	irq_handler = handler;
1146 
1147 	return 0;
1148 }
1149 EXPORT_SYMBOL_GPL(hpet_register_irq_handler);
1150 
1151 /*
1152  * Deregisters the IRQ handler registered with hpet_register_irq_handler()
1153  * and does cleanup.
1154  */
1155 void hpet_unregister_irq_handler(rtc_irq_handler handler)
1156 {
1157 	if (!is_hpet_enabled())
1158 		return;
1159 
1160 	irq_handler = NULL;
1161 	hpet_rtc_flags = 0;
1162 }
1163 EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);
1164 
1165 /*
1166  * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
1167  * is not supported by all HPET implementations for timer 1.
1168  *
1169  * hpet_rtc_timer_init() is called when the rtc is initialized.
1170  */
1171 int hpet_rtc_timer_init(void)
1172 {
1173 	unsigned int cfg, cnt, delta;
1174 	unsigned long flags;
1175 
1176 	if (!is_hpet_enabled())
1177 		return 0;
1178 
1179 	if (!hpet_default_delta) {
1180 		uint64_t clc;
1181 
1182 		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
1183 		clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
1184 		hpet_default_delta = clc;
1185 	}
1186 
1187 	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
1188 		delta = hpet_default_delta;
1189 	else
1190 		delta = hpet_pie_delta;
1191 
1192 	local_irq_save(flags);
1193 
1194 	cnt = delta + hpet_readl(HPET_COUNTER);
1195 	hpet_writel(cnt, HPET_T1_CMP);
1196 	hpet_t1_cmp = cnt;
1197 
1198 	cfg = hpet_readl(HPET_T1_CFG);
1199 	cfg &= ~HPET_TN_PERIODIC;
1200 	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
1201 	hpet_writel(cfg, HPET_T1_CFG);
1202 
1203 	local_irq_restore(flags);
1204 
1205 	return 1;
1206 }
1207 EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
1208 
1209 static void hpet_disable_rtc_channel(void)
1210 {
1211 	u32 cfg = hpet_readl(HPET_T1_CFG);
1212 	cfg &= ~HPET_TN_ENABLE;
1213 	hpet_writel(cfg, HPET_T1_CFG);
1214 }
1215 
1216 /*
1217  * The functions below are called from rtc driver.
1218  * Return 0 if HPET is not being used.
1219  * Otherwise do the necessary changes and return 1.
1220  */
1221 int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
1222 {
1223 	if (!is_hpet_enabled())
1224 		return 0;
1225 
1226 	hpet_rtc_flags &= ~bit_mask;
1227 	if (unlikely(!hpet_rtc_flags))
1228 		hpet_disable_rtc_channel();
1229 
1230 	return 1;
1231 }
1232 EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
1233 
1234 int hpet_set_rtc_irq_bit(unsigned long bit_mask)
1235 {
1236 	unsigned long oldbits = hpet_rtc_flags;
1237 
1238 	if (!is_hpet_enabled())
1239 		return 0;
1240 
1241 	hpet_rtc_flags |= bit_mask;
1242 
1243 	if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
1244 		hpet_prev_update_sec = -1;
1245 
1246 	if (!oldbits)
1247 		hpet_rtc_timer_init();
1248 
1249 	return 1;
1250 }
1251 EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);
1252 
1253 int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
1254 			unsigned char sec)
1255 {
1256 	if (!is_hpet_enabled())
1257 		return 0;
1258 
1259 	hpet_alarm_time.tm_hour = hrs;
1260 	hpet_alarm_time.tm_min = min;
1261 	hpet_alarm_time.tm_sec = sec;
1262 
1263 	return 1;
1264 }
1265 EXPORT_SYMBOL_GPL(hpet_set_alarm_time);
1266 
1267 int hpet_set_periodic_freq(unsigned long freq)
1268 {
1269 	uint64_t clc;
1270 
1271 	if (!is_hpet_enabled())
1272 		return 0;
1273 
1274 	if (freq <= DEFAULT_RTC_INT_FREQ)
1275 		hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
1276 	else {
1277 		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
1278 		do_div(clc, freq);
1279 		clc >>= hpet_clockevent.shift;
1280 		hpet_pie_delta = clc;
1281 		hpet_pie_limit = 0;
1282 	}
1283 	return 1;
1284 }
1285 EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);
1286 
/*
 * Tell the rtc driver whether dropped-interrupt compensation applies:
 * it does whenever the HPET drives the RTC interrupt.
 */
int hpet_rtc_dropped_irq(void)
{
	return is_hpet_enabled() ? 1 : 0;
}
EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
1292 
/*
 * Re-arm HPET timer 1 after an emulated RTC interrupt: advance the
 * comparator by one period until it is ahead of the running counter,
 * counting how many whole periods (i.e. interrupts) were missed.
 */
static void hpet_rtc_timer_reinit(void)
{
	unsigned int delta;
	int lost_ints = -1;	/* -1: the mandatory first pass counts as zero lost */

	/* No RTC mode active anymore: just shut the channel down. */
	if (unlikely(!hpet_rtc_flags))
		hpet_disable_rtc_channel();

	/* Default 64 Hz period unless a faster user periodic rate is set. */
	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * Increment the comparator value until we are ahead of the
	 * current count.
	 */
	do {
		hpet_t1_cmp += delta;
		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
		lost_ints++;
	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));

	/* Account missed periods so the PIE tick bookkeeping stays correct. */
	if (lost_ints) {
		if (hpet_rtc_flags & RTC_PIE)
			hpet_pie_count += lost_ints;
		if (printk_ratelimit())
			printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
				lost_ints);
	}
}
1324 
/*
 * Interrupt handler for HPET timer 1: emulates the RTC update, alarm
 * and periodic interrupts in software and forwards the composed RTC
 * interrupt status to the registered rtc driver handler.
 */
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;

	/* Re-arm the timer first so the next tick is not delayed. */
	hpet_rtc_timer_reinit();
	memset(&curr_time, 0, sizeof(struct rtc_time));

	/* Wall-clock time is only needed for update/alarm emulation. */
	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
		mc146818_get_time(&curr_time);

	/* Update interrupt: raised once per wall-clock second change
	 * (hpet_prev_update_sec == -1 suppresses the very first one). */
	if (hpet_rtc_flags & RTC_UIE &&
	    curr_time.tm_sec != hpet_prev_update_sec) {
		if (hpet_prev_update_sec >= 0)
			rtc_int_flag = RTC_UF;
		hpet_prev_update_sec = curr_time.tm_sec;
	}

	/* Periodic interrupt: every hpet_pie_limit ticks (0 = every tick). */
	if (hpet_rtc_flags & RTC_PIE &&
	    ++hpet_pie_count >= hpet_pie_limit) {
		rtc_int_flag |= RTC_PF;
		hpet_pie_count = 0;
	}

	/* Alarm interrupt: fires when the polled time matches the alarm. */
	if (hpet_rtc_flags & RTC_AIE &&
	    (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
	    (curr_time.tm_min == hpet_alarm_time.tm_min) &&
	    (curr_time.tm_hour == hpet_alarm_time.tm_hour))
			rtc_int_flag |= RTC_AF;

	if (rtc_int_flag) {
		/* Interrupt count goes in the upper byte (RTC_NUM_INTS << 8). */
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		if (irq_handler)
			irq_handler(rtc_int_flag, dev_id);
	}
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
1363 #endif
1364