// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>

#define TIMER0_FREQ		1000000
#define GXP_TIMER_CNT_OFS	0x00
#define GXP_TIMESTAMP_OFS	0x08
#define GXP_TIMER_CTRL_OFS	0x14

/*
 * TCS stands for Timer Control/Status: these are masks to be used in
 * the Timer Count Registers.
 */
#define MASK_TCS_ENABLE	0x01
#define MASK_TCS_PERIOD	0x02
#define MASK_TCS_RELOAD	0x04
#define MASK_TCS_TC	0x80

struct gxp_timer {
        void __iomem *counter;
        void __iomem *control;
        struct clock_event_device evt;
};

static struct gxp_timer *gxp_timer;

static void __iomem *system_clock __ro_after_init;

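/* Map a clock_event_device back to its containing gxp_timer. */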
static inline struct gxp_timer *to_gxp_timer(struct clock_event_device *evt_dev)
{
        return container_of(evt_dev, struct gxp_timer, evt);
}

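/* sched_clock() read hook: returns the free-running timestamp counter. */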
static u64 notrace gxp_sched_read(void)
{
        return readl_relaxed(system_clock);
}

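/*
 * Program a one-shot expiry: stop the timer and acknowledge any pending
 * terminal count, load the new count, then re-enable the timer.
 */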
static int gxp_time_set_next_event(unsigned long event, struct clock_event_device *evt_dev)
{
        struct gxp_timer *timer = to_gxp_timer(evt_dev);

        /* Stop counting and disable interrupt before updating */
        writeb_relaxed(MASK_TCS_TC, timer->control);
        writel_relaxed(event, timer->counter);
        writeb_relaxed(MASK_TCS_TC | MASK_TCS_ENABLE, timer->control);

        return 0;
}

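/*
 * The interrupt line is requested with IRQF_SHARED, so bail out with
 * IRQ_NONE unless the terminal-count flag is set; writing the flag back
 * acknowledges the interrupt.
 */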
static irqreturn_t gxp_timer_interrupt(int irq, void *dev_id)
{
        struct gxp_timer *timer = (struct gxp_timer *)dev_id;

        if (!(readb_relaxed(timer->control) & MASK_TCS_TC))
                return IRQ_NONE;

        writeb_relaxed(MASK_TCS_TC, timer->control);

        timer->evt.event_handler(&timer->evt);

        return IRQ_HANDLED;
}

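/*
 * Early timer init (TIMER_OF_DECLARE): map the shared register block,
 * register the timestamp counter as clocksource and sched_clock source,
 * and set up TIMER0 as a one-shot clock event device on CPU0.
 */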
static int __init gxp_timer_init(struct device_node *node)
{
        void __iomem *base;
        struct clk *clk;
        u32 freq;
        int ret, irq;

        gxp_timer = kzalloc(sizeof(*gxp_timer), GFP_KERNEL);
        if (!gxp_timer) {
                ret = -ENOMEM;
                pr_err("Can't allocate gxp_timer\n");
                return ret;
        }

        clk = of_clk_get(node, 0);
        if (IS_ERR(clk)) {
                ret = (int)PTR_ERR(clk);
                pr_err("%pOFn clock not found: %d\n", node, ret);
                goto err_free;
        }

        ret = clk_prepare_enable(clk);
        if (ret) {
                pr_err("%pOFn clock enable failed: %d\n", node, ret);
                goto err_clk_enable;
        }

        base = of_iomap(node, 0);
        if (!base) {
                ret = -ENXIO;
                pr_err("Can't map timer base registers\n");
                goto err_iomap;
        }

        /* Resolve the timer count/control and timestamp (system clock) registers */
        gxp_timer->counter = base + GXP_TIMER_CNT_OFS;
        gxp_timer->control = base + GXP_TIMER_CTRL_OFS;
        system_clock = base + GXP_TIMESTAMP_OFS;

        gxp_timer->evt.name = node->name;
        gxp_timer->evt.rating = 300;
        gxp_timer->evt.features = CLOCK_EVT_FEAT_ONESHOT;
        gxp_timer->evt.set_next_event = gxp_time_set_next_event;
        gxp_timer->evt.cpumask = cpumask_of(0);

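        /* TIMER0's interrupt drives the clock event device */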
        irq = irq_of_parse_and_map(node, 0);
        if (irq <= 0) {
                ret = -EINVAL;
                pr_err("%pOFn can't parse IRQ %d\n", node, irq);
                goto err_exit;
        }

        freq = clk_get_rate(clk);

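        /*
         * The timestamp register is a 32-bit up-counter clocked at 'freq';
         * expose it both as a clocksource and as the sched_clock source.
         */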
        ret = clocksource_mmio_init(system_clock, node->name, freq,
                                    300, 32, clocksource_mmio_readl_up);
        if (ret) {
                pr_err("%pOFn init clocksource failed: %d\n", node, ret);
                goto err_exit;
        }

        sched_clock_register(gxp_sched_read, 32, freq);

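        /*
         * The clock event device is registered at TIMER0_FREQ (1 MHz) rather
         * than at 'freq'; TIMER0 is presumably prescaled down to 1 MHz in
         * hardware.
         */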
        clockevents_config_and_register(&gxp_timer->evt, TIMER0_FREQ,
                                        0xf, 0xffffffff);

        ret = request_irq(irq, gxp_timer_interrupt, IRQF_TIMER | IRQF_SHARED,
                          node->name, gxp_timer);
        if (ret) {
                pr_err("%pOFn request_irq() failed: %d\n", node, ret);
                goto err_exit;
        }

        pr_debug("gxp: system timer (irq = %d)\n", irq);
        return 0;

err_exit:
        iounmap(base);
err_iomap:
        clk_disable_unprepare(clk);
err_clk_enable:
        clk_put(clk);
err_free:
        kfree(gxp_timer);
        return ret;
}

/*
 * This probe runs after the timer is already up and running. It creates
 * the watchdog device as a child, since the two blocks share registers.
 */
static int gxp_timer_probe(struct platform_device *pdev)
{
        struct platform_device *gxp_watchdog_device;
        struct device *dev = &pdev->dev;
        int ret;

        if (!gxp_timer) {
                pr_err("GXP timer not initialized, cannot create watchdog\n");
                return -ENODEV;
        }

        gxp_watchdog_device = platform_device_alloc("gxp-wdt", -1);
        if (!gxp_watchdog_device) {
                pr_err("Timer failed to allocate gxp-wdt\n");
                return -ENOMEM;
        }

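        /*
         * Illustrative sketch (not part of this driver): the gxp-wdt child is
         * expected to retrieve the shared register base in its own probe
         * roughly like this:
         *
         *      void __iomem *base = dev_get_platdata(&pdev->dev);
         *
         * which is why only the counter address is handed over below.
         */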
        /* Pass the base address (counter) as platform data and nothing else */
        gxp_watchdog_device->dev.platform_data = gxp_timer->counter;
        gxp_watchdog_device->dev.parent = dev;

        ret = platform_device_add(gxp_watchdog_device);
        if (ret)
                platform_device_put(gxp_watchdog_device);

        return ret;
}

static const struct of_device_id gxp_timer_of_match[] = {
        { .compatible = "hpe,gxp-timer", },
        {},
};

static struct platform_driver gxp_timer_driver = {
        .probe = gxp_timer_probe,
        .driver = {
                .name = "gxp-timer",
                .of_match_table = gxp_timer_of_match,
                .suppress_bind_attrs = true,
        },
};

builtin_platform_driver(gxp_timer_driver);

TIMER_OF_DECLARE(gxp, "hpe,gxp-timer", gxp_timer_init);