1*3bb16560SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2cdce3546SChao Xie /*
3cdce3546SChao Xie * mmp gate clock operation source file
4cdce3546SChao Xie *
5cdce3546SChao Xie * Copyright (C) 2014 Marvell
6cdce3546SChao Xie * Chao Xie <chao.xie@marvell.com>
7cdce3546SChao Xie */
8cdce3546SChao Xie
9cdce3546SChao Xie #include <linux/clk-provider.h>
10cdce3546SChao Xie #include <linux/slab.h>
11cdce3546SChao Xie #include <linux/io.h>
12cdce3546SChao Xie #include <linux/err.h>
13cdce3546SChao Xie #include <linux/delay.h>
14cdce3546SChao Xie
15cdce3546SChao Xie #include "clk.h"
16cdce3546SChao Xie
/*
 * Some clocks have multiple bits to enable the clock, and the
 * bits used to disable the clock are not the same as the
 * enabling bits.
 */
21cdce3546SChao Xie
22cdce3546SChao Xie #define to_clk_mmp_gate(hw) container_of(hw, struct mmp_clk_gate, hw)
23cdce3546SChao Xie
mmp_clk_gate_enable(struct clk_hw * hw)24cdce3546SChao Xie static int mmp_clk_gate_enable(struct clk_hw *hw)
25cdce3546SChao Xie {
26cdce3546SChao Xie struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
27cdce3546SChao Xie unsigned long flags = 0;
28cdce3546SChao Xie unsigned long rate;
29cdce3546SChao Xie u32 tmp;
30cdce3546SChao Xie
31cdce3546SChao Xie if (gate->lock)
32cdce3546SChao Xie spin_lock_irqsave(gate->lock, flags);
33cdce3546SChao Xie
34cdce3546SChao Xie tmp = readl(gate->reg);
35cdce3546SChao Xie tmp &= ~gate->mask;
36cdce3546SChao Xie tmp |= gate->val_enable;
37cdce3546SChao Xie writel(tmp, gate->reg);
38cdce3546SChao Xie
39cdce3546SChao Xie if (gate->lock)
40cdce3546SChao Xie spin_unlock_irqrestore(gate->lock, flags);
41cdce3546SChao Xie
42cdce3546SChao Xie if (gate->flags & MMP_CLK_GATE_NEED_DELAY) {
43aef28cb6SStephen Boyd rate = clk_hw_get_rate(hw);
44cdce3546SChao Xie /* Need delay 2 cycles. */
45cdce3546SChao Xie udelay(2000000/rate);
46cdce3546SChao Xie }
47cdce3546SChao Xie
48cdce3546SChao Xie return 0;
49cdce3546SChao Xie }
50cdce3546SChao Xie
mmp_clk_gate_disable(struct clk_hw * hw)51cdce3546SChao Xie static void mmp_clk_gate_disable(struct clk_hw *hw)
52cdce3546SChao Xie {
53cdce3546SChao Xie struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
54cdce3546SChao Xie unsigned long flags = 0;
55cdce3546SChao Xie u32 tmp;
56cdce3546SChao Xie
57cdce3546SChao Xie if (gate->lock)
58cdce3546SChao Xie spin_lock_irqsave(gate->lock, flags);
59cdce3546SChao Xie
60cdce3546SChao Xie tmp = readl(gate->reg);
61cdce3546SChao Xie tmp &= ~gate->mask;
62cdce3546SChao Xie tmp |= gate->val_disable;
63cdce3546SChao Xie writel(tmp, gate->reg);
64cdce3546SChao Xie
65cdce3546SChao Xie if (gate->lock)
66cdce3546SChao Xie spin_unlock_irqrestore(gate->lock, flags);
67cdce3546SChao Xie }
68cdce3546SChao Xie
mmp_clk_gate_is_enabled(struct clk_hw * hw)69cdce3546SChao Xie static int mmp_clk_gate_is_enabled(struct clk_hw *hw)
70cdce3546SChao Xie {
71cdce3546SChao Xie struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
72cdce3546SChao Xie unsigned long flags = 0;
73cdce3546SChao Xie u32 tmp;
74cdce3546SChao Xie
75cdce3546SChao Xie if (gate->lock)
76cdce3546SChao Xie spin_lock_irqsave(gate->lock, flags);
77cdce3546SChao Xie
78cdce3546SChao Xie tmp = readl(gate->reg);
79cdce3546SChao Xie
80cdce3546SChao Xie if (gate->lock)
81cdce3546SChao Xie spin_unlock_irqrestore(gate->lock, flags);
82cdce3546SChao Xie
83cdce3546SChao Xie return (tmp & gate->mask) == gate->val_enable;
84cdce3546SChao Xie }
85cdce3546SChao Xie
86cdce3546SChao Xie const struct clk_ops mmp_clk_gate_ops = {
87cdce3546SChao Xie .enable = mmp_clk_gate_enable,
88cdce3546SChao Xie .disable = mmp_clk_gate_disable,
89cdce3546SChao Xie .is_enabled = mmp_clk_gate_is_enabled,
90cdce3546SChao Xie };
91cdce3546SChao Xie
/**
 * mmp_clk_register_gate - register an MMP gate clock
 * @dev: device registering this clock (may be NULL)
 * @name: clock name
 * @parent_name: parent clock name, or NULL for a root clock
 * @flags: framework-level clock flags (CLK_*)
 * @reg: MMIO register that controls the gate
 * @mask: bits in @reg belonging to this gate
 * @val_enable: masked-field value that enables the clock
 * @val_disable: masked-field value that disables the clock
 * @gate_flags: MMP-specific flags (e.g. MMP_CLK_GATE_NEED_DELAY)
 * @lock: spinlock serializing access to @reg, or NULL if none needed
 *
 * Returns the registered clk on success or an ERR_PTR() on failure;
 * the gate allocation is released if clk_register() fails.
 */
struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u32 mask, u32 val_enable, u32 val_disable,
		unsigned int gate_flags, spinlock_t *lock)
{
	struct mmp_clk_gate *gate;
	struct clk *clk;
	/*
	 * Designated initializer: members not listed here are zeroed.
	 * The previous field-by-field assignment left the remaining
	 * clk_init_data members (which clk_register() may inspect) as
	 * indeterminate stack contents.
	 */
	struct clk_init_data init = {
		.name = name,
		.ops = &mmp_clk_gate_ops,
		.flags = flags,
		.parent_names = parent_name ? &parent_name : NULL,
		.num_parents = parent_name ? 1 : 0,
	};

	/* allocate the gate */
	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	/* struct clk_gate assignments */
	gate->reg = reg;
	gate->mask = mask;
	gate->val_enable = val_enable;
	gate->val_disable = val_disable;
	gate->flags = gate_flags;
	gate->lock = lock;
	gate->hw.init = &init;

	clk = clk_register(dev, &gate->hw);

	if (IS_ERR(clk))
		kfree(gate);

	return clk;
}
128