// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 NXP
 */

#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/slab.h>

#include "clk.h"

#define PCG_PREDIV_SHIFT	16
#define PCG_PREDIV_WIDTH	3
#define PCG_PREDIV_MAX		8

#define PCG_DIV_SHIFT		0
#define PCG_CORE_DIV_WIDTH	3
#define PCG_DIV_WIDTH		6
#define PCG_DIV_MAX		64

#define PCG_PCS_SHIFT		24
#define PCG_PCS_MASK		0x7

#define PCG_CGC_SHIFT		28

static unsigned long imx8m_clk_composite_divider_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned long prediv_rate;
	unsigned int prediv_value;
	unsigned int div_value;

	prediv_value = readl(divider->reg) >> divider->shift;
	prediv_value &= clk_div_mask(divider->width);

	prediv_rate = divider_recalc_rate(hw, parent_rate, prediv_value,
					  NULL, divider->flags,
					  divider->width);

	div_value = readl(divider->reg) >> PCG_DIV_SHIFT;
	div_value &= clk_div_mask(PCG_DIV_WIDTH);

	return divider_recalc_rate(hw, prediv_rate, div_value, NULL,
				   divider->flags, PCG_DIV_WIDTH);
}

static int imx8m_clk_composite_compute_dividers(unsigned long rate,
						unsigned long parent_rate,
						int *prediv, int *postdiv)
{
	int div1, div2;
	int error = INT_MAX;
	int ret = -EINVAL;

	*prediv = 1;
	*postdiv = 1;

	for (div1 = 1; div1 <= PCG_PREDIV_MAX; div1++) {
		for (div2 = 1; div2 <= PCG_DIV_MAX; div2++) {
			int new_error = ((parent_rate / div1) / div2) - rate;

			if (abs(new_error) < abs(error)) {
				*prediv = div1;
				*postdiv = div2;
				error = new_error;
				ret = 0;
			}
		}
	}
	return ret;
}

static long imx8m_clk_composite_divider_round_rate(struct clk_hw *hw,
						   unsigned long rate,
						   unsigned long *prate)
{
	int prediv_value;
	int div_value;

	imx8m_clk_composite_compute_dividers(rate, *prate,
					     &prediv_value, &div_value);
	rate = DIV_ROUND_UP(*prate, prediv_value);

	return DIV_ROUND_UP(rate, div_value);
}

static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
						unsigned long rate,
						unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned long flags;
	int prediv_value;
	int div_value;
	int ret;
	u32 orig, val;

	ret = imx8m_clk_composite_compute_dividers(rate, parent_rate,
						   &prediv_value, &div_value);
	if (ret)
		return -EINVAL;

	spin_lock_irqsave(divider->lock, flags);

	orig = readl(divider->reg);
	val = orig & ~((clk_div_mask(divider->width) << divider->shift) |
		       (clk_div_mask(PCG_DIV_WIDTH) << PCG_DIV_SHIFT));

	val |= (u32)(prediv_value - 1) << divider->shift;
	val |= (u32)(div_value - 1) << PCG_DIV_SHIFT;

	if (val != orig)
		writel(val, divider->reg);

	spin_unlock_irqrestore(divider->lock, flags);

	return ret;
}

static int imx8m_divider_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int prediv_value;
	int div_value;

	/* if read only, just return current value */
	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
		u32 val;

		val = readl(divider->reg);
		prediv_value = val >> divider->shift;
		prediv_value &= clk_div_mask(divider->width);
		prediv_value++;

		div_value = val >> PCG_DIV_SHIFT;
		div_value &= clk_div_mask(PCG_DIV_WIDTH);
		div_value++;

		return divider_ro_determine_rate(hw, req, divider->table,
						 PCG_PREDIV_WIDTH + PCG_DIV_WIDTH,
						 divider->flags, prediv_value * div_value);
	}

	return divider_determine_rate(hw, req, divider->table,
				      PCG_PREDIV_WIDTH + PCG_DIV_WIDTH,
				      divider->flags);
}

static const struct clk_ops imx8m_clk_composite_divider_ops = {
	.recalc_rate = imx8m_clk_composite_divider_recalc_rate,
	.round_rate = imx8m_clk_composite_divider_round_rate,
	.set_rate = imx8m_clk_composite_divider_set_rate,
	.determine_rate = imx8m_divider_determine_rate,
};

static u8 imx8m_clk_composite_mux_get_parent(struct clk_hw *hw)
{
	return clk_mux_ops.get_parent(hw);
}

static int imx8m_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
	unsigned long flags = 0;
	u32 reg;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);

	reg = readl(mux->reg);
	reg &= ~(mux->mask << mux->shift);
	val = val << mux->shift;
	reg |= val;
	/*
	 * Write twice to make sure the non-target interface
	 * SEL_A/B points to the same clock input.
	 */
	writel(reg, mux->reg);
	writel(reg, mux->reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);

	return 0;
}

static int
imx8m_clk_composite_mux_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	return clk_mux_ops.determine_rate(hw, req);
}

static const struct clk_ops imx8m_clk_composite_mux_ops = {
	.get_parent = imx8m_clk_composite_mux_get_parent,
	.set_parent = imx8m_clk_composite_mux_set_parent,
	.determine_rate = imx8m_clk_composite_mux_determine_rate,
};

static int imx8m_clk_composite_gate_enable(struct clk_hw *hw)
{
	struct clk_gate *gate = to_clk_gate(hw);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(gate->lock, flags);

	val = readl(gate->reg);
	val |= BIT(gate->bit_idx);
	writel(val, gate->reg);

	spin_unlock_irqrestore(gate->lock, flags);

	return 0;
}

static void imx8m_clk_composite_gate_disable(struct clk_hw *hw)
{
	/* composite clk requires the disable hook */
}

static const struct clk_ops imx8m_clk_composite_gate_ops = {
	.enable = imx8m_clk_composite_gate_enable,
	.disable = imx8m_clk_composite_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};

struct clk_hw *__imx8m_clk_hw_composite(const char *name,
					const char * const *parent_names,
					int num_parents, void __iomem *reg,
					u32 composite_flags,
					unsigned long flags)
{
	struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
	struct clk_hw *div_hw, *gate_hw = NULL;
	struct clk_divider *div = NULL;
	struct clk_gate *gate = NULL;
	struct clk_mux *mux = NULL;
	const struct clk_ops *divider_ops;
	const struct clk_ops *mux_ops;
	const struct clk_ops *gate_ops;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_CAST(hw);

	mux_hw = &mux->hw;
	mux->reg = reg;
	mux->shift = PCG_PCS_SHIFT;
	mux->mask = PCG_PCS_MASK;
	mux->lock = &imx_ccm_lock;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		goto free_mux;

	div_hw = &div->hw;
	div->reg = reg;
	if (composite_flags & IMX_COMPOSITE_CORE) {
		div->shift = PCG_DIV_SHIFT;
		div->width = PCG_CORE_DIV_WIDTH;
		divider_ops = &clk_divider_ops;
		mux_ops = &imx8m_clk_composite_mux_ops;
	} else if (composite_flags & IMX_COMPOSITE_BUS) {
		div->shift = PCG_PREDIV_SHIFT;
		div->width = PCG_PREDIV_WIDTH;
		divider_ops = &imx8m_clk_composite_divider_ops;
		mux_ops = &imx8m_clk_composite_mux_ops;
	} else {
		div->shift = PCG_PREDIV_SHIFT;
		div->width = PCG_PREDIV_WIDTH;
		divider_ops = &imx8m_clk_composite_divider_ops;
		mux_ops = &clk_mux_ops;
		if (!(composite_flags & IMX_COMPOSITE_FW_MANAGED))
			flags |= CLK_SET_PARENT_GATE;
	}

	div->lock = &imx_ccm_lock;
	div->flags = CLK_DIVIDER_ROUND_CLOSEST;

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		goto free_div;

	gate_hw = &gate->hw;
	gate->reg = reg;
	gate->bit_idx = PCG_CGC_SHIFT;
	gate->lock = &imx_ccm_lock;
	/*
	 * If the M core has booted, register gate ops whose disable is a
	 * no-op, so the gate is never turned off from the A core side.
	 */
	if (!mcore_booted)
		gate_ops = &clk_gate_ops;
	else
		gate_ops = &imx8m_clk_composite_gate_ops;

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
				       mux_hw, mux_ops, div_hw,
				       divider_ops, gate_hw, gate_ops, flags);
	if (IS_ERR(hw))
		goto free_gate;

	return hw;

free_gate:
	kfree(gate);
free_div:
	kfree(div);
free_mux:
	kfree(mux);
	return ERR_CAST(hw);
}
EXPORT_SYMBOL_GPL(__imx8m_clk_hw_composite);
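
/*
 * Usage sketch (illustrative only, not part of this file): platform clock
 * drivers normally reach __imx8m_clk_hw_composite() through the
 * imx8m_clk_hw_composite*() wrapper macros in clk.h, but it can also be
 * called directly.  The clock name, parent list and register offset below
 * are made-up examples, not real i.MX8M definitions.
 */
static struct clk_hw * __maybe_unused
example_register_bus_slice(void __iomem *ccm_base)
{
	/* hypothetical parent selection table for one clock slice */
	static const char * const example_sels[] = {
		"osc_24m", "sys_pll1_800m",
	};

	/*
	 * IMX_COMPOSITE_BUS selects the prediv/postdiv divider ops and the
	 * double-write mux ops implemented above.
	 */
	return __imx8m_clk_hw_composite("example_bus", example_sels,
					ARRAY_SIZE(example_sels),
					ccm_base + 0x8800, IMX_COMPOSITE_BUS,
					CLK_SET_RATE_NO_REPARENT);
}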