// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
 */

#include <linux/slab.h>
#include <linux/clk-provider.h>
#include "clk.h"

#define div_mask(width)	((1 << (width)) - 1)

static bool _is_best_half_div(unsigned long rate, unsigned long now,
			      unsigned long best, unsigned long flags)
{
	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
		return abs(rate - now) < abs(rate - best);

	return now <= rate && now > best;
}

static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = clk_readl(divider->reg) >> divider->shift;
	val &= div_mask(divider->width);
	val = val * 2 + 3;

	return DIV_ROUND_UP_ULL(((u64)parent_rate * 2), val);
}
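
/*
 * Worked example of the half-divider arithmetic above, using an arbitrary
 * example parent rate: a register field value of n selects a divisor of
 * n + 1.5, i.e. rate = parent_rate * 2 / (n * 2 + 3).  With a 600 MHz
 * parent, n = 0 gives 400 MHz, n = 1 gives 240 MHz and n = 2 gives
 * ~171.43 MHz (rounded up by DIV_ROUND_UP_ULL()).
 */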

static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
				    unsigned long *best_parent_rate, u8 width,
				    unsigned long flags)
{
	unsigned int i, bestdiv = 0;
	unsigned long parent_rate, best = 0, now, maxdiv;
	unsigned long parent_rate_saved = *best_parent_rate;

	if (!rate)
		rate = 1;

	maxdiv = div_mask(width);

	if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
		parent_rate = *best_parent_rate;
		bestdiv = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
		if (bestdiv < 3)
			bestdiv = 0;
		else
			bestdiv = (bestdiv - 3) / 2;
		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
		return bestdiv;
	}

	/*
	 * The maximum divider we can use without overflowing
	 * unsigned long in rate * i below.
	 */
	maxdiv = min(ULONG_MAX / rate, maxdiv);

	for (i = 0; i <= maxdiv; i++) {
		if (((u64)rate * (i * 2 + 3)) == ((u64)parent_rate_saved * 2)) {
			/*
			 * It's the most ideal case if the requested rate can be
			 * divided from parent clock without needing to change
			 * parent rate, so return the divider immediately.
			 */
			*best_parent_rate = parent_rate_saved;
			return i;
		}
		parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
						((u64)rate * (i * 2 + 3)) / 2);
		now = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), (i * 2 + 3));

		if (_is_best_half_div(rate, now, best, flags)) {
			bestdiv = i;
			best = now;
			*best_parent_rate = parent_rate;
		}
	}

	if (!bestdiv) {
		bestdiv = div_mask(width);
		*best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
	}

	return bestdiv;
}

static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long *prate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int div;

	div = clk_half_divider_bestdiv(hw, rate, prate,
				       divider->width,
				       divider->flags);

	return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3);
}

static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int value;
	unsigned long flags = 0;
	u32 val;

	value = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
	value = (value - 3) / 2;
	value = min_t(unsigned int, value, div_mask(divider->width));

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);

	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
		/*
		 * Hiword-mask registers take a write-enable mask in the
		 * upper 16 bits, so the new value can be written without
		 * a read-modify-write cycle.
		 */
		val = div_mask(divider->width) << (divider->shift + 16);
	} else {
		val = clk_readl(divider->reg);
		val &= ~(div_mask(divider->width) << divider->shift);
	}
	val |= value << divider->shift;
	clk_writel(val, divider->reg);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);

	return 0;
}

const struct clk_ops clk_half_divider_ops = {
	.recalc_rate = clk_half_divider_recalc_rate,
	.round_rate = clk_half_divider_round_rate,
	.set_rate = clk_half_divider_set_rate,
};
EXPORT_SYMBOL_GPL(clk_half_divider_ops);

/*
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
struct clk *rockchip_clk_register_halfdiv(const char *name,
					  const char *const *parent_names,
					  u8 num_parents, void __iomem *base,
					  int muxdiv_offset, u8 mux_shift,
					  u8 mux_width, u8 mux_flags,
					  u8 div_shift, u8 div_width,
					  u8 div_flags, int gate_offset,
					  u8 gate_shift, u8 gate_flags,
					  unsigned long flags,
					  spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							  : &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto err_gate;

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div)
			goto err_div;

		div->flags = div_flags;
		div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div_ops = &clk_half_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);
	if (IS_ERR(clk)) {
		/* composite registration failed: don't leak the allocations */
		kfree(div);
		kfree(gate);
		kfree(mux);
	}

	return clk;

err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(-ENOMEM);
}
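
/*
 * Illustrative use of rockchip_clk_register_halfdiv() (a sketch only, not
 * taken from this file): per-SoC drivers normally describe half-divider
 * branches in their clock tables and reach this helper through
 * rockchip_clk_register_branches().  A direct call, with made-up clock
 * names, register offsets and bit fields (mux at 0x0100 shift 6/width 2,
 * divider field shift 0/width 5, gate at 0x0110 bit 3), might look like:
 *
 *	static const char *const hypothetical_parents[] = { "xin24m", "gpll" };
 *
 *	clk = rockchip_clk_register_halfdiv("hypothetical_clk",
 *					    hypothetical_parents,
 *					    ARRAY_SIZE(hypothetical_parents),
 *					    reg_base,
 *					    0x0100, 6, 2, 0,
 *					    0, 5, 0,
 *					    0x0110, 3, CLK_GATE_HIWORD_MASK,
 *					    CLK_SET_RATE_PARENT, &some_lock);
 *	if (IS_ERR(clk))
 *		pr_err("failed to register half divider\n");
 */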