// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 Google, Inc
 * Author: Alexandru M Stan <amstan@chromium.org>
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include "clk.h"

struct rockchip_mmc_clock {
	struct clk_hw	hw;
	void __iomem	*reg;
	int		id;
	int		shift;
	int		cached_phase;
	struct notifier_block clk_rate_change_nb;
};

#define to_mmc_clock(_hw) container_of(_hw, struct rockchip_mmc_clock, hw)

#define RK3288_MMC_CLKGEN_DIV	2

static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	return parent_rate / RK3288_MMC_CLKGEN_DIV;
}

#define ROCKCHIP_MMC_DELAY_SEL		BIT(10)
#define ROCKCHIP_MMC_DEGREE_MASK	0x3
#define ROCKCHIP_MMC_DELAYNUM_OFFSET	2
#define ROCKCHIP_MMC_DELAYNUM_MASK	(0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)

#define PSECS_PER_SEC 1000000000000LL

/*
 * Each fine delay is between 44ps-77ps. Assume each fine delay is 60ps to
 * simplify calculations. So 45degs could be anywhere between 33deg and 57.8deg.
 */
#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60

static int rockchip_mmc_get_phase(struct clk_hw *hw)
{
	struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
	unsigned long rate = clk_get_rate(hw->clk);
	u32 raw_value;
	u16 degrees;
	u32 delay_num = 0;

	/* See the comment for rockchip_mmc_set_phase below */
	if (!rate) {
		pr_err("%s: invalid clk rate\n", __func__);
		return -EINVAL;
	}

	raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);

	degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;

	if (raw_value & ROCKCHIP_MMC_DELAY_SEL) {
		/* degrees/delaynum * 10000 */
		unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) *
					36 * (rate / 1000000);

		delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
		delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
		degrees += DIV_ROUND_CLOSEST(delay_num * factor, 10000);
	}

	return degrees % 360;
}
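
/*
 * Worked example for the readback math above (illustrative only, not part
 * of the original driver; assumes the nominal 60ps delay element and a
 * 150MHz clock):
 *
 *	factor = (60 / 10) * 36 * (150000000 / 1000000) = 32400
 *
 * so each fine delay element contributes 32400 / 10000 = 3.24 degrees,
 * and e.g. delay_num = 28 reads back as roughly 91 extra degrees on top
 * of the coarse 90-degree steps.
 */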
static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
{
	struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
	unsigned long rate = clk_get_rate(hw->clk);
	u8 nineties, remainder;
	u8 delay_num;
	u32 raw_value;
	u32 delay;

	/*
	 * The calculation below is based on the output clock from the
	 * MMC host to the card, which expects this phase clock to
	 * inherit its rate from its parent, namely the output clock
	 * provider of the MMC host. However, things may go wrong if
	 * (1) it is an orphan, or
	 * (2) it is assigned to the wrong parent.
	 *
	 * This check helps debug case (1), which seems to be the most
	 * likely problem we face and which makes it difficult for
	 * people to debug unstable mmc tuning results.
	 */
	if (!rate) {
		pr_err("%s: invalid clk rate\n", __func__);
		return -EINVAL;
	}

	nineties = degrees / 90;
	remainder = (degrees % 90);

	/*
	 * Due to the inexact nature of the "fine" delay, we might
	 * actually go non-monotonic. We don't go _too_ non-monotonic
	 * though, so we should be OK. Here are examples of how this
	 * may work out:
	 *
	 * Ideally we end up with:
	 *	1.0, 2.0, ..., 69.0, 70.0, ..., 89.0, 90.0
	 *
	 * On one extreme (if the delay is actually 44ps):
	 *	.73, 1.5, ..., 50.6, 51.3, ..., 65.3, 90.0
	 * The other (if the delay is actually 77ps):
	 *	1.3, 2.6, ..., 88.6, 89.8, ..., 114.0, 90
	 *
	 * It's possible we might make a delay that is up to 25
	 * degrees off from what we think we're making. That's OK
	 * though because we should be REALLY far from any bad range.
	 */

	/*
	 * Convert to delay; do a little extra work to make sure we
	 * don't overflow 32-bit / 64-bit numbers.
	 */
	delay = 10000000; /* PSECS_PER_SEC / 10000 / 10 */
	delay *= remainder;
	delay = DIV_ROUND_CLOSEST(delay,
			(rate / 1000) * 36 *
			(ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10));

	delay_num = (u8) min_t(u32, delay, 255);

	raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
	raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
	raw_value |= nineties;
	writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift),
	       mmc_clock->reg);

	pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
		clk_hw_get_name(hw), degrees, delay_num,
		mmc_clock->reg, raw_value >> (mmc_clock->shift),
		rockchip_mmc_get_phase(hw)
	);

	return 0;
}
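
/*
 * Worked example for the conversion above (illustrative only, not part
 * of the original driver; assumes the nominal 60ps delay element and a
 * 150MHz clock): for a requested phase of 112 degrees,
 *
 *	nineties  = 112 / 90 = 1
 *	remainder = 112 % 90 = 22
 *	delay     = DIV_ROUND_CLOSEST(10000000 * 22,
 *				      150000 * 36 * 6) = 7
 *
 * so the register is programmed with one coarse 90-degree step plus 7
 * fine delay elements, about 90 + 22.7 = 112.7 degrees if the elements
 * really are 60ps each.
 */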
static const struct clk_ops rockchip_mmc_clk_ops = {
	.recalc_rate	= rockchip_mmc_recalc,
	.get_phase	= rockchip_mmc_get_phase,
	.set_phase	= rockchip_mmc_set_phase,
};

#define to_rockchip_mmc_clock(x) \
	container_of(x, struct rockchip_mmc_clock, clk_rate_change_nb)

static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
					unsigned long event, void *data)
{
	struct rockchip_mmc_clock *mmc_clock = to_rockchip_mmc_clock(nb);
	struct clk_notifier_data *ndata = data;

	/*
	 * rockchip_mmc_clk is mostly used by mmc controllers to sample
	 * the input data, which expects a fixed phase after the tuning
	 * process. However, if the clock rate is changed, the phase
	 * becomes stale and may break the data sampling. So here we try
	 * to restore the phase for that case, except when
	 * (1) cached_phase is invalid, since we inevitably cache it when
	 * the clock provider is reparented from orphan to its real parent
	 * in the first place. Otherwise we may mess up the initialization
	 * of MMC cards, since we only set the default sample phase and
	 * drive phase later on.
	 * (2) the incoming rate is higher than the old one, since the mmc
	 * driver sets max-frequency to match the board's ability and we
	 * can't go beyond that, otherwise the tests would smoke out the
	 * issue.
	 */
	if (ndata->old_rate <= ndata->new_rate)
		return NOTIFY_DONE;

	if (event == PRE_RATE_CHANGE)
		mmc_clock->cached_phase =
			rockchip_mmc_get_phase(&mmc_clock->hw);
	else if (mmc_clock->cached_phase != -EINVAL &&
		 event == POST_RATE_CHANGE)
		rockchip_mmc_set_phase(&mmc_clock->hw, mmc_clock->cached_phase);

	return NOTIFY_DONE;
}

struct clk *rockchip_clk_register_mmc(const char *name,
				const char *const *parent_names, u8 num_parents,
				void __iomem *reg, int shift)
{
	struct clk_init_data init;
	struct rockchip_mmc_clock *mmc_clock;
	struct clk *clk;
	int ret;

	mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
	if (!mmc_clock)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.flags = 0;
	init.num_parents = num_parents;
	init.parent_names = parent_names;
	init.ops = &rockchip_mmc_clk_ops;

	mmc_clock->hw.init = &init;
	mmc_clock->reg = reg;
	mmc_clock->shift = shift;

	clk = clk_register(NULL, &mmc_clock->hw);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_register;
	}

	mmc_clock->clk_rate_change_nb.notifier_call =
				&rockchip_mmc_clk_rate_notify;
	ret = clk_notifier_register(clk, &mmc_clock->clk_rate_change_nb);
	if (ret)
		goto err_notifier;

	return clk;
err_notifier:
	clk_unregister(clk);
err_register:
	kfree(mmc_clock);
	return ERR_PTR(ret);
}
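
/*
 * Usage sketch (illustrative only, not part of this file): the SoC clock
 * driver registers these phase clocks from its branch tables (e.g. via
 * the MMC() branch macro in clk.h), and an MMC host driver then adjusts
 * the sample phase during tuning with clk_set_phase():
 *
 *	ret = clk_set_phase(sample_clk, 90);
 *	if (ret)
 *		dev_warn(dev, "failed to set sample phase: %d\n", ret);
 */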