// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 Google, Inc
 * Author: Alexandru M Stan <amstan@chromium.org>
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include "clk.h"

struct rockchip_mmc_clock {
	struct clk_hw	hw;
	void __iomem	*reg;
	int		id;
	int		shift;
	int		cached_phase;
	struct notifier_block clk_rate_change_nb;
};

#define to_mmc_clock(_hw) container_of(_hw, struct rockchip_mmc_clock, hw)

#define RK3288_MMC_CLKGEN_DIV	2

static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	return parent_rate / RK3288_MMC_CLKGEN_DIV;
}

#define ROCKCHIP_MMC_DELAY_SEL		BIT(10)
#define ROCKCHIP_MMC_DEGREE_MASK	0x3
#define ROCKCHIP_MMC_DELAYNUM_OFFSET	2
#define ROCKCHIP_MMC_DELAYNUM_MASK	(0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)

#define PSECS_PER_SEC 1000000000000LL

/*
 * Each fine delay is between 44ps-77ps. Assume each fine delay is 60ps to
 * simplify calculations. So 45degs could be anywhere between 33deg and 57.8deg.
 */
#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60

static int rockchip_mmc_get_phase(struct clk_hw *hw)
{
	struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
	unsigned long rate = clk_hw_get_rate(hw);
	u32 raw_value;
	u16 degrees;
	u32 delay_num = 0;

	/* Constant signal, no measurable phase shift */
	if (!rate)
		return 0;

	raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);

	degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;

	if (raw_value & ROCKCHIP_MMC_DELAY_SEL) {
		/* degrees/delaynum * 1000000 */
		unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) *
					36 * (rate / 10000);

		delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
		delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
		degrees += DIV_ROUND_CLOSEST(delay_num * factor, 1000000);
	}

	return degrees % 360;
}
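
/*
 * The fine-delay conversion above is, in effect:
 *
 *	degrees += delay_num * ROCKCHIP_MMC_DELAY_ELEMENT_PSEC * 360 * rate
 *		   / PSECS_PER_SEC
 *
 * with the constants pre-scaled so the intermediate product stays within
 * 32 bits. As a rough illustration (numbers are only approximate, given
 * the 44ps-77ps spread of a real delay element): at rate = 150 MHz the
 * clock period is ~6667ps, so one nominal 60ps element is worth about
 * 60 / 6667 * 360 ~= 3.2 degrees, and delay_num = 10 adds ~32 degrees.
 */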

static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
{
	struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
	unsigned long rate = clk_hw_get_rate(hw);
	u8 nineties, remainder;
	u8 delay_num;
	u32 raw_value;
	u32 delay;

	/*
	 * The calculation below is based on the output clock from the
	 * MMC host to the card, which expects the phase clock to inherit
	 * the clock rate from its parent, namely the output clock
	 * provider of the MMC host. However, things may go wrong if
	 * (1) It is an orphan.
	 * (2) It is assigned to the wrong parent.
	 *
	 * This check helps debug case (1), which seems to be the most
	 * likely problem we face and which makes it difficult for people
	 * to debug unstable mmc tuning results.
	 */
	if (!rate) {
		pr_err("%s: invalid clk rate\n", __func__);
		return -EINVAL;
	}

	nineties = degrees / 90;
	remainder = (degrees % 90);

	/*
	 * Due to the inexact nature of the "fine" delay, we might
	 * actually go non-monotonic. We don't go _too_ non-monotonic
	 * though, so we should be OK. Here are options of how we may
	 * work:
	 *
	 * Ideally we end up with:
	 *   1.0, 2.0, ..., 69.0, 70.0, ..., 89.0, 90.0
	 *
	 * On one extreme (if delay is actually 44ps):
	 *   .73, 1.5, ..., 50.6, 51.3, ..., 65.3, 90.0
	 * The other (if delay is actually 77ps):
	 *   1.3, 2.6, ..., 88.6, 89.8, ..., 114.0, 90
	 *
	 * It's possible we might make a delay that is up to 25
	 * degrees off from what we think we're making. That's OK
	 * though because we should be REALLY far from any bad range.
	 */

	/*
	 * Convert to delay; do a little extra work to make sure we
	 * don't overflow 32-bit / 64-bit numbers.
	 */
	delay = 10000000; /* PSECS_PER_SEC / 10000 / 10 */
	delay *= remainder;
	delay = DIV_ROUND_CLOSEST(delay,
			(rate / 1000) * 36 *
				(ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10));

	delay_num = (u8) min_t(u32, delay, 255);

	raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
	raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
	raw_value |= nineties;
	writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift),
	       mmc_clock->reg);

	pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
		 clk_hw_get_name(hw), degrees, delay_num,
		 mmc_clock->reg, raw_value >> (mmc_clock->shift),
		 rockchip_mmc_get_phase(hw));

	return 0;
}

static const struct clk_ops rockchip_mmc_clk_ops = {
	.recalc_rate	= rockchip_mmc_recalc,
	.get_phase	= rockchip_mmc_get_phase,
	.set_phase	= rockchip_mmc_set_phase,
};

#define to_rockchip_mmc_clock(x) \
	container_of(x, struct rockchip_mmc_clock, clk_rate_change_nb)

static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
					unsigned long event, void *data)
{
	struct rockchip_mmc_clock *mmc_clock = to_rockchip_mmc_clock(nb);
	struct clk_notifier_data *ndata = data;

	/*
	 * rockchip_mmc_clk is mostly used by mmc controllers to sample
	 * the input data, which expect a fixed phase after the tuning
	 * process. However, if the clock rate is changed, the phase is
	 * stale and may break the data sampling. So here we try to
	 * restore the phase for that case, except when
	 * (1) cached_phase is invalid, since we inevitably cached it
	 *     when the clock provider was reparented from orphan to its
	 *     real parent in the first place. Otherwise we may mess up
	 *     the initialization of MMC cards, since we only set the
	 *     default sample phase and drive phase later on.
	 * (2) the new rate is higher than the old one, since the mmc
	 *     driver sets the max-frequency to match the board's ability
	 *     and we can't go over that, otherwise the tests smoke out
	 *     the issue.
	 */
	if (ndata->old_rate <= ndata->new_rate)
		return NOTIFY_DONE;

	if (event == PRE_RATE_CHANGE)
		mmc_clock->cached_phase =
			rockchip_mmc_get_phase(&mmc_clock->hw);
	else if (mmc_clock->cached_phase != -EINVAL &&
		 event == POST_RATE_CHANGE)
		rockchip_mmc_set_phase(&mmc_clock->hw, mmc_clock->cached_phase);

	return NOTIFY_DONE;
}
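
/*
 * For context, a rough consumer-side sketch (names such as sample_clk,
 * step and best_deg are placeholders, not part of this driver): during
 * tuning, the MMC host driver is expected to sweep this clock's phase
 * through the generic clk API, e.g.
 *
 *	for (deg = 0; deg < 360; deg += step) {
 *		clk_set_phase(sample_clk, deg);
 *		// send the tuning command and record pass/fail
 *	}
 *	clk_set_phase(sample_clk, best_deg);
 *
 * If the card clock is later slowed down, the notifier above sees
 * PRE_RATE_CHANGE, caches the currently programmed phase, and re-applies
 * it on POST_RATE_CHANGE so the tuned sample point is not lost.
 */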

struct clk *rockchip_clk_register_mmc(const char *name,
				const char *const *parent_names, u8 num_parents,
				void __iomem *reg, int shift)
{
	struct clk_init_data init;
	struct rockchip_mmc_clock *mmc_clock;
	struct clk *clk;
	int ret;

	mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
	if (!mmc_clock)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.flags = 0;
	init.num_parents = num_parents;
	init.parent_names = parent_names;
	init.ops = &rockchip_mmc_clk_ops;

	mmc_clock->hw.init = &init;
	mmc_clock->reg = reg;
	mmc_clock->shift = shift;

	clk = clk_register(NULL, &mmc_clock->hw);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_register;
	}

	mmc_clock->clk_rate_change_nb.notifier_call =
				&rockchip_mmc_clk_rate_notify;
	ret = clk_notifier_register(clk, &mmc_clock->clk_rate_change_nb);
	if (ret)
		goto err_notifier;

	return clk;
err_notifier:
	clk_unregister(clk);
err_register:
	kfree(mmc_clock);
	return ERR_PTR(ret);
}
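
/*
 * Registration sketch (illustrative, not taken from this file): the SoC
 * clock drivers normally reach this helper through their branch tables,
 * but a direct call would look roughly like:
 *
 *	static const char *const sdmmc_sample_p[] = { "sclk_sdmmc" };
 *	struct clk *clk;
 *
 *	clk = rockchip_clk_register_mmc("sdmmc_sample", sdmmc_sample_p,
 *					ARRAY_SIZE(sdmmc_sample_p),
 *					reg_base + con_offset, 0);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 * Here "sclk_sdmmc", reg_base and con_offset stand in for the parent
 * card-clock name and the per-SoC phase-control register location.
 */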