// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include "common.h"

enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
};

struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define	CLOCK_ENABLE	BIT(0)
	u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_clock_set_config {
	__le32 id;
	__le32 attributes;
};

struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};

struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
/* bit[12] set means the rates are a (min, max, step) triplet */
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[];
/* Assemble a 64-bit rate from its two little-endian 32-bit halves */
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};

struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};

struct clock_info {
	u32 version;
	int num_clocks;
	int max_async_req;
	atomic_t cur_async_req;
	struct scmi_clock_info *clk;
};

static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
					      struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
				 SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_clock_attributes_get(const struct scmi_handle *handle,
				     u32 clk_id, struct scmi_clock_info *clk)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;

	ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
				 sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);
	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
	else
		clk->name[0] = '\0';

	scmi_xfer_put(handle, t);
	return ret;
}

static int
scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	u64 *rate;
	int ret, cnt;
	bool rate_discrete = false;
	u32 tot_rate_cnt = 0, rates_flag;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_msg_clock_describe_rates *clk_desc;
	struct scmi_msg_resp_clock_describe_rates *rlist;

	ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
				 SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
	if (ret)
		return ret;

	clk_desc = t->tx.buf;
	rlist = t->rx.buf;

	do {
		clk_desc->id = cpu_to_le32(clk_id);
		/* Set the number of rates to be skipped/already read */
		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);

		ret = scmi_do_xfer(handle, t);
		if (ret)
			goto err;

		rates_flag = le32_to_cpu(rlist->num_rates_flags);
		num_remaining = NUM_REMAINING(rates_flag);
		rate_discrete = RATE_DISCRETE(rates_flag);
		num_returned = NUM_RETURNED(rates_flag);

		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
			dev_err(handle->dev, "No. of rates > MAX_NUM_RATES\n");
			break;
		}

		if (!rate_discrete) {
			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
			dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
				clk->range.min_rate, clk->range.max_rate,
				clk->range.step_size);
			break;
		}

		rate = &clk->list.rates[tot_rate_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
			*rate = RATE_TO_U64(rlist->rate[cnt]);
			dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
		}

		tot_rate_cnt += num_returned;
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	if (rate_discrete)
		clk->list.num_rates = tot_rate_cnt;

	clk->rate_discrete = rate_discrete;

err:
	scmi_xfer_put(handle, t);
	return ret;
}

static int
scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
				 sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		*value = get_unaligned_le64(t->rx.buf);

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
			       u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = handle->clk_priv;

	ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
				 sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	/*
	 * Use an asynchronous request when the platform supports them and
	 * there is still room for another in-flight request; otherwise
	 * fall back to a synchronous rate set.
	 */
	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC)
		ret = scmi_do_xfer_with_response(handle, t);
	else
		ret = scmi_do_xfer(handle, t);

	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	scmi_xfer_put(handle, t);
	return ret;
}

static int
scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_clock_set_config *cfg;

	ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
				 sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(config);

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
}

static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, 0);
}

static int scmi_clock_count_get(const struct scmi_handle *handle)
{
	struct clock_info *ci = handle->clk_priv;

	return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
{
	struct clock_info *ci = handle->clk_priv;
	struct scmi_clock_info *clk = ci->clk + clk_id;

	if (!clk->name[0])
		return NULL;

	return clk;
}

static struct scmi_clk_ops clk_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
};

static int scmi_clock_protocol_init(struct scmi_handle *handle)
{
	u32 version;
	int clkid, ret;
	struct clock_info *cinfo;

	scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);

	dev_dbg(handle->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	scmi_clock_protocol_attributes_get(handle, cinfo);

	cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(handle, clkid, clk);
		if (!ret)
			scmi_clock_describe_rates_get(handle, clkid, clk);
	}

	cinfo->version = version;
	handle->clk_ops = &clk_ops;
	handle->clk_priv = cinfo;

	return 0;
}

static int __init scmi_clock_init(void)
{
	return scmi_protocol_register(SCMI_PROTOCOL_CLOCK,
				      &scmi_clock_protocol_init);
}
subsys_initcall(scmi_clock_init);
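
/*
 * Illustrative only: a rough sketch of how a consumer (e.g. a clock
 * provider driver along the lines of drivers/clk/clk-scmi.c) might drive
 * these operations through a received scmi_handle. The loop below and
 * the 'dev' pointer are hypothetical; this is not part of the protocol
 * implementation above.
 *
 *	const struct scmi_clk_ops *ops = handle->clk_ops;
 *	int i, count = ops->count_get(handle);
 *	u64 rate;
 *
 *	for (i = 0; i < count; i++) {
 *		const struct scmi_clock_info *info = ops->info_get(handle, i);
 *
 *		if (info && !ops->rate_get(handle, i, &rate))
 *			dev_dbg(dev, "clk %s @ %llu Hz\n", info->name, rate);
 *	}
 */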