// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/sort.h>

#include "common.h"

enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
};

struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define	CLOCK_ENABLE	BIT(0)
	u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_clock_set_config {
	__le32 id;
	__le32 attributes;
};

struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};

struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[0];
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};

struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};

struct clock_info {
	u32 version;
	int num_clocks;
	int max_async_req;
	atomic_t cur_async_req;
	struct scmi_clock_info *clk;
};

static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
					      struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
				 SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_clock_attributes_get(const struct scmi_handle *handle,
				     u32 clk_id, struct scmi_clock_info *clk)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;

	ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
				 sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);
	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
	else
		clk->name[0] = '\0';

	scmi_xfer_put(handle, t);
	return ret;
}

static int rate_cmp_func(const void *_r1, const void *_r2)
{
	const u64 *r1 = _r1, *r2 = _r2;

	if (*r1 < *r2)
		return -1;
	else if (*r1 == *r2)
		return 0;
	else
		return 1;
}
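
/*
 * CLOCK_DESCRIBE_RATES responses are paginated. A worked decode of
 * num_rates_flags, using an illustrative value rather than one from
 * real firmware: 0x00030004 yields NUM_RETURNED() = 4 (bits [11:0]),
 * RATE_DISCRETE() = true (bit 12 clear, i.e. a list of discrete rates
 * rather than a {min, max, step} triplet) and NUM_REMAINING() = 3
 * (bits [31:16]). The function below keeps re-issuing the command with
 * rate_index advanced by the running total until a response returns
 * nothing or reports nothing remaining.
 */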

static int
scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	u64 *rate = NULL;
	int ret, cnt;
	bool rate_discrete = false;
	u32 tot_rate_cnt = 0, rates_flag;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_msg_clock_describe_rates *clk_desc;
	struct scmi_msg_resp_clock_describe_rates *rlist;

	ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
				 SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
	if (ret)
		return ret;

	clk_desc = t->tx.buf;
	rlist = t->rx.buf;

	do {
		clk_desc->id = cpu_to_le32(clk_id);
		/* Set the number of rates to be skipped/already read */
		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);

		ret = scmi_do_xfer(handle, t);
		if (ret)
			goto err;

		rates_flag = le32_to_cpu(rlist->num_rates_flags);
		num_remaining = NUM_REMAINING(rates_flag);
		rate_discrete = RATE_DISCRETE(rates_flag);
		num_returned = NUM_RETURNED(rates_flag);

		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
			dev_err(handle->dev, "No. of rates > MAX_NUM_RATES");
			break;
		}

		if (!rate_discrete) {
			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
			dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
				clk->range.min_rate, clk->range.max_rate,
				clk->range.step_size);
			break;
		}

		rate = &clk->list.rates[tot_rate_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
			*rate = RATE_TO_U64(rlist->rate[cnt]);
			dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
		}

		tot_rate_cnt += num_returned;
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	if (rate_discrete && rate) {
		clk->list.num_rates = tot_rate_cnt;
		/*
		 * Sort from the start of the gathered list; after the loop
		 * above, the rate cursor points one past the last element.
		 */
		sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
		     rate_cmp_func, NULL);
	}

	clk->rate_discrete = rate_discrete;

err:
	scmi_xfer_put(handle, t);
	return ret;
}

static int
scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
				 sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		*value = get_unaligned_le64(t->rx.buf);

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
			       u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = handle->clk_priv;

	ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
				 sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC)
		ret = scmi_do_xfer_with_response(handle, t);
	else
		ret = scmi_do_xfer(handle, t);

	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	scmi_xfer_put(handle, t);
	return ret;
}
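
/*
 * CLOCK_CONFIG_SET writes the whole attributes word of
 * struct scmi_clock_set_config. The scmi_clock_enable() and
 * scmi_clock_disable() wrappers below only ever toggle
 * CLOCK_ENABLE (BIT(0)); all other attribute bits stay zero.
 */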

static int
scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_clock_set_config *cfg;

	ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
				 sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(config);

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
}

static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, 0);
}

static int scmi_clock_count_get(const struct scmi_handle *handle)
{
	struct clock_info *ci = handle->clk_priv;

	return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
{
	struct clock_info *ci = handle->clk_priv;
	struct scmi_clock_info *clk = ci->clk + clk_id;

	if (!clk->name[0])
		return NULL;

	return clk;
}

static struct scmi_clk_ops clk_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
};

static int scmi_clock_protocol_init(struct scmi_handle *handle)
{
	u32 version;
	int clkid, ret;
	struct clock_info *cinfo;

	scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);

	dev_dbg(handle->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	scmi_clock_protocol_attributes_get(handle, cinfo);

	cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(handle, clkid, clk);
		if (!ret)
			scmi_clock_describe_rates_get(handle, clkid, clk);
	}

	cinfo->version = version;
	handle->clk_ops = &clk_ops;
	handle->clk_priv = cinfo;

	return 0;
}

static int __init scmi_clock_init(void)
{
	return scmi_protocol_register(SCMI_PROTOCOL_CLOCK,
				      &scmi_clock_protocol_init);
}
subsys_initcall(scmi_clock_init);
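
/*
 * Illustrative usage sketch, not part of this driver: with the ops
 * installed by scmi_clock_protocol_init(), a consumer holding a
 * struct scmi_handle (e.g. drivers/clk/clk-scmi.c) can do roughly:
 *
 *	const struct scmi_clk_ops *ops = handle->clk_ops;
 *	u64 rate;
 *
 *	if (ops->count_get(handle) > 0 && !ops->enable(handle, 0)) {
 *		if (!ops->rate_get(handle, 0, &rate))
 *			pr_debug("clk 0 runs at %llu Hz\n", rate);
 *		ops->rate_set(handle, 0, rate);
 *		ops->disable(handle, 0);
 *	}
 *
 * The clock id 0 here is hypothetical; real consumers enumerate ids
 * with count_get() and validate them with info_get().
 */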