// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/sort.h>

#include "common.h"

enum scmi_clock_protocol_cmd {
        CLOCK_ATTRIBUTES = 0x3,
        CLOCK_DESCRIBE_RATES = 0x4,
        CLOCK_RATE_SET = 0x5,
        CLOCK_RATE_GET = 0x6,
        CLOCK_CONFIG_SET = 0x7,
};

struct scmi_msg_resp_clock_protocol_attributes {
        __le16 num_clocks;
        u8 max_async_req;
        u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
        __le32 attributes;
#define CLOCK_ENABLE    BIT(0)
        u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_clock_set_config {
        __le32 id;
        __le32 attributes;
};

struct scmi_msg_clock_describe_rates {
        __le32 id;
        __le32 rate_index;
};

struct scmi_msg_resp_clock_describe_rates {
        __le32 num_rates_flags;
#define NUM_RETURNED(x)         ((x) & 0xfff)
#define RATE_DISCRETE(x)        !((x) & BIT(12))
#define NUM_REMAINING(x)        ((x) >> 16)
        struct {
                __le32 value_low;
                __le32 value_high;
        } rate[];
#define RATE_TO_U64(X)          \
({                              \
        typeof(X) x = (X);      \
        le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};

struct scmi_clock_set_rate {
        __le32 flags;
#define CLOCK_SET_ASYNC         BIT(0)
#define CLOCK_SET_IGNORE_RESP   BIT(1)
#define CLOCK_SET_ROUND_UP      BIT(2)
#define CLOCK_SET_ROUND_AUTO    BIT(3)
        __le32 id;
        __le32 value_low;
        __le32 value_high;
};

struct clock_info {
        u32 version;
        int num_clocks;
        int max_async_req;
        atomic_t cur_async_req;
        struct scmi_clock_info *clk;
};

static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
                                              struct clock_info *ci)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_resp_clock_protocol_attributes *attr;

        ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
                                 SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
        if (ret)
                return ret;

        attr = t->rx.buf;

        ret = scmi_do_xfer(handle, t);
        if (!ret) {
                ci->num_clocks = le16_to_cpu(attr->num_clocks);
                ci->max_async_req = attr->max_async_req;
        }

        scmi_xfer_put(handle, t);
        return ret;
}

static int scmi_clock_attributes_get(const struct scmi_handle *handle,
                                     u32 clk_id, struct scmi_clock_info *clk)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_resp_clock_attributes *attr;

        ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
                                 sizeof(clk_id), sizeof(*attr), &t);
        if (ret)
                return ret;

        put_unaligned_le32(clk_id, t->tx.buf);
        attr = t->rx.buf;

        ret = scmi_do_xfer(handle, t);
        if (!ret)
                strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
        else
                clk->name[0] = '\0';

        scmi_xfer_put(handle, t);
        return ret;
}

/* Comparison helper for sort(): orders discrete rates ascending */
static int rate_cmp_func(const void *_r1, const void *_r2)
{
        const u64 *r1 = _r1, *r2 = _r2;

        if (*r1 < *r2)
                return -1;
        else if (*r1 == *r2)
                return 0;
        else
                return 1;
}
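/*
 * Worked example (hypothetical reply word, not taken from a real trace):
 * CLOCK_DESCRIBE_RATES answers are paged, and num_rates_flags packs the
 * paging state decoded by the macros above. For rates_flag == 0x00050003:
 *
 *      NUM_RETURNED(rates_flag)  == 3     - rate[0..2] are valid here
 *      RATE_DISCRETE(rates_flag) == true  - BIT(12) clear: discrete list
 *      NUM_REMAINING(rates_flag) == 5     - resend with a higher rate_index
 *
 * The loop below keeps re-issuing the command, advancing rate_index by
 * NUM_RETURNED each pass, until the platform reports nothing remaining.
 */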
static int
scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
                              struct scmi_clock_info *clk)
{
        u64 *rate = NULL;
        int ret, cnt;
        bool rate_discrete = false;
        u32 tot_rate_cnt = 0, rates_flag;
        u16 num_returned, num_remaining;
        struct scmi_xfer *t;
        struct scmi_msg_clock_describe_rates *clk_desc;
        struct scmi_msg_resp_clock_describe_rates *rlist;

        ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
                                 SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
        if (ret)
                return ret;

        clk_desc = t->tx.buf;
        rlist = t->rx.buf;

        do {
                clk_desc->id = cpu_to_le32(clk_id);
                /* Set the number of rates to be skipped/already read */
                clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);

                ret = scmi_do_xfer(handle, t);
                if (ret)
                        goto err;

                rates_flag = le32_to_cpu(rlist->num_rates_flags);
                num_remaining = NUM_REMAINING(rates_flag);
                rate_discrete = RATE_DISCRETE(rates_flag);
                num_returned = NUM_RETURNED(rates_flag);

                if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
                        dev_err(handle->dev, "No. of rates > MAX_NUM_RATES\n");
                        break;
                }

                if (!rate_discrete) {
                        /* Range format: a min/max/step triplet in rate[0..2] */
                        clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
                        clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
                        clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
                        dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
                                clk->range.min_rate, clk->range.max_rate,
                                clk->range.step_size);
                        break;
                }

                rate = &clk->list.rates[tot_rate_cnt];
                for (cnt = 0; cnt < num_returned; cnt++, rate++) {
                        *rate = RATE_TO_U64(rlist->rate[cnt]);
                        dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
                }

                tot_rate_cnt += num_returned;

                scmi_reset_rx_to_maxsz(handle, t);
                /*
                 * check for both returned and remaining to avoid infinite
                 * loop due to buggy firmware
                 */
        } while (num_returned && num_remaining);

        if (rate_discrete && rate) {
                clk->list.num_rates = tot_rate_cnt;
                /*
                 * Sort from the start of the array: after the loop, rate
                 * points one past the last element written.
                 */
                sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
                     rate_cmp_func, NULL);
        }

        clk->rate_discrete = rate_discrete;

err:
        scmi_xfer_put(handle, t);
        return ret;
}

static int
scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
{
        int ret;
        struct scmi_xfer *t;

        ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
                                 sizeof(__le32), sizeof(u64), &t);
        if (ret)
                return ret;

        put_unaligned_le32(clk_id, t->tx.buf);

        ret = scmi_do_xfer(handle, t);
        if (!ret)
                *value = get_unaligned_le64(t->rx.buf);

        scmi_xfer_put(handle, t);
        return ret;
}

static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
                               u64 rate)
{
        int ret;
        u32 flags = 0;
        struct scmi_xfer *t;
        struct scmi_clock_set_rate *cfg;
        struct clock_info *ci = handle->clk_priv;

        ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
                                 sizeof(*cfg), 0, &t);
        if (ret)
                return ret;

        /* Go asynchronous only while the platform has async slots free */
        if (ci->max_async_req &&
            atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
                flags |= CLOCK_SET_ASYNC;

        cfg = t->tx.buf;
        cfg->flags = cpu_to_le32(flags);
        cfg->id = cpu_to_le32(clk_id);
        cfg->value_low = cpu_to_le32(rate & 0xffffffff);
        cfg->value_high = cpu_to_le32(rate >> 32);

        if (flags & CLOCK_SET_ASYNC)
                ret = scmi_do_xfer_with_response(handle, t);
        else
                ret = scmi_do_xfer(handle, t);

        if (ci->max_async_req)
                atomic_dec(&ci->cur_async_req);

        scmi_xfer_put(handle, t);
        return ret;
}
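/*
 * Worked example (hypothetical rate, for illustration only): a 64-bit rate
 * is carried as two little-endian 32-bit words. Requesting 2.4 GHz, i.e.
 * rate == 2400000000 == 0x8F0D1800:
 *
 *      cfg->value_low  = cpu_to_le32(rate & 0xffffffff); // 0x8F0D1800
 *      cfg->value_high = cpu_to_le32(rate >> 32);        // 0x00000000
 *
 * The platform reassembles value_high << 32 | value_low, the inverse of
 * RATE_TO_U64() used on the receive path.
 */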
static int
scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_clock_set_config *cfg;

        ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
                                 sizeof(*cfg), 0, &t);
        if (ret)
                return ret;

        cfg = t->tx.buf;
        cfg->id = cpu_to_le32(clk_id);
        cfg->attributes = cpu_to_le32(config);

        ret = scmi_do_xfer(handle, t);

        scmi_xfer_put(handle, t);
        return ret;
}

static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
{
        return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
}

static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
{
        return scmi_clock_config_set(handle, clk_id, 0);
}

static int scmi_clock_count_get(const struct scmi_handle *handle)
{
        struct clock_info *ci = handle->clk_priv;

        return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
{
        struct clock_info *ci = handle->clk_priv;
        struct scmi_clock_info *clk;

        /* Reject out-of-range identifiers instead of reading past ci->clk */
        if (clk_id >= ci->num_clocks)
                return NULL;

        clk = ci->clk + clk_id;
        if (!clk->name[0])
                return NULL;

        return clk;
}

static const struct scmi_clk_ops clk_ops = {
        .count_get = scmi_clock_count_get,
        .info_get = scmi_clock_info_get,
        .rate_get = scmi_clock_rate_get,
        .rate_set = scmi_clock_rate_set,
        .enable = scmi_clock_enable,
        .disable = scmi_clock_disable,
};

static int scmi_clock_protocol_init(struct scmi_handle *handle)
{
        u32 version;
        int clkid, ret;
        struct clock_info *cinfo;

        scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);

        dev_dbg(handle->dev, "Clock Version %d.%d\n",
                PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

        cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
        if (!cinfo)
                return -ENOMEM;

        scmi_clock_protocol_attributes_get(handle, cinfo);

        cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
                                  sizeof(*cinfo->clk), GFP_KERNEL);
        if (!cinfo->clk)
                return -ENOMEM;

        for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
                struct scmi_clock_info *clk = cinfo->clk + clkid;

                ret = scmi_clock_attributes_get(handle, clkid, clk);
                if (!ret)
                        scmi_clock_describe_rates_get(handle, clkid, clk);
        }

        cinfo->version = version;
        handle->clk_ops = &clk_ops;
        handle->clk_priv = cinfo;

        return 0;
}

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_CLOCK, clock)
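/*
 * Usage sketch (illustrative, not part of this driver): a clock consumer
 * such as drivers/clk/clk-scmi.c reaches these operations through the
 * handle populated by scmi_clock_protocol_init(). Assuming "handle" is a
 * valid struct scmi_handle and with error handling elided:
 *
 *      const struct scmi_clk_ops *ops = handle->clk_ops;
 *      const struct scmi_clock_info *info;
 *      u64 rate;
 *
 *      if (ops->count_get(handle) > 0) {
 *              info = ops->info_get(handle, 0);
 *              if (info && !ops->enable(handle, 0)) {
 *                      ops->rate_set(handle, 0, 100000000);
 *                      ops->rate_get(handle, 0, &rate);
 *                      ops->disable(handle, 0);
 *              }
 *      }
 */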