// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#include <linux/module.h>
#include <linux/sort.h>

#include "common.h"

enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
};

struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define CLOCK_ENABLE	BIT(0)
	u8 name[SCMI_MAX_STR_SIZE];
	__le32 clock_enable_latency;
};

struct scmi_clock_set_config {
	__le32 id;
	__le32 attributes;
};

struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};

struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[];
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};

struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};

struct clock_info {
	u32 version;
	int num_clocks;
	int max_async_req;
	atomic_t cur_async_req;
	struct scmi_clock_info *clk;
};

static int
scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
				   struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
				      0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
				     u32 clk_id, struct scmi_clock_info *clk)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
				      sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);
	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
		/* Is optional field clock_enable_latency provided ? */
		if (t->rx.len == sizeof(*attr))
			clk->enable_latency =
				le32_to_cpu(attr->clock_enable_latency);
	} else {
		clk->name[0] = '\0';
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int rate_cmp_func(const void *_r1, const void *_r2)
{
	const u64 *r1 = _r1, *r2 = _r2;

	if (*r1 < *r2)
		return -1;
	else if (*r1 == *r2)
		return 0;
	else
		return 1;
}

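/*
 * CLOCK_DESCRIBE_RATES replies can arrive in chunks: num_rates_flags
 * carries the number of rates returned in this reply (bits [11:0]), a
 * discrete-vs-range flag (bit 12) and the number of rates still to be
 * fetched (bits [31:16]). Discrete rate lists are accumulated across
 * iterations, while a rate range is fully described by a single
 * min/max/step triplet.
 */
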
static int
scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	u64 *rate = NULL;
	int ret, cnt;
	bool rate_discrete = false;
	u32 tot_rate_cnt = 0, rates_flag;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_msg_clock_describe_rates *clk_desc;
	struct scmi_msg_resp_clock_describe_rates *rlist;

	ret = ph->xops->xfer_get_init(ph, CLOCK_DESCRIBE_RATES,
				      sizeof(*clk_desc), 0, &t);
	if (ret)
		return ret;

	clk_desc = t->tx.buf;
	rlist = t->rx.buf;

	do {
		clk_desc->id = cpu_to_le32(clk_id);
		/* Set the number of rates to be skipped/already read */
		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);

		ret = ph->xops->do_xfer(ph, t);
		if (ret)
			goto err;

		rates_flag = le32_to_cpu(rlist->num_rates_flags);
		num_remaining = NUM_REMAINING(rates_flag);
		rate_discrete = RATE_DISCRETE(rates_flag);
		num_returned = NUM_RETURNED(rates_flag);

		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
			dev_err(ph->dev, "No. of rates > MAX_NUM_RATES");
			break;
		}

		if (!rate_discrete) {
			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
			dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
				clk->range.min_rate, clk->range.max_rate,
				clk->range.step_size);
			break;
		}

		rate = &clk->list.rates[tot_rate_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
			*rate = RATE_TO_U64(rlist->rate[cnt]);
			dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
		}

		tot_rate_cnt += num_returned;

		ph->xops->reset_rx_to_maxsz(ph, t);
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	if (rate_discrete && rate) {
		clk->list.num_rates = tot_rate_cnt;
		sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
		     rate_cmp_func, NULL);
	}

	clk->rate_discrete = rate_discrete;

err:
	ph->xops->xfer_put(ph, t);
	return ret;
}

static int
scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
		    u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
				      sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*value = get_unaligned_le64(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}

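/*
 * Rate set requests are issued asynchronously when the platform has
 * advertised support for asynchronous commands and the number of
 * in-flight async requests is still below that limit; otherwise the
 * request falls back to a regular synchronous transfer.
 */
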
static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
			       u32 clk_id, u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = ph->get_priv(ph);

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC)
		ret = ph->xops->do_xfer_with_response(ph, t);
	else
		ret = ph->xops->do_xfer(ph, t);

	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
		      u32 config, bool atomic)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_clock_set_config *cfg;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = atomic;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(config);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false);
}

static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, 0, false);
}

static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph,
				    u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true);
}

static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph,
				     u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, 0, true);
}

static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
{
	struct clock_info *ci = ph->get_priv(ph);

	return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk = ci->clk + clk_id;

	if (!clk->name[0])
		return NULL;

	return clk;
}

static const struct scmi_clk_proto_ops clk_proto_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
	.enable_atomic = scmi_clock_enable_atomic,
	.disable_atomic = scmi_clock_disable_atomic,
};

static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
{
	u32 version;
	int clkid, ret;
	struct clock_info *cinfo;

	ph->xops->version_get(ph, &version);

	dev_dbg(ph->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	/* Do not silently ignore a failed attributes query */
	ret = scmi_clock_protocol_attributes_get(ph, cinfo);
	if (ret)
		return ret;

	cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(ph, clkid, clk);
		if (!ret)
			scmi_clock_describe_rates_get(ph, clkid, clk);
	}

	cinfo->version = version;
	return ph->set_priv(ph, cinfo);
}

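/*
 * Protocol descriptor consumed by the SCMI core: the registration macro
 * below generates scmi_clock_register()/scmi_clock_unregister() helpers
 * that plug this protocol into the core's protocol table.
 */
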
static const struct scmi_protocol scmi_clock = {
	.id = SCMI_PROTOCOL_CLOCK,
	.owner = THIS_MODULE,
	.instance_init = &scmi_clock_protocol_init,
	.ops = &clk_proto_ops,
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)
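
/*
 * Usage sketch (illustrative only, not part of this driver): an SCMI
 * consumer driver bound on the SCMI bus could retrieve the clk_proto_ops
 * registered above through its scmi_device handle, roughly as follows.
 * The probe function name and the use of clock id 0 are hypothetical.
 *
 *	static int example_clk_probe(struct scmi_device *sdev)
 *	{
 *		const struct scmi_clk_proto_ops *clk_ops;
 *		struct scmi_protocol_handle *ph;
 *		u64 rate;
 *		int ret;
 *
 *		clk_ops = sdev->handle->devm_protocol_get(sdev,
 *					SCMI_PROTOCOL_CLOCK, &ph);
 *		if (IS_ERR(clk_ops))
 *			return PTR_ERR(clk_ops);
 *
 *		dev_info(&sdev->dev, "%d clocks\n", clk_ops->count_get(ph));
 *
 *		ret = clk_ops->rate_get(ph, 0, &rate);
 *		if (!ret)
 *			dev_info(&sdev->dev, "clk0 at %llu Hz\n", rate);
 *
 *		return ret;
 *	}
 */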