// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018-2022 ARM Ltd.
 */

#include <linux/module.h>
#include <linux/limits.h>
#include <linux/sort.h>

#include "protocols.h"
#include "notify.h"

enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
	CLOCK_NAME_GET = 0x8,
	CLOCK_RATE_NOTIFY = 0x9,
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
};

struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;
	u8 max_async_req;
	u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
	__le32 attributes;
#define	CLOCK_ENABLE	BIT(0)
#define SUPPORTS_RATE_CHANGED_NOTIF(x)		((x) & BIT(31))
#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x)	((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x)		((x) & BIT(29))
	u8 name[SCMI_SHORT_NAME_MAX_SIZE];
	__le32 clock_enable_latency;
};

struct scmi_clock_set_config {
	__le32 id;
	__le32 attributes;
};

struct scmi_msg_clock_describe_rates {
	__le32 id;
	__le32 rate_index;
};

struct scmi_msg_resp_clock_describe_rates {
	__le32 num_rates_flags;
#define NUM_RETURNED(x)		((x) & 0xfff)
#define RATE_DISCRETE(x)	!((x) & BIT(12))
#define NUM_REMAINING(x)	((x) >> 16)
	struct {
		__le32 value_low;
		__le32 value_high;
	} rate[];
#define RATE_TO_U64(X)		\
({				\
	typeof(X) x = (X);	\
	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};

struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)
#define CLOCK_SET_IGNORE_RESP	BIT(1)
#define CLOCK_SET_ROUND_UP	BIT(2)
#define CLOCK_SET_ROUND_AUTO	BIT(3)
	__le32 id;
	__le32 value_low;
	__le32 value_high;
};

struct scmi_msg_resp_set_rate_complete {
	__le32 id;
	__le32 rate_low;
	__le32 rate_high;
};

struct scmi_msg_clock_rate_notify {
	__le32 clk_id;
	__le32 notify_enable;
};

struct scmi_clock_rate_notify_payld {
	__le32 agent_id;
	__le32 clock_id;
	__le32 rate_low;
	__le32 rate_high;
};

struct clock_info {
	u32 version;
	int num_clocks;
	int max_async_req;
	atomic_t cur_async_req;
	struct scmi_clock_info *clk;
};

static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
	CLOCK_RATE_NOTIFY,
	CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
};

static int
scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
				   struct clock_info *ci)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_protocol_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
				      0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		ci->num_clocks = le16_to_cpu(attr->num_clocks);
		ci->max_async_req = attr->max_async_req;
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
				     u32 clk_id, struct scmi_clock_info *clk,
				     u32 version)
{
	int ret;
	u32 attributes;
	struct scmi_xfer *t;
	struct scmi_msg_resp_clock_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
				      sizeof(clk_id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);
	attr = t->rx.buf;

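	/*
	 * Synchronous transfer: on success the attributes (and, on SCMI
	 * v3.1+ platforms, the optional enable latency) are parsed out of
	 * the rx buffer below.
	 */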
	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		u32 latency = 0;

		attributes = le32_to_cpu(attr->attributes);
		strlcpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
		/* clock_enable_latency field is present only since SCMI v3.1 */
		if (PROTOCOL_REV_MAJOR(version) >= 0x2)
			latency = le32_to_cpu(attr->clock_enable_latency);
		clk->enable_latency = latency ? : U32_MAX;
	}

	ph->xops->xfer_put(ph, t);

	/*
	 * If supported overwrite short name with the extended one;
	 * on error just carry on and use already provided short name.
	 */
	if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
		if (SUPPORTS_EXTENDED_NAMES(attributes))
			ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
						    clk->name,
						    SCMI_MAX_STR_SIZE);

		if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
			clk->rate_changed_notifications = true;
		if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
			clk->rate_change_requested_notifications = true;
	}

	return ret;
}

/* Sort rates in ascending order */
static int rate_cmp_func(const void *_r1, const void *_r2)
{
	const u64 *r1 = _r1, *r2 = _r2;

	if (*r1 < *r2)
		return -1;
	else if (*r1 == *r2)
		return 0;
	else
		return 1;
}

struct scmi_clk_ipriv {
	u32 clk_id;
	struct scmi_clock_info *clk;
};

static void iter_clk_describe_prepare_message(void *message,
					      const unsigned int desc_index,
					      const void *priv)
{
	struct scmi_msg_clock_describe_rates *msg = message;
	const struct scmi_clk_ipriv *p = priv;

	msg->id = cpu_to_le32(p->clk_id);
	/* Set the number of rates to be skipped/already read */
	msg->rate_index = cpu_to_le32(desc_index);
}

static int
iter_clk_describe_update_state(struct scmi_iterator_state *st,
			       const void *response, void *priv)
{
	u32 flags;
	struct scmi_clk_ipriv *p = priv;
	const struct scmi_msg_resp_clock_describe_rates *r = response;

	flags = le32_to_cpu(r->num_rates_flags);
	st->num_remaining = NUM_REMAINING(flags);
	st->num_returned = NUM_RETURNED(flags);
	p->clk->rate_discrete = RATE_DISCRETE(flags);

	return 0;
}

static int
iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
				   const void *response,
				   struct scmi_iterator_state *st, void *priv)
{
	int ret = 0;
	struct scmi_clk_ipriv *p = priv;
	const struct scmi_msg_resp_clock_describe_rates *r = response;

	if (!p->clk->rate_discrete) {
		/* A non-discrete range is a (min, max, step) triplet */
		switch (st->desc_index + st->loop_idx) {
		case 0:
			p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
			break;
		case 1:
			p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
			break;
		case 2:
			p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
			break;
		default:
			ret = -EINVAL;
			break;
		}
	} else {
		u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];

		*rate = RATE_TO_U64(r->rate[st->loop_idx]);
		p->clk->list.num_rates++;
		dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
	}

	return ret;
}

static int
scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	int ret;
	void *iter;
	struct scmi_msg_clock_describe_rates *msg;
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_clk_describe_prepare_message,
		.update_state = iter_clk_describe_update_state,
		.process_response = iter_clk_describe_process_response,
	};
	struct scmi_clk_ipriv cpriv = {
		.clk_id = clk_id,
		.clk = clk,
	};

	iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
					    CLOCK_DESCRIBE_RATES,
					    sizeof(*msg), &cpriv);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	ret = ph->hops->iter_response_run(iter);
	if (ret)
		return ret;

	if (!clk->rate_discrete) {
		dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
			clk->range.min_rate, clk->range.max_rate,
			clk->range.step_size);
	} else if (clk->list.num_rates) {
		sort(clk->list.rates, clk->list.num_rates,
		     sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
	}

	return ret;
}

static int
scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
		    u32 clk_id, u64 *value)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
				      sizeof(__le32), sizeof(u64), &t);
	if (ret)
		return ret;

	put_unaligned_le32(clk_id, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*value = get_unaligned_le64(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
			       u32 clk_id, u64 rate)
{
	int ret;
	u32 flags = 0;
	struct scmi_xfer *t;
	struct scmi_clock_set_rate *cfg;
	struct clock_info *ci = ph->get_priv(ph);

	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	/* Use an asynchronous request if the platform still has free slots */
	if (ci->max_async_req &&
	    atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
		flags |= CLOCK_SET_ASYNC;

	cfg = t->tx.buf;
	cfg->flags = cpu_to_le32(flags);
	cfg->id = cpu_to_le32(clk_id);
	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
	cfg->value_high = cpu_to_le32(rate >> 32);

	if (flags & CLOCK_SET_ASYNC) {
		ret = ph->xops->do_xfer_with_response(ph, t);
		if (!ret) {
			struct scmi_msg_resp_set_rate_complete *resp;

			resp = t->rx.buf;
			if (le32_to_cpu(resp->id) == clk_id)
				dev_dbg(ph->dev,
					"Clk ID %d set async to %llu\n", clk_id,
					get_unaligned_le64(&resp->rate_low));
			else
				ret = -EPROTO;
		}
	} else {
		ret = ph->xops->do_xfer(ph, t);
	}

	if (ci->max_async_req)
		atomic_dec(&ci->cur_async_req);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
		      u32 config, bool atomic)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_clock_set_config *cfg;

	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
				      sizeof(*cfg), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = atomic;

	cfg = t->tx.buf;
	cfg->id = cpu_to_le32(clk_id);
	cfg->attributes = cpu_to_le32(config);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false);
}

static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, 0, false);
}

static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph,
				    u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true);
}

static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph,
				     u32 clk_id)
{
	return scmi_clock_config_set(ph, clk_id, 0, true);
}

static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
{
	struct clock_info *ci = ph->get_priv(ph);

	return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
	struct clock_info *ci = ph->get_priv(ph);
	struct scmi_clock_info *clk = ci->clk + clk_id;

	if (!clk->name[0])
		return NULL;

	return clk;
}

static const struct scmi_clk_proto_ops clk_proto_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
	.enable_atomic = scmi_clock_enable_atomic,
	.disable_atomic = scmi_clock_disable_atomic,
};

static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
				u32 clk_id, int message_id, bool enable)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_clock_rate_notify *notify;

	ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
	if (ret)
		return ret;

	notify = t->tx.buf;
	notify->clk_id = cpu_to_le32(clk_id);
	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
				       u8 evt_id, u32 src_id, bool enable)
{
	int ret, cmd_id;

	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
		return -EINVAL;

	cmd_id = evt_2_cmd[evt_id];
	ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
	if (ret)
		pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
			 evt_id, src_id, ret);

	return ret;
}

static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
					 u8 evt_id, ktime_t timestamp,
					 const void *payld, size_t payld_sz,
					 void *report, u32 *src_id)
{
	const struct scmi_clock_rate_notify_payld *p = payld;
	struct scmi_clock_rate_notif_report *r = report;

	if (sizeof(*p) != payld_sz ||
	    (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
	     evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
		return NULL;

	r->timestamp = timestamp;
	r->agent_id = le32_to_cpu(p->agent_id);
	r->clock_id = le32_to_cpu(p->clock_id);
	r->rate = get_unaligned_le64(&p->rate_low);
	*src_id = r->clock_id;

	return r;
}

static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
{
	struct clock_info *ci = ph->get_priv(ph);

	if (!ci)
		return -EINVAL;

	return ci->num_clocks;
}

static const struct scmi_event clk_events[] = {
	{
		.id = SCMI_EVENT_CLOCK_RATE_CHANGED,
		.max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
		.max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
	},
	{
		.id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
		.max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
		.max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
	},
};

static const struct scmi_event_ops clk_event_ops = {
	.get_num_sources = scmi_clk_get_num_sources,
	.set_notify_enabled = scmi_clk_set_notify_enabled,
	.fill_custom_report = scmi_clk_fill_custom_report,
};

static const struct scmi_protocol_events clk_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &clk_event_ops,
	.evts = clk_events,
	.num_events = ARRAY_SIZE(clk_events),
};

static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
{
	u32 version;
	int clkid, ret;
	struct clock_info *cinfo;

	ret = ph->xops->version_get(ph, &version);
	if (ret)
		return ret;

	dev_dbg(ph->dev, "Clock Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	ret = scmi_clock_protocol_attributes_get(ph, cinfo);
	if (ret)
		return ret;

	cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
				  sizeof(*cinfo->clk), GFP_KERNEL);
	if (!cinfo->clk)
		return -ENOMEM;

	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
		struct scmi_clock_info *clk = cinfo->clk + clkid;

		ret = scmi_clock_attributes_get(ph, clkid, clk, version);
		if (!ret)
			scmi_clock_describe_rates_get(ph, clkid, clk);
	}

	cinfo->version = version;
	return ph->set_priv(ph, cinfo);
}

static const struct scmi_protocol scmi_clock = {
	.id = SCMI_PROTOCOL_CLOCK,
	.owner = THIS_MODULE,
	.instance_init = &scmi_clock_protocol_init,
	.ops = &clk_proto_ops,
	.events = &clk_protocol_events,
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)
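/*
 * Usage sketch (illustrative only): a client driver bound to an SCMI device
 * would typically obtain the clk_proto_ops table above through the SCMI core
 * and query a clock roughly as follows; 'sdev', 'dev' and 'clk_id' are
 * hypothetical locals of the caller.
 *
 *	const struct scmi_clk_proto_ops *clk_ops;
 *	struct scmi_protocol_handle *ph;
 *	u64 rate;
 *
 *	clk_ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK,
 *						  &ph);
 *	if (IS_ERR(clk_ops))
 *		return PTR_ERR(clk_ops);
 *
 *	if (!clk_ops->rate_get(ph, clk_id, &rate))
 *		dev_info(dev, "clock %u rate: %llu Hz\n", clk_id, rate);
 */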