// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Performance Protocol
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt

#include <linux/bits.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scmi_protocol.h>
#include <linux/sort.h>

#include "common.h"
#include "notify.h"

enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
	PERF_DESCRIBE_FASTCHANNEL = 0xb,
};

struct scmi_opp {
	u32 perf;
	u32 power;
	u32 trans_latency_us;
};

struct scmi_msg_resp_perf_attributes {
	__le16 num_domains;
	__le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x)	((x) & BIT(0))
	__le32 stats_addr_low;
	__le32 stats_addr_high;
	__le32 stats_size;
};

struct scmi_msg_resp_perf_domain_attributes {
	__le32 flags;
#define SUPPORTS_SET_LIMITS(x)		((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
#define SUPPORTS_PERF_FASTCHANNELS(x)	((x) & BIT(27))
	__le32 rate_limit_us;
	__le32 sustained_freq_khz;
	__le32 sustained_perf_level;
	u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_msg_perf_describe_levels {
	__le32 domain;
	__le32 level_index;
};

struct scmi_perf_set_limits {
	__le32 domain;
	__le32 max_level;
	__le32 min_level;
};

struct scmi_perf_get_limits {
	__le32 max_level;
	__le32 min_level;
};

struct scmi_perf_set_level {
	__le32 domain;
	__le32 level;
};

struct scmi_perf_notify_level_or_limits {
	__le32 domain;
	__le32 notify_enable;
};

struct scmi_perf_limits_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 range_max;
	__le32 range_min;
};

struct scmi_perf_level_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 performance_level;
};

struct scmi_msg_resp_perf_describe_levels {
	__le16 num_returned;
	__le16 num_remaining;
	struct {
		__le32 perf_val;
		__le32 power;
		__le16 transition_latency_us;
		__le16 reserved;
	} opp[];
};

struct scmi_perf_get_fc_info {
	__le32 domain;
	__le32 message_id;
};

struct scmi_msg_resp_perf_desc_fc {
	__le32 attr;
#define SUPPORTS_DOORBELL(x)		((x) & BIT(0))
#define DOORBELL_REG_WIDTH(x)		FIELD_GET(GENMASK(2, 1), (x))
	__le32 rate_limit;
	__le32 chan_addr_low;
	__le32 chan_addr_high;
	__le32 chan_size;
	__le32 db_addr_low;
	__le32 db_addr_high;
	__le32 db_set_lmask;
	__le32 db_set_hmask;
	__le32 db_preserve_lmask;
	__le32 db_preserve_hmask;
};

struct scmi_fc_db_info {
	int width;
	u64 set;
	u64 mask;
	void __iomem *addr;
};

struct scmi_fc_info {
	void __iomem *level_set_addr;
	void __iomem *limit_set_addr;
	void __iomem *level_get_addr;
	void __iomem *limit_get_addr;
	struct scmi_fc_db_info *level_set_db;
	struct scmi_fc_db_info *limit_set_db;
};

struct perf_dom_info {
	bool set_limits;
	bool set_perf;
	bool perf_limit_notify;
	bool perf_level_notify;
	bool perf_fastchannels;
	u32 opp_count;
	u32 sustained_freq_khz;
	u32 sustained_perf_level;
	u32 mult_factor;
	char name[SCMI_MAX_STR_SIZE];
	struct scmi_opp opp[MAX_OPPS];
	struct scmi_fc_info *fc_info;
};

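/*
 * Per-instance protocol data: one perf_dom_info entry per domain reported by
 * PROTOCOL_ATTRIBUTES; stashed in handle->perf_priv at protocol init time.
 */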
struct scmi_perf_info {
	u32 version;
	int num_domains;
	bool power_scale_mw;
	u64 stats_addr;
	u32 stats_size;
	struct perf_dom_info *dom_info;
};

static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
	PERF_NOTIFY_LIMITS,
	PERF_NOTIFY_LEVEL,
};

static int scmi_perf_attributes_get(const struct scmi_handle *handle,
				    struct scmi_perf_info *pi)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_attributes *attr;

	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
				 SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		u16 flags = le16_to_cpu(attr->flags);

		pi->num_domains = le16_to_cpu(attr->num_domains);
		pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
		pi->stats_size = le32_to_cpu(attr->stats_size);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

static int
scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
				struct perf_dom_info *dom_info)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_domain_attributes *attr;

	ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES,
				 SCMI_PROTOCOL_PERF, sizeof(domain),
				 sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);
	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		u32 flags = le32_to_cpu(attr->flags);

		dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
		dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
		dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
		dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
		dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
		dom_info->sustained_freq_khz =
					le32_to_cpu(attr->sustained_freq_khz);
		dom_info->sustained_perf_level =
					le32_to_cpu(attr->sustained_perf_level);
		if (!dom_info->sustained_freq_khz ||
		    !dom_info->sustained_perf_level)
			/* CPUFreq converts to kHz, hence default 1000 */
			dom_info->mult_factor = 1000;
		else
			dom_info->mult_factor =
					(dom_info->sustained_freq_khz * 1000) /
					dom_info->sustained_perf_level;
		strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

static int opp_cmp_func(const void *opp1, const void *opp2)
{
	const struct scmi_opp *t1 = opp1, *t2 = opp2;

	return t1->perf - t2->perf;
}

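/*
 * PERF_DESCRIBE_LEVELS may return the OPP list in several chunks: each
 * response carries num_returned entries plus a num_remaining count, so the
 * command is reissued with level_index advanced past the OPPs already read
 * until nothing is left, bailing out early if MAX_OPPS would be exceeded.
 */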
static int
scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
			      struct perf_dom_info *perf_dom)
{
	int ret, cnt;
	u32 tot_opp_cnt = 0;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_opp *opp;
	struct scmi_msg_perf_describe_levels *dom_info;
	struct scmi_msg_resp_perf_describe_levels *level_info;

	ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS,
				 SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
	if (ret)
		return ret;

	dom_info = t->tx.buf;
	level_info = t->rx.buf;

	do {
		dom_info->domain = cpu_to_le32(domain);
		/* Set the number of OPPs to be skipped/already read */
		dom_info->level_index = cpu_to_le32(tot_opp_cnt);

		ret = scmi_do_xfer(handle, t);
		if (ret)
			break;

		num_returned = le16_to_cpu(level_info->num_returned);
		num_remaining = le16_to_cpu(level_info->num_remaining);
		if (tot_opp_cnt + num_returned > MAX_OPPS) {
			dev_err(handle->dev, "No. of OPPs exceeded MAX_OPPS");
			break;
		}

		opp = &perf_dom->opp[tot_opp_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, opp++) {
			opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
			opp->power = le32_to_cpu(level_info->opp[cnt].power);
			opp->trans_latency_us = le16_to_cpu
				(level_info->opp[cnt].transition_latency_us);

			dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n",
				opp->perf, opp->power, opp->trans_latency_us);
		}

		tot_opp_cnt += num_returned;

		scmi_reset_rx_to_maxsz(handle, t);
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	perf_dom->opp_count = tot_opp_cnt;
	scmi_xfer_put(handle, t);

	sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
	return ret;
}

#define SCMI_PERF_FC_RING_DB(w)				\
do {							\
	u##w val = 0;					\
							\
	if (db->mask)					\
		val = ioread##w(db->addr) & db->mask;	\
	iowrite##w((u##w)db->set | val, db->addr);	\
} while (0)

static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
{
	if (!db || !db->addr)
		return;

	if (db->width == 1)
		SCMI_PERF_FC_RING_DB(8);
	else if (db->width == 2)
		SCMI_PERF_FC_RING_DB(16);
	else if (db->width == 4)
		SCMI_PERF_FC_RING_DB(32);
	else /* db->width == 8 */
#ifdef CONFIG_64BIT
		SCMI_PERF_FC_RING_DB(64);
#else
	{
		u64 val = 0;

		if (db->mask)
			val = ioread64_hi_lo(db->addr) & db->mask;
		iowrite64_hi_lo(db->set | val, db->addr);
	}
#endif
}

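/*
 * The limits/level accessors below come in pairs: a *_mb_* variant that goes
 * through the regular SCMI message transport, and a wrapper that prefers the
 * memory-mapped fastchannel (ringing its doorbell on the "set" paths) when
 * one was discovered for the domain, falling back to the message otherwise.
 */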
static int scmi_perf_mb_limits_set(const struct scmi_handle *handle, u32 domain,
				   u32 max_perf, u32 min_perf)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_limits *limits;

	ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
				 sizeof(*limits), 0, &t);
	if (ret)
		return ret;

	limits = t->tx.buf;
	limits->domain = cpu_to_le32(domain);
	limits->max_level = cpu_to_le32(max_perf);
	limits->min_level = cpu_to_le32(min_perf);

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
				u32 max_perf, u32 min_perf)
{
	struct scmi_perf_info *pi = handle->perf_priv;
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info->limit_set_addr) {
		iowrite32(max_perf, dom->fc_info->limit_set_addr);
		iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
		scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
		return 0;
	}

	return scmi_perf_mb_limits_set(handle, domain, max_perf, min_perf);
}

static int scmi_perf_mb_limits_get(const struct scmi_handle *handle, u32 domain,
				   u32 *max_perf, u32 *min_perf)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_get_limits *limits;

	ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
				 sizeof(__le32), 0, &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		limits = t->rx.buf;

		*max_perf = le32_to_cpu(limits->max_level);
		*min_perf = le32_to_cpu(limits->min_level);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
				u32 *max_perf, u32 *min_perf)
{
	struct scmi_perf_info *pi = handle->perf_priv;
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info->limit_get_addr) {
		*max_perf = ioread32(dom->fc_info->limit_get_addr);
		*min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
		return 0;
	}

	return scmi_perf_mb_limits_get(handle, domain, max_perf, min_perf);
}

static int scmi_perf_mb_level_set(const struct scmi_handle *handle, u32 domain,
				  u32 level, bool poll)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_level *lvl;

	ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
				 sizeof(*lvl), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	lvl = t->tx.buf;
	lvl->domain = cpu_to_le32(domain);
	lvl->level = cpu_to_le32(level);

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
			       u32 level, bool poll)
{
	struct scmi_perf_info *pi = handle->perf_priv;
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info->level_set_addr) {
		iowrite32(level, dom->fc_info->level_set_addr);
		scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
		return 0;
	}

	return scmi_perf_mb_level_set(handle, domain, level, poll);
}

static int scmi_perf_mb_level_get(const struct scmi_handle *handle, u32 domain,
				  u32 *level, bool poll)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
				 sizeof(u32), sizeof(u32), &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	put_unaligned_le32(domain, t->tx.buf);

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		*level = get_unaligned_le32(t->rx.buf);

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
			       u32 *level, bool poll)
{
	struct scmi_perf_info *pi = handle->perf_priv;
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info->level_get_addr) {
		*level = ioread32(dom->fc_info->level_get_addr);
		return 0;
	}

	return scmi_perf_mb_level_get(handle, domain, level, poll);
}

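/*
 * Enable or disable PERF_NOTIFY_LIMITS/PERF_NOTIFY_LEVEL notifications for a
 * domain; per the SCMI specification only bit 0 of notify_enable is used, so
 * the platform is expected to emit the corresponding notification messages
 * only while that bit is set.
 */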
static int scmi_perf_level_limits_notify(const struct scmi_handle *handle,
					 u32 domain, int message_id,
					 bool enable)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_notify_level_or_limits *notify;

	ret = scmi_xfer_get_init(handle, message_id, SCMI_PROTOCOL_PERF,
				 sizeof(*notify), 0, &t);
	if (ret)
		return ret;

	notify = t->tx.buf;
	notify->domain = cpu_to_le32(domain);
	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}

static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
{
	if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
		return true;
	if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
		return true;
	return false;
}

static void
scmi_perf_domain_desc_fc(const struct scmi_handle *handle, u32 domain,
			 u32 message_id, void __iomem **p_addr,
			 struct scmi_fc_db_info **p_db)
{
	int ret;
	u32 flags;
	u64 phys_addr;
	u8 size;
	void __iomem *addr;
	struct scmi_xfer *t;
	struct scmi_fc_db_info *db;
	struct scmi_perf_get_fc_info *info;
	struct scmi_msg_resp_perf_desc_fc *resp;

	if (!p_addr)
		return;

	ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_FASTCHANNEL,
				 SCMI_PROTOCOL_PERF,
				 sizeof(*info), sizeof(*resp), &t);
	if (ret)
		return;

	info = t->tx.buf;
	info->domain = cpu_to_le32(domain);
	info->message_id = cpu_to_le32(message_id);

	ret = scmi_do_xfer(handle, t);
	if (ret)
		goto err_xfer;

	resp = t->rx.buf;
	flags = le32_to_cpu(resp->attr);
	size = le32_to_cpu(resp->chan_size);
	if (!scmi_perf_fc_size_is_valid(message_id, size))
		goto err_xfer;

	phys_addr = le32_to_cpu(resp->chan_addr_low);
	phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
	addr = devm_ioremap(handle->dev, phys_addr, size);
	if (!addr)
		goto err_xfer;
	*p_addr = addr;

	if (p_db && SUPPORTS_DOORBELL(flags)) {
		db = devm_kzalloc(handle->dev, sizeof(*db), GFP_KERNEL);
		if (!db)
			goto err_xfer;

		size = 1 << DOORBELL_REG_WIDTH(flags);
		phys_addr = le32_to_cpu(resp->db_addr_low);
		phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
		addr = devm_ioremap(handle->dev, phys_addr, size);
		if (!addr)
			goto err_xfer;

		db->addr = addr;
		db->width = size;
		db->set = le32_to_cpu(resp->db_set_lmask);
		db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
		db->mask = le32_to_cpu(resp->db_preserve_lmask);
		db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
		*p_db = db;
	}
err_xfer:
	scmi_xfer_put(handle, t);
}

static void scmi_perf_domain_init_fc(const struct scmi_handle *handle,
				     u32 domain, struct scmi_fc_info **p_fc)
{
	struct scmi_fc_info *fc;

	fc = devm_kzalloc(handle->dev, sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return;

	scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_SET,
				 &fc->level_set_addr, &fc->level_set_db);
	scmi_perf_domain_desc_fc(handle, domain, PERF_LEVEL_GET,
				 &fc->level_get_addr, NULL);
	scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_SET,
				 &fc->limit_set_addr, &fc->limit_set_db);
	scmi_perf_domain_desc_fc(handle, domain, PERF_LIMITS_GET,
				 &fc->limit_get_addr, NULL);
	*p_fc = fc;
}

/* Device specific ops */
static int scmi_dev_domain_id(struct device *dev)
{
	struct of_phandle_args clkspec;

	if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
				       0, &clkspec))
		return -EINVAL;

	return clkspec.args[0];
}

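/*
 * Register the domain's OPPs with the OPP core for a device. Frequencies are
 * derived as perf_level * mult_factor, which yields Hz given how mult_factor
 * is computed from sustained_freq_khz above; on failure the OPPs added so
 * far are rolled back.
 */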
static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
				     struct device *dev)
{
	int idx, ret, domain;
	unsigned long freq;
	struct scmi_opp *opp;
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = handle->perf_priv;

	domain = scmi_dev_domain_id(dev);
	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		freq = opp->perf * dom->mult_factor;

		ret = dev_pm_opp_add(dev, freq, 0);
		if (ret) {
			dev_warn(dev, "failed to add opp %luHz\n", freq);

			while (idx-- > 0) {
				freq = (--opp)->perf * dom->mult_factor;
				dev_pm_opp_remove(dev, freq);
			}
			return ret;
		}
	}
	return 0;
}

static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
					    struct device *dev)
{
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = handle->perf_priv;
	int domain = scmi_dev_domain_id(dev);

	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;
	/* uS to nS */
	return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}

static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain,
			      unsigned long freq, bool poll)
{
	struct scmi_perf_info *pi = handle->perf_priv;
	struct perf_dom_info *dom = pi->dom_info + domain;

	return scmi_perf_level_set(handle, domain, freq / dom->mult_factor,
				   poll);
}

static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
			      unsigned long *freq, bool poll)
{
	int ret;
	u32 level;
	struct scmi_perf_info *pi = handle->perf_priv;
	struct perf_dom_info *dom = pi->dom_info + domain;

	ret = scmi_perf_level_get(handle, domain, &level, poll);
	if (!ret)
		*freq = level * dom->mult_factor;

	return ret;
}

static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain,
				   unsigned long *freq, unsigned long *power)
{
	struct scmi_perf_info *pi = handle->perf_priv;
	struct perf_dom_info *dom;
	unsigned long opp_freq;
	int idx, ret = -EINVAL;
	struct scmi_opp *opp;

	dom = pi->dom_info + domain;
	if (!dom)
		return -EIO;

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		opp_freq = opp->perf * dom->mult_factor;
		if (opp_freq < *freq)
			continue;

		*freq = opp_freq;
		*power = opp->power;
		ret = 0;
		break;
	}

	return ret;
}

static bool scmi_fast_switch_possible(const struct scmi_handle *handle,
				      struct device *dev)
{
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = handle->perf_priv;

	dom = pi->dom_info + scmi_dev_domain_id(dev);

	return dom->fc_info && dom->fc_info->level_set_addr;
}

static bool scmi_power_scale_mw_get(const struct scmi_handle *handle)
{
	struct scmi_perf_info *pi = handle->perf_priv;

	return pi->power_scale_mw;
}

static const struct scmi_perf_ops perf_ops = {
	.limits_set = scmi_perf_limits_set,
	.limits_get = scmi_perf_limits_get,
	.level_set = scmi_perf_level_set,
	.level_get = scmi_perf_level_get,
	.device_domain_id = scmi_dev_domain_id,
	.transition_latency_get = scmi_dvfs_transition_latency_get,
	.device_opps_add = scmi_dvfs_device_opps_add,
	.freq_set = scmi_dvfs_freq_set,
	.freq_get = scmi_dvfs_freq_get,
	.est_power_get = scmi_dvfs_est_power_get,
	.fast_switch_possible = scmi_fast_switch_possible,
	.power_scale_mw_get = scmi_power_scale_mw_get,
};

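/*
 * Illustrative use of the ops exposed above (not part of this driver): a
 * consumer holding a struct scmi_handle, such as a cpufreq driver, would
 * typically drive DVFS along these lines (cpu_dev and the target frequency
 * are placeholders):
 *
 *	int domain = handle->perf_ops->device_domain_id(cpu_dev);
 *	unsigned long freq = 1200000000;	// hypothetical target, in Hz
 *
 *	if (domain >= 0) {
 *		handle->perf_ops->device_opps_add(handle, cpu_dev);
 *		handle->perf_ops->freq_set(handle, domain, freq, false);
 *	}
 */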
static int scmi_perf_set_notify_enabled(const struct scmi_handle *handle,
					u8 evt_id, u32 src_id, bool enable)
{
	int ret, cmd_id;

	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
		return -EINVAL;

	cmd_id = evt_2_cmd[evt_id];
	ret = scmi_perf_level_limits_notify(handle, src_id, cmd_id, enable);
	if (ret)
		pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
			 evt_id, src_id, ret);

	return ret;
}

static void *scmi_perf_fill_custom_report(const struct scmi_handle *handle,
					  u8 evt_id, ktime_t timestamp,
					  const void *payld, size_t payld_sz,
					  void *report, u32 *src_id)
{
	void *rep = NULL;

	switch (evt_id) {
	case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
	{
		const struct scmi_perf_limits_notify_payld *p = payld;
		struct scmi_perf_limits_report *r = report;

		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->range_max = le32_to_cpu(p->range_max);
		r->range_min = le32_to_cpu(p->range_min);
		*src_id = r->domain_id;
		rep = r;
		break;
	}
	case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
	{
		const struct scmi_perf_level_notify_payld *p = payld;
		struct scmi_perf_level_report *r = report;

		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->performance_level = le32_to_cpu(p->performance_level);
		*src_id = r->domain_id;
		rep = r;
		break;
	}
	default:
		break;
	}

	return rep;
}

static const struct scmi_event perf_events[] = {
	{
		.id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_limits_report),
	},
	{
		.id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_level_report),
	},
};

static const struct scmi_event_ops perf_event_ops = {
	.set_notify_enabled = scmi_perf_set_notify_enabled,
	.fill_custom_report = scmi_perf_fill_custom_report,
};

static int scmi_perf_protocol_init(struct scmi_handle *handle)
{
	int domain;
	u32 version;
	struct scmi_perf_info *pinfo;

	scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version);

	dev_dbg(handle->dev, "Performance Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo)
		return -ENOMEM;

	scmi_perf_attributes_get(handle, pinfo);

	pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
				       sizeof(*pinfo->dom_info), GFP_KERNEL);
	if (!pinfo->dom_info)
		return -ENOMEM;

	for (domain = 0; domain < pinfo->num_domains; domain++) {
		struct perf_dom_info *dom = pinfo->dom_info + domain;

		scmi_perf_domain_attributes_get(handle, domain, dom);
		scmi_perf_describe_levels_get(handle, domain, dom);

		if (dom->perf_fastchannels)
			scmi_perf_domain_init_fc(handle, domain, &dom->fc_info);
	}

	scmi_register_protocol_events(handle,
				      SCMI_PROTOCOL_PERF, SCMI_PROTO_QUEUE_SZ,
				      &perf_event_ops, perf_events,
				      ARRAY_SIZE(perf_events),
				      pinfo->num_domains);

	pinfo->version = version;
	handle->perf_ops = &perf_ops;
	handle->perf_priv = pinfo;

	return 0;
}

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_PERF, perf)