// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Linaro Ltd
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "smd-rpm.h"
#include "icc-common.h"
#include "icc-rpm.h"

/* QNOC QoS */
#define QNOC_QOS_MCTL_LOWn_ADDR(n)		(0x8 + (n * 0x1000))
#define QNOC_QOS_MCTL_DFLT_PRIO_MASK		0x70
#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT		4
#define QNOC_QOS_MCTL_URGFWD_EN_MASK		0x8
#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT		3

/* BIMC QoS */
#define M_BKE_REG_BASE(n)			(0x300 + (0x4000 * n))
#define M_BKE_EN_ADDR(n)			(M_BKE_REG_BASE(n))
#define M_BKE_HEALTH_CFG_ADDR(i, n)		(M_BKE_REG_BASE(n) + 0x40 + (0x4 * i))

#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK		0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK		0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK		0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT		0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT	0x1f

#define M_BKE_EN_EN_BMASK			0x1

/* NoC QoS */
#define NOC_QOS_PRIORITYn_ADDR(n)		(0x8 + (n * 0x1000))
#define NOC_QOS_PRIORITY_P1_MASK		0xc
#define NOC_QOS_PRIORITY_P0_MASK		0x3
#define NOC_QOS_PRIORITY_P1_SHIFT		0x2

#define NOC_QOS_MODEn_ADDR(n)			(0xc + (n * 0x1000))
#define NOC_QOS_MODEn_MASK			0x3

#define NOC_QOS_MODE_FIXED_VAL			0x0
#define NOC_QOS_MODE_BYPASS_VAL			0x2

/* Program the default priority and urgent forwarding bits of a QNoC QoS port */
static int qcom_icc_set_qnoc_qos(struct icc_node *src)
{
	struct icc_provider *provider = src->provider;
	struct qcom_icc_provider *qp = to_qcom_provider(provider);
	struct qcom_icc_node *qn = src->data;
	struct qcom_icc_qos *qos = &qn->qos;
	int rc;

	rc = regmap_update_bits(qp->regmap,
			qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
			QNOC_QOS_MCTL_DFLT_PRIO_MASK,
			qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
			qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
			QNOC_QOS_MCTL_URGFWD_EN_MASK,
			!!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
}

static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
					struct qcom_icc_qos *qos,
					int regnum)
{
	u32 val;
	u32 mask;

	val = qos->prio_level;
	mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;

	val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
	mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;

	/* LIMITCMDS is not present on M_BKE_HEALTH_3 */
	if (regnum != 3) {
		val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
		mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
				  mask, val);
}

static int qcom_icc_set_bimc_qos(struct icc_node *src)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS;
	u32 val = 0;
	int i, rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
		mode = qn->qos.qos_mode;

	/* QoS Priority: The QoS Health parameters are only considered
	 * when we are NOT in Bypass Mode.
	 */
116 */ 117 if (mode != NOC_QOS_MODE_BYPASS) { 118 for (i = 3; i >= 0; i--) { 119 rc = qcom_icc_bimc_set_qos_health(qp, 120 &qn->qos, i); 121 if (rc) 122 return rc; 123 } 124 125 /* Set BKE_EN to 1 when Fixed, Regulator or Limiter Mode */ 126 val = 1; 127 } 128 129 return regmap_update_bits(qp->regmap, 130 qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port), 131 M_BKE_EN_EN_BMASK, val); 132 } 133 134 static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp, 135 struct qcom_icc_qos *qos) 136 { 137 u32 val; 138 int rc; 139 140 /* Must be updated one at a time, P1 first, P0 last */ 141 val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT; 142 rc = regmap_update_bits(qp->regmap, 143 qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port), 144 NOC_QOS_PRIORITY_P1_MASK, val); 145 if (rc) 146 return rc; 147 148 return regmap_update_bits(qp->regmap, 149 qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port), 150 NOC_QOS_PRIORITY_P0_MASK, qos->prio_level); 151 } 152 153 static int qcom_icc_set_noc_qos(struct icc_node *src) 154 { 155 struct qcom_icc_provider *qp; 156 struct qcom_icc_node *qn; 157 struct icc_provider *provider; 158 u32 mode = NOC_QOS_MODE_BYPASS_VAL; 159 int rc = 0; 160 161 qn = src->data; 162 provider = src->provider; 163 qp = to_qcom_provider(provider); 164 165 if (qn->qos.qos_port < 0) { 166 dev_dbg(src->provider->dev, 167 "NoC QoS: Skipping %s: vote aggregated on parent.\n", 168 qn->name); 169 return 0; 170 } 171 172 if (qn->qos.qos_mode == NOC_QOS_MODE_FIXED) { 173 dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n", qn->name); 174 mode = NOC_QOS_MODE_FIXED_VAL; 175 rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos); 176 if (rc) 177 return rc; 178 } else if (qn->qos.qos_mode == NOC_QOS_MODE_BYPASS) { 179 dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n", qn->name); 180 mode = NOC_QOS_MODE_BYPASS_VAL; 181 } else { 182 /* How did we get here? 
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
				  NOC_QOS_MODEn_MASK, mode);
}

/* Dispatch QoS programming based on the bus type (BIMC, QNoC or plain NoC) */
static int qcom_icc_qos_set(struct icc_node *node)
{
	struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
	struct qcom_icc_node *qn = node->data;

	dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);

	switch (qp->type) {
	case QCOM_ICC_BIMC:
		return qcom_icc_set_bimc_qos(node);
	case QCOM_ICC_QNOC:
		return qcom_icc_set_qnoc_qos(node);
	default:
		return qcom_icc_set_noc_qos(node);
	}
}

/* Send the aggregated bandwidth vote to the RPM for nodes not owned by the AP */
static int qcom_icc_rpm_set(struct qcom_icc_node *qn, u64 sum_bw)
{
	int ret = 0;

	if (qn->qos.ap_owned)
		return 0;

	if (qn->mas_rpm_id != -1) {
		ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
					    RPM_BUS_MASTER_REQ,
					    qn->mas_rpm_id,
					    sum_bw);
		if (ret) {
			pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
			       qn->mas_rpm_id, ret);
			return ret;
		}
	}

	if (qn->slv_rpm_id != -1) {
		ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
					    RPM_BUS_SLAVE_REQ,
					    qn->slv_rpm_id,
					    sum_bw);
		if (ret) {
			pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
			       qn->slv_rpm_id, ret);
			return ret;
		}
	}

	return ret;
}

/**
 * qcom_icc_pre_bw_aggregate - cleans up values before re-aggregating requests
 * @node: icc node to operate on
 */
static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
{
	struct qcom_icc_node *qn;
	size_t i;

	qn = node->data;
	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		qn->sum_avg[i] = 0;
		qn->max_peak[i] = 0;
	}
}

/**
 * qcom_icc_bw_aggregate - aggregate bw for buckets indicated by tag
 * @node: node to aggregate
 * @tag: tag to indicate which buckets to aggregate
 * @avg_bw: new bw to sum aggregate
 * @peak_bw: new bw to max aggregate
 * @agg_avg: existing aggregate avg bw val
 * @agg_peak: existing aggregate peak bw val
 */
static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
				 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	size_t i;
	struct qcom_icc_node *qn;

	qn = node->data;

	if (!tag)
		tag = QCOM_ICC_TAG_ALWAYS;

	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		if (tag & BIT(i)) {
			qn->sum_avg[i] += avg_bw;
			qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
		}
	}

	*agg_avg += avg_bw;
	*agg_peak = max_t(u32, *agg_peak, peak_bw);
	return 0;
}

/**
 * qcom_icc_bus_aggregate - aggregate bandwidth by traversing all nodes
 * @provider: generic interconnect provider
 * @agg_avg: an array for aggregated average bandwidth of buckets
 * @agg_peak: an array for aggregated peak bandwidth of buckets
 * @max_agg_avg: pointer to max value of aggregated average bandwidth
 */
static void qcom_icc_bus_aggregate(struct icc_provider *provider,
				   u64 *agg_avg, u64 *agg_peak,
				   u64 *max_agg_avg)
{
	struct icc_node *node;
	struct qcom_icc_node *qn;
	u64 sum_avg[QCOM_ICC_NUM_BUCKETS];
	int i;

	/* Initialise aggregate values */
	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		agg_avg[i] = 0;
		agg_peak[i] = 0;
	}

	*max_agg_avg = 0;

	/*
	 * Iterate nodes on the interconnect and aggregate bandwidth
	 * requests for every bucket.
	 */
316 */ 317 list_for_each_entry(node, &provider->nodes, node_list) { 318 qn = node->data; 319 for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) { 320 if (qn->channels) 321 sum_avg[i] = div_u64(qn->sum_avg[i], qn->channels); 322 else 323 sum_avg[i] = qn->sum_avg[i]; 324 agg_avg[i] += sum_avg[i]; 325 agg_peak[i] = max_t(u64, agg_peak[i], qn->max_peak[i]); 326 } 327 } 328 329 /* Find maximum values across all buckets */ 330 for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) 331 *max_agg_avg = max_t(u64, *max_agg_avg, agg_avg[i]); 332 } 333 334 static int qcom_icc_set(struct icc_node *src, struct icc_node *dst) 335 { 336 struct qcom_icc_provider *qp; 337 struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL; 338 struct icc_provider *provider; 339 u64 sum_bw; 340 u64 rate; 341 u64 agg_avg[QCOM_ICC_NUM_BUCKETS], agg_peak[QCOM_ICC_NUM_BUCKETS]; 342 u64 max_agg_avg; 343 int ret, i; 344 int bucket; 345 346 src_qn = src->data; 347 if (dst) 348 dst_qn = dst->data; 349 provider = src->provider; 350 qp = to_qcom_provider(provider); 351 352 qcom_icc_bus_aggregate(provider, agg_avg, agg_peak, &max_agg_avg); 353 354 sum_bw = icc_units_to_bps(max_agg_avg); 355 356 ret = qcom_icc_rpm_set(src_qn, sum_bw); 357 if (ret) 358 return ret; 359 360 if (dst_qn) { 361 ret = qcom_icc_rpm_set(dst_qn, sum_bw); 362 if (ret) 363 return ret; 364 } 365 366 for (i = 0; i < qp->num_bus_clks; i++) { 367 /* 368 * Use WAKE bucket for active clock, otherwise, use SLEEP bucket 369 * for other clocks. If a platform doesn't set interconnect 370 * path tags, by default use sleep bucket for all clocks. 371 * 372 * Note, AMC bucket is not supported yet. 373 */ 374 if (!strcmp(qp->bus_clks[i].id, "bus_a")) 375 bucket = QCOM_ICC_BUCKET_WAKE; 376 else 377 bucket = QCOM_ICC_BUCKET_SLEEP; 378 379 rate = icc_units_to_bps(max(agg_avg[bucket], agg_peak[bucket])); 380 do_div(rate, src_qn->buswidth); 381 rate = min_t(u64, rate, LONG_MAX); 382 383 if (qp->bus_clk_rate[i] == rate) 384 continue; 385 386 ret = clk_set_rate(qp->bus_clks[i].clk, rate); 387 if (ret) { 388 pr_err("%s clk_set_rate error: %d\n", 389 qp->bus_clks[i].id, ret); 390 return ret; 391 } 392 qp->bus_clk_rate[i] = rate; 393 } 394 395 return 0; 396 } 397 398 static const char * const bus_clocks[] = { 399 "bus", "bus_a", 400 }; 401 402 int qnoc_probe(struct platform_device *pdev) 403 { 404 struct device *dev = &pdev->dev; 405 const struct qcom_icc_desc *desc; 406 struct icc_onecell_data *data; 407 struct icc_provider *provider; 408 struct qcom_icc_node * const *qnodes; 409 struct qcom_icc_provider *qp; 410 struct icc_node *node; 411 size_t num_nodes, i; 412 const char * const *cds = NULL; 413 int cd_num; 414 int ret; 415 416 /* wait for the RPM proxy */ 417 if (!qcom_icc_rpm_smd_available()) 418 return -EPROBE_DEFER; 419 420 desc = of_device_get_match_data(dev); 421 if (!desc) 422 return -EINVAL; 423 424 qnodes = desc->nodes; 425 num_nodes = desc->num_nodes; 426 427 if (desc->num_intf_clocks) { 428 cds = desc->intf_clocks; 429 cd_num = desc->num_intf_clocks; 430 } else { 431 /* 0 intf clocks is perfectly fine */ 432 cd_num = 0; 433 } 434 435 qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL); 436 if (!qp) 437 return -ENOMEM; 438 439 qp->intf_clks = devm_kcalloc(dev, cd_num, sizeof(*qp->intf_clks), GFP_KERNEL); 440 if (!qp->intf_clks) 441 return -ENOMEM; 442 443 data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes), 444 GFP_KERNEL); 445 if (!data) 446 return -ENOMEM; 447 448 qp->num_intf_clks = cd_num; 449 for (i = 0; i < cd_num; i++) 450 qp->intf_clks[i].id = cds[i]; 451 452 qp->num_bus_clks = 
	for (i = 0; i < qp->num_bus_clks; i++)
		qp->bus_clks[i].id = bus_clocks[i];

	qp->type = desc->type;
	qp->qos_offset = desc->qos_offset;

	if (desc->regmap_cfg) {
		struct resource *res;
		void __iomem *mmio;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			/* Try parent's regmap */
			qp->regmap = dev_get_regmap(dev->parent, NULL);
			if (qp->regmap)
				goto regmap_done;
			return -ENODEV;
		}

		mmio = devm_ioremap_resource(dev, res);
		if (IS_ERR(mmio))
			return PTR_ERR(mmio);

		qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
		if (IS_ERR(qp->regmap)) {
			dev_err(dev, "Cannot regmap interconnect bus resource\n");
			return PTR_ERR(qp->regmap);
		}
	}

regmap_done:
	ret = devm_clk_bulk_get(dev, qp->num_bus_clks, qp->bus_clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(qp->num_bus_clks, qp->bus_clks);
	if (ret)
		return ret;

	ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks);
	if (ret)
		return ret;

	provider = &qp->provider;
	provider->dev = dev;
	provider->set = qcom_icc_set;
	provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
	provider->aggregate = qcom_icc_bw_aggregate;
	provider->xlate_extended = qcom_icc_xlate_extended;
	provider->data = data;

	icc_provider_init(provider);

	/* If this fails, bus accesses will crash the platform! */
	ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
	if (ret)
		return ret;

	for (i = 0; i < num_nodes; i++) {
		size_t j;

		node = icc_node_create(qnodes[i]->id);
		if (IS_ERR(node)) {
			ret = PTR_ERR(node);
			goto err_remove_nodes;
		}

		node->name = qnodes[i]->name;
		node->data = qnodes[i];
		icc_node_add(node, provider);

		for (j = 0; j < qnodes[i]->num_links; j++)
			icc_link_create(node, qnodes[i]->links[j]);

		/* Set QoS registers (we only need to do it once, generally) */
		if (qnodes[i]->qos.ap_owned &&
		    qnodes[i]->qos.qos_mode != NOC_QOS_MODE_INVALID) {
			ret = qcom_icc_qos_set(node);
			if (ret)
				return ret;
		}

		data->nodes[i] = node;
	}
	data->num_nodes = num_nodes;

	clk_bulk_disable_unprepare(qp->num_intf_clks, qp->intf_clks);

	ret = icc_provider_register(provider);
	if (ret)
		goto err_remove_nodes;

	platform_set_drvdata(pdev, qp);

	/* Populate child NoC devices if any */
	if (of_get_child_count(dev->of_node) > 0) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret)
			goto err_deregister_provider;
	}

	return 0;

err_deregister_provider:
	icc_provider_deregister(provider);
err_remove_nodes:
	icc_nodes_remove(provider);
	clk_bulk_disable_unprepare(qp->num_bus_clks, qp->bus_clks);

	return ret;
}
EXPORT_SYMBOL(qnoc_probe);

int qnoc_remove(struct platform_device *pdev)
{
	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);

	icc_provider_deregister(&qp->provider);
	icc_nodes_remove(&qp->provider);
	clk_bulk_disable_unprepare(qp->num_bus_clks, qp->bus_clks);

	return 0;
}
EXPORT_SYMBOL(qnoc_remove);