// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Linaro Ltd
 */

#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "icc-common.h"
#include "icc-rpm.h"

/* QNOC QoS */
#define QNOC_QOS_MCTL_LOWn_ADDR(n)	(0x8 + (n * 0x1000))
#define QNOC_QOS_MCTL_DFLT_PRIO_MASK	0x70
#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT	4
#define QNOC_QOS_MCTL_URGFWD_EN_MASK	0x8
#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT	3

/* BIMC QoS */
#define M_BKE_REG_BASE(n)		(0x300 + (0x4000 * n))
#define M_BKE_EN_ADDR(n)		(M_BKE_REG_BASE(n))
#define M_BKE_HEALTH_CFG_ADDR(i, n)	(M_BKE_REG_BASE(n) + 0x40 + (0x4 * i))

#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK	0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK	0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK	0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT	0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT	0x1f

#define M_BKE_EN_EN_BMASK		0x1

/* NoC QoS */
#define NOC_QOS_PRIORITYn_ADDR(n)	(0x8 + (n * 0x1000))
#define NOC_QOS_PRIORITY_P1_MASK	0xc
#define NOC_QOS_PRIORITY_P0_MASK	0x3
#define NOC_QOS_PRIORITY_P1_SHIFT	0x2

#define NOC_QOS_MODEn_ADDR(n)		(0xc + (n * 0x1000))
#define NOC_QOS_MODEn_MASK		0x3

#define NOC_QOS_MODE_FIXED_VAL		0x0
#define NOC_QOS_MODE_BYPASS_VAL		0x2

#define ICC_BUS_CLK_MIN_RATE		19200ULL /* kHz */

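/*
 * Program the per-port QNOC QOS_MCTL_LOW register for @src: first the
 * default priority field, then the urgent-forwarding enable bit.
 */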
static int qcom_icc_set_qnoc_qos(struct icc_node *src)
{
	struct icc_provider *provider = src->provider;
	struct qcom_icc_provider *qp = to_qcom_provider(provider);
	struct qcom_icc_node *qn = src->data;
	struct qcom_icc_qos *qos = &qn->qos;
	int rc;

	rc = regmap_update_bits(qp->regmap,
				qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
				QNOC_QOS_MCTL_DFLT_PRIO_MASK,
				qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
				  QNOC_QOS_MCTL_URGFWD_EN_MASK,
				  !!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
}

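/*
 * Write one of the four M_BKE_HEALTH_CFG registers of the BIMC master port
 * described by @qos. Register 3 lacks the LIMITCMDS field, so it is only
 * programmed for registers 0-2.
 */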
static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
					struct qcom_icc_qos *qos,
					int regnum)
{
	u32 val;
	u32 mask;

	val = qos->prio_level;
	mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;

	val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
	mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;

	/* LIMITCMDS is not present on M_BKE_HEALTH_3 */
	if (regnum != 3) {
		val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
		mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
				  mask, val);
}

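/*
 * Apply the BIMC QoS configuration for @src: program all four health
 * registers (unless the port runs in Bypass mode) and then set the BKE
 * enable bit accordingly.
 */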
static int qcom_icc_set_bimc_qos(struct icc_node *src)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS;
	u32 val = 0;
	int i, rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
		mode = qn->qos.qos_mode;

	/*
	 * QoS Priority: the QoS Health parameters are only considered
	 * when we are NOT in Bypass mode.
	 */
	if (mode != NOC_QOS_MODE_BYPASS) {
		for (i = 3; i >= 0; i--) {
			rc = qcom_icc_bimc_set_qos_health(qp,
							  &qn->qos, i);
			if (rc)
				return rc;
		}

		/* Set BKE_EN to 1 when Fixed, Regulator or Limiter Mode */
		val = 1;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
				  M_BKE_EN_EN_BMASK, val);
}

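/*
 * Program the per-port NoC priority register for the port described by
 * @qos: P1 carries the areq_prio value, P0 the prio_level value.
 */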
static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
					 struct qcom_icc_qos *qos)
{
	u32 val;
	int rc;

	/* Must be updated one at a time, P1 first, P0 last */
	val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
	rc = regmap_update_bits(qp->regmap,
				qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				NOC_QOS_PRIORITY_P1_MASK, val);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				  NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}

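/*
 * Apply the NoC QoS configuration for @src: skip ports whose votes are
 * aggregated on a parent node, program the priority fields for Fixed mode
 * and finally select the mode in the per-port NOC_QOS_MODEn register.
 */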
static int qcom_icc_set_noc_qos(struct icc_node *src)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS_VAL;
	int rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_port < 0) {
		dev_dbg(src->provider->dev,
			"NoC QoS: Skipping %s: vote aggregated on parent.\n",
			qn->name);
		return 0;
	}

	if (qn->qos.qos_mode == NOC_QOS_MODE_FIXED) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n", qn->name);
		mode = NOC_QOS_MODE_FIXED_VAL;
		rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
		if (rc)
			return rc;
	} else if (qn->qos.qos_mode == NOC_QOS_MODE_BYPASS) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n", qn->name);
		mode = NOC_QOS_MODE_BYPASS_VAL;
	} else {
		/* How did we get here? */
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
				  NOC_QOS_MODEn_MASK, mode);
}

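/* Dispatch QoS programming for @node based on the provider type. */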
static int qcom_icc_qos_set(struct icc_node *node)
{
	struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
	struct qcom_icc_node *qn = node->data;

	dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);

	switch (qp->type) {
	case QCOM_ICC_BIMC:
		return qcom_icc_set_bimc_qos(node);
	case QCOM_ICC_QNOC:
		return qcom_icc_set_qnoc_qos(node);
	default:
		return qcom_icc_set_noc_qos(node);
	}
}

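/*
 * Send the aggregated bandwidth votes in @bw (one value per RPM context)
 * to the RPM over SMD, once for the master and once for the slave side of
 * @qn. AP-owned nodes are configured locally and need no RPM vote.
 */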
static int qcom_icc_rpm_set(struct qcom_icc_node *qn, u64 *bw)
{
	int ret, rpm_ctx = 0;
	u64 bw_bps;

	if (qn->qos.ap_owned)
		return 0;

	for (rpm_ctx = 0; rpm_ctx < QCOM_SMD_RPM_STATE_NUM; rpm_ctx++) {
		bw_bps = icc_units_to_bps(bw[rpm_ctx]);

		if (qn->mas_rpm_id != -1) {
			ret = qcom_icc_rpm_smd_send(rpm_ctx,
						    RPM_BUS_MASTER_REQ,
						    qn->mas_rpm_id,
						    bw_bps);
			if (ret) {
				pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
				       qn->mas_rpm_id, ret);
				return ret;
			}
		}

		if (qn->slv_rpm_id != -1) {
			ret = qcom_icc_rpm_smd_send(rpm_ctx,
						    RPM_BUS_SLAVE_REQ,
						    qn->slv_rpm_id,
						    bw_bps);
			if (ret) {
				pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
				       qn->slv_rpm_id, ret);
				return ret;
			}
		}
	}

	return 0;
}

/**
 * qcom_icc_pre_bw_aggregate - clean up values before re-aggregating requests
 * @node: icc node to operate on
 */
static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
{
	struct qcom_icc_node *qn;
	size_t i;

	qn = node->data;
	for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
		qn->sum_avg[i] = 0;
		qn->max_peak[i] = 0;
	}
}

/**
 * qcom_icc_bw_aggregate - aggregate bw for buckets indicated by tag
 * @node: node to aggregate
 * @tag: tag to indicate which buckets to aggregate
 * @avg_bw: new bw to sum aggregate
 * @peak_bw: new bw to max aggregate
 * @agg_avg: existing aggregate avg bw val
 * @agg_peak: existing aggregate peak bw val
 */
static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
				 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	size_t i;
	struct qcom_icc_node *qn;

	qn = node->data;

	if (!tag)
		tag = RPM_ALWAYS_TAG;

	for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
		if (tag & BIT(i)) {
			qn->sum_avg[i] += avg_bw;
			qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
		}
	}

	*agg_avg += avg_bw;
	*agg_peak = max_t(u32, *agg_peak, peak_bw);
	return 0;
}

/**
 * qcom_icc_bus_aggregate - calculate bus clock rates by traversing all nodes
 * @provider: generic interconnect provider
 * @agg_clk_rate: array containing the aggregated clock rates in kHz
 */
static void qcom_icc_bus_aggregate(struct icc_provider *provider, u64 *agg_clk_rate)
{
	u64 agg_avg_rate, agg_rate;
	struct qcom_icc_node *qn;
	struct icc_node *node;
	int i;

	/*
	 * Iterate nodes on the provider, aggregate bandwidth requests for
	 * every bucket and convert them into bus clock rates.
	 */
	list_for_each_entry(node, &provider->nodes, node_list) {
		qn = node->data;
		for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
			if (qn->channels)
				agg_avg_rate = div_u64(qn->sum_avg[i], qn->channels);
			else
				agg_avg_rate = qn->sum_avg[i];

			agg_rate = max_t(u64, agg_avg_rate, qn->max_peak[i]);
			do_div(agg_rate, qn->buswidth);

			agg_clk_rate[i] = max_t(u64, agg_clk_rate[i], agg_rate);
		}
	}
}

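/*
 * The icc_provider .set callback: forward the per-context bandwidth votes
 * for both endpoints to the RPM, then scale the bus clock - either through
 * the CCF for a non-RPM-owned clock, or by sending active/sleep rate
 * requests to the RPM, caching the last committed rates to avoid redundant
 * messages.
 */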
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
	struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
	u64 agg_clk_rate[QCOM_SMD_RPM_STATE_NUM] = { 0 };
	struct icc_provider *provider;
	struct qcom_icc_provider *qp;
	u64 active_rate, sleep_rate;
	int ret;

	src_qn = src->data;
	if (dst)
		dst_qn = dst->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	qcom_icc_bus_aggregate(provider, agg_clk_rate);
	active_rate = agg_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE];
	sleep_rate = agg_clk_rate[QCOM_SMD_RPM_SLEEP_STATE];

	ret = qcom_icc_rpm_set(src_qn, src_qn->sum_avg);
	if (ret)
		return ret;

	if (dst_qn) {
		ret = qcom_icc_rpm_set(dst_qn, dst_qn->sum_avg);
		if (ret)
			return ret;
	}

	/* Some providers don't have a bus clock to scale */
	if (!qp->bus_clk_desc && !qp->bus_clk)
		return 0;

	/*
	 * Downstream checks whether the requested rate is zero, but it makes little sense
	 * to vote for a value that's below the lower threshold, so let's not do so.
	 */
	if (qp->keep_alive)
		active_rate = max(ICC_BUS_CLK_MIN_RATE, active_rate);

	/* Some providers have a non-RPM-owned bus clock - convert kHz->Hz for the CCF */
	if (qp->bus_clk) {
		active_rate = max_t(u64, active_rate, sleep_rate);
		/* ARM32 caps clk_set_rate arg to u32.. Nothing we can do about that! */
		active_rate = min_t(u64, 1000ULL * active_rate, ULONG_MAX);
		return clk_set_rate(qp->bus_clk, active_rate);
	}

	/* RPM only accepts <=INT_MAX rates */
	active_rate = min_t(u64, active_rate, INT_MAX);
	sleep_rate = min_t(u64, sleep_rate, INT_MAX);

	if (active_rate != qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE]) {
		ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_ACTIVE_STATE,
						active_rate);
		if (ret)
			return ret;

		/* Cache the rate after we've successfully committed it to RPM */
		qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE] = active_rate;
	}

	if (sleep_rate != qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE]) {
		ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_SLEEP_STATE,
						sleep_rate);
		if (ret)
			return ret;

		/* Cache the rate after we've successfully committed it to RPM */
		qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
	}

	return 0;
}

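/**
 * qnoc_probe - generic probe for RPM-based QCOM interconnect providers
 * @pdev: platform device of the SoC-specific NoC/BIMC driver
 *
 * Defers until the RPM SMD proxy is up, then builds the icc provider from
 * the SoC match data: allocates the provider, grabs the bus and interface
 * clocks, sets up the QoS regmap, creates and links all icc nodes and,
 * finally, populates any child NoC devices.
 *
 * A minimal sketch of an SoC driver built on this helper (the compatible
 * string and the &msm_example_bimc descriptor are hypothetical):
 *
 *	static const struct of_device_id example_noc_of_match[] = {
 *		{ .compatible = "qcom,example-bimc", .data = &msm_example_bimc },
 *		{ }
 *	};
 *
 *	static struct platform_driver example_noc_driver = {
 *		.probe = qnoc_probe,
 *		.remove = qnoc_remove,
 *		.driver = {
 *			.name = "qnoc-example",
 *			.of_match_table = example_noc_of_match,
 *		},
 *	};
 *	module_platform_driver(example_noc_driver);
 */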
int qnoc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct qcom_icc_desc *desc;
	struct icc_onecell_data *data;
	struct icc_provider *provider;
	struct qcom_icc_node * const *qnodes;
	struct qcom_icc_provider *qp;
	struct icc_node *node;
	size_t num_nodes, i;
	const char * const *cds = NULL;
	int cd_num;
	int ret;

	/* wait for the RPM proxy */
	if (!qcom_icc_rpm_smd_available())
		return -EPROBE_DEFER;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	qnodes = desc->nodes;
	num_nodes = desc->num_nodes;

	if (desc->num_intf_clocks) {
		cds = desc->intf_clocks;
		cd_num = desc->num_intf_clocks;
	} else {
		/* 0 intf clocks is perfectly fine */
		cd_num = 0;
	}

	qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return -ENOMEM;

	qp->intf_clks = devm_kcalloc(dev, cd_num, sizeof(*qp->intf_clks), GFP_KERNEL);
	if (!qp->intf_clks)
		return -ENOMEM;

	if (desc->bus_clk_desc) {
		qp->bus_clk_desc = devm_kzalloc(dev, sizeof(*qp->bus_clk_desc),
						GFP_KERNEL);
		if (!qp->bus_clk_desc)
			return -ENOMEM;

		*qp->bus_clk_desc = *desc->bus_clk_desc;
	} else {
		/* Some older SoCs may have a single non-RPM-owned bus clock. */
		qp->bus_clk = devm_clk_get_optional(dev, "bus");
		if (IS_ERR(qp->bus_clk))
			return PTR_ERR(qp->bus_clk);
	}

	data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	qp->num_intf_clks = cd_num;
	for (i = 0; i < cd_num; i++)
		qp->intf_clks[i].id = cds[i];

	qp->keep_alive = desc->keep_alive;
	qp->type = desc->type;
	qp->qos_offset = desc->qos_offset;

	if (desc->regmap_cfg) {
		struct resource *res;
		void __iomem *mmio;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			/* Try parent's regmap */
			qp->regmap = dev_get_regmap(dev->parent, NULL);
			if (qp->regmap)
				goto regmap_done;
			return -ENODEV;
		}

		mmio = devm_ioremap_resource(dev, res);
		if (IS_ERR(mmio))
			return PTR_ERR(mmio);

		qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
		if (IS_ERR(qp->regmap)) {
			dev_err(dev, "Cannot regmap interconnect bus resource\n");
			return PTR_ERR(qp->regmap);
		}
	}

regmap_done:
	ret = clk_prepare_enable(qp->bus_clk);
	if (ret)
		return ret;

	ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks);
	if (ret)
		goto err_disable_unprepare_clk;

	provider = &qp->provider;
	provider->dev = dev;
	provider->set = qcom_icc_set;
	provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
	provider->aggregate = qcom_icc_bw_aggregate;
	provider->xlate_extended = qcom_icc_xlate_extended;
	provider->data = data;

	icc_provider_init(provider);

	/* If this fails, bus accesses will crash the platform! */
	ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
	if (ret)
		goto err_disable_unprepare_clk;

	for (i = 0; i < num_nodes; i++) {
		size_t j;

		node = icc_node_create(qnodes[i]->id);
		if (IS_ERR(node)) {
			clk_bulk_disable_unprepare(qp->num_intf_clks,
						   qp->intf_clks);
			ret = PTR_ERR(node);
			goto err_remove_nodes;
		}

		node->name = qnodes[i]->name;
		node->data = qnodes[i];
		icc_node_add(node, provider);

		for (j = 0; j < qnodes[i]->num_links; j++)
			icc_link_create(node, qnodes[i]->links[j]);

		/* Set QoS registers (we only need to do it once, generally) */
		if (qnodes[i]->qos.ap_owned &&
		    qnodes[i]->qos.qos_mode != NOC_QOS_MODE_INVALID) {
			ret = qcom_icc_qos_set(node);
			if (ret) {
				clk_bulk_disable_unprepare(qp->num_intf_clks,
							   qp->intf_clks);
				goto err_remove_nodes;
			}
		}

		data->nodes[i] = node;
	}
	data->num_nodes = num_nodes;

	clk_bulk_disable_unprepare(qp->num_intf_clks, qp->intf_clks);

	ret = icc_provider_register(provider);
	if (ret)
		goto err_remove_nodes;

	platform_set_drvdata(pdev, qp);

	/* Populate child NoC devices if any */
	if (of_get_child_count(dev->of_node) > 0) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret)
			goto err_deregister_provider;
	}

	return 0;

err_deregister_provider:
	icc_provider_deregister(provider);
err_remove_nodes:
	icc_nodes_remove(provider);
err_disable_unprepare_clk:
	clk_disable_unprepare(qp->bus_clk);

	return ret;
}
EXPORT_SYMBOL(qnoc_probe);

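/*
 * qnoc_remove - tear down a provider set up by qnoc_probe(): deregister it
 * from the icc framework, remove its nodes and stop the bus clock.
 */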
int qnoc_remove(struct platform_device *pdev)
{
	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);

	icc_provider_deregister(&qp->provider);
	icc_nodes_remove(&qp->provider);
	clk_disable_unprepare(qp->bus_clk);

	return 0;
}
EXPORT_SYMBOL(qnoc_remove);