// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Linaro Ltd
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "smd-rpm.h"
#include "icc-common.h"
#include "icc-rpm.h"

/* QNOC QoS */
#define QNOC_QOS_MCTL_LOWn_ADDR(n)	(0x8 + (n * 0x1000))
#define QNOC_QOS_MCTL_DFLT_PRIO_MASK	0x70
#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT	4
#define QNOC_QOS_MCTL_URGFWD_EN_MASK	0x8
#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT	3

/* BIMC QoS */
#define M_BKE_REG_BASE(n)		(0x300 + (0x4000 * n))
#define M_BKE_EN_ADDR(n)		(M_BKE_REG_BASE(n))
#define M_BKE_HEALTH_CFG_ADDR(i, n)	(M_BKE_REG_BASE(n) + 0x40 + (0x4 * i))

#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK	0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK	0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK	0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT	0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT 0x1f

#define M_BKE_EN_EN_BMASK		0x1

/* NoC QoS */
#define NOC_QOS_PRIORITYn_ADDR(n)	(0x8 + (n * 0x1000))
#define NOC_QOS_PRIORITY_P1_MASK	0xc
#define NOC_QOS_PRIORITY_P0_MASK	0x3
#define NOC_QOS_PRIORITY_P1_SHIFT	0x2

#define NOC_QOS_MODEn_ADDR(n)		(0xc + (n * 0x1000))
#define NOC_QOS_MODEn_MASK		0x3

#define NOC_QOS_MODE_FIXED_VAL		0x0
#define NOC_QOS_MODE_BYPASS_VAL		0x2

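/*
 * qcom_icc_set_qnoc_qos() - program QNoC QoS for one master port.
 *
 * Each QoS port owns its own register window (hence the n * 0x1000 stride in
 * QNOC_QOS_MCTL_LOWn_ADDR()). The default priority field is written first,
 * then the urgent-forwarding enable bit. The max_bw argument is unused here;
 * it is only kept so that all of the qcom_icc_set_*_qos() helpers share the
 * same prototype.
 */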
static int qcom_icc_set_qnoc_qos(struct icc_node *src, u64 max_bw)
{
	struct icc_provider *provider = src->provider;
	struct qcom_icc_provider *qp = to_qcom_provider(provider);
	struct qcom_icc_node *qn = src->data;
	struct qcom_icc_qos *qos = &qn->qos;
	int rc;

	rc = regmap_update_bits(qp->regmap,
			qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
			QNOC_QOS_MCTL_DFLT_PRIO_MASK,
			qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
			qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
			QNOC_QOS_MCTL_URGFWD_EN_MASK,
			!!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
}

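/*
 * qcom_icc_bimc_set_qos_health() - program one M_BKE_HEALTH_CFG register.
 *
 * @regnum selects one of the four health registers (0..3) of the port's BKE
 * block. The priority level and AREQ priority fields are always written; the
 * LIMITCMDS bit is written too, except on register 3 where it does not exist.
 */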
static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
					struct qcom_icc_qos *qos,
					int regnum)
{
	u32 val;
	u32 mask;

	val = qos->prio_level;
	mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;

	val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
	mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;

	/* LIMITCMDS is not present on M_BKE_HEALTH_3 */
	if (regnum != 3) {
		val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
		mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
				  mask, val);
}

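/*
 * qcom_icc_set_bimc_qos() - apply BIMC QoS settings for a master port.
 *
 * For any mode other than bypass, all four health registers are programmed
 * (highest index first) and the BKE is enabled. In bypass mode, or when no
 * QoS mode is set, the BKE is left disabled.
 */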
static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS;
	u32 val = 0;
	int i, rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
		mode = qn->qos.qos_mode;

	/*
	 * QoS priority: the QoS health parameters are only considered when
	 * we are not in bypass mode.
	 */
	if (mode != NOC_QOS_MODE_BYPASS) {
		for (i = 3; i >= 0; i--) {
			rc = qcom_icc_bimc_set_qos_health(qp,
							  &qn->qos, i);
			if (rc)
				return rc;
		}

		/* Set BKE_EN to 1 when Fixed, Regulator or Limiter Mode */
		val = 1;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
				  M_BKE_EN_EN_BMASK, val);
}

static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
					 struct qcom_icc_qos *qos)
{
	u32 val;
	int rc;

	/* Must be updated one at a time, P1 first, P0 last */
	val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
	rc = regmap_update_bits(qp->regmap,
				qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				NOC_QOS_PRIORITY_P1_MASK, val);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				  NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}

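/*
 * qcom_icc_set_noc_qos() - apply NoC QoS settings for a master port.
 *
 * Nodes with a negative qos_port have no QoS registers of their own (their
 * vote is aggregated on the parent) and are skipped. Fixed mode programs the
 * per-port priorities and then selects fixed mode in the mode register;
 * bypass mode only needs the mode register write.
 */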
static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS_VAL;
	int rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_port < 0) {
		dev_dbg(src->provider->dev,
			"NoC QoS: Skipping %s: vote aggregated on parent.\n",
			qn->name);
		return 0;
	}

	if (qn->qos.qos_mode == NOC_QOS_MODE_FIXED) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n", qn->name);
		mode = NOC_QOS_MODE_FIXED_VAL;
		rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
		if (rc)
			return rc;
	} else if (qn->qos.qos_mode == NOC_QOS_MODE_BYPASS) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n", qn->name);
		mode = NOC_QOS_MODE_BYPASS_VAL;
	} else {
		/* Unexpected QoS mode: keep the default (bypass) value */
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
				  NOC_QOS_MODEn_MASK, mode);
}

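/*
 * Dispatch QoS programming based on the bus type: BIMC and QNoC have their
 * own register layouts, everything else is treated as a plain NoC.
 */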
static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
{
	struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
	struct qcom_icc_node *qn = node->data;

	dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);

	switch (qp->type) {
	case QCOM_ICC_BIMC:
		return qcom_icc_set_bimc_qos(node, sum_bw);
	case QCOM_ICC_QNOC:
		return qcom_icc_set_qnoc_qos(node, sum_bw);
	default:
		return qcom_icc_set_noc_qos(node, sum_bw);
	}
}

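/*
 * qcom_icc_rpm_set() - vote bandwidth with the RPM over SMD.
 *
 * A request is sent for the master and/or slave side of the node; an id of
 * -1 means there is nothing to vote for on that side and it is skipped.
 */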
static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
{
	int ret = 0;

	if (mas_rpm_id != -1) {
		ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
					    RPM_BUS_MASTER_REQ,
					    mas_rpm_id,
					    sum_bw);
		if (ret) {
			pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
			       mas_rpm_id, ret);
			return ret;
		}
	}

	if (slv_rpm_id != -1) {
		ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
					    RPM_BUS_SLAVE_REQ,
					    slv_rpm_id,
					    sum_bw);
		if (ret) {
			pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
			       slv_rpm_id, ret);
			return ret;
		}
	}

	return ret;
}

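/*
 * __qcom_icc_set() - apply the aggregated bandwidth to a single node.
 *
 * RPM-owned nodes are voted for via the RPM; AP-owned nodes with a valid QoS
 * mode are instead programmed directly through their QoS registers.
 */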
static int __qcom_icc_set(struct icc_node *n, struct qcom_icc_node *qn,
			  u64 sum_bw)
{
	int ret;

	if (!qn->qos.ap_owned) {
		/* send bandwidth request message to the RPM processor */
		ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
		if (ret)
			return ret;
	} else if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID) {
		/* set bandwidth directly from the AP */
		ret = qcom_icc_qos_set(n, sum_bw);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * qcom_icc_pre_bw_aggregate - clean up values before re-aggregating requests
 * @node: icc node to operate on
 */
static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
{
	struct qcom_icc_node *qn;
	size_t i;

	qn = node->data;
	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		qn->sum_avg[i] = 0;
		qn->max_peak[i] = 0;
	}
}

/**
 * qcom_icc_bw_aggregate - aggregate bw for buckets indicated by tag
 * @node: node to aggregate
 * @tag: tag to indicate which buckets to aggregate
 * @avg_bw: new bw to sum aggregate
 * @peak_bw: new bw to max aggregate
 * @agg_avg: existing aggregate avg bw val
 * @agg_peak: existing aggregate peak bw val
 */
static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
				 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	size_t i;
	struct qcom_icc_node *qn;

	qn = node->data;

	if (!tag)
		tag = QCOM_ICC_TAG_ALWAYS;

	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		if (tag & BIT(i)) {
			qn->sum_avg[i] += avg_bw;
			qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
		}
	}

	*agg_avg += avg_bw;
	*agg_peak = max_t(u32, *agg_peak, peak_bw);
	return 0;
}

/**
 * qcom_icc_bus_aggregate - aggregate bandwidth by traversing all nodes
 * @provider: generic interconnect provider
 * @agg_avg: an array for aggregated average bandwidth of buckets
 * @agg_peak: an array for aggregated peak bandwidth of buckets
 * @max_agg_avg: pointer to max value of aggregated average bandwidth
 */
static void qcom_icc_bus_aggregate(struct icc_provider *provider,
				   u64 *agg_avg, u64 *agg_peak,
				   u64 *max_agg_avg)
{
	struct icc_node *node;
	struct qcom_icc_node *qn;
	u64 sum_avg[QCOM_ICC_NUM_BUCKETS];
	int i;

	/* Initialise aggregate values */
	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		agg_avg[i] = 0;
		agg_peak[i] = 0;
	}

	*max_agg_avg = 0;

	/*
	 * Iterate nodes on the interconnect and aggregate bandwidth
	 * requests for every bucket.
	 */
	list_for_each_entry(node, &provider->nodes, node_list) {
		qn = node->data;
		for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
			if (qn->channels)
				sum_avg[i] = div_u64(qn->sum_avg[i], qn->channels);
			else
				sum_avg[i] = qn->sum_avg[i];
			agg_avg[i] += sum_avg[i];
			agg_peak[i] = max_t(u64, agg_peak[i], qn->max_peak[i]);
		}
	}

	/* Find maximum values across all buckets */
	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++)
		*max_agg_avg = max_t(u64, *max_agg_avg, agg_avg[i]);
}

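/*
 * qcom_icc_set() - the provider's ->set() callback.
 *
 * Bandwidth is re-aggregated across all nodes of the provider, the largest
 * per-bucket average is voted for the source (and, if present, destination)
 * node, and the bus clocks are then scaled: "bus_a" follows the WAKE bucket
 * while the other clocks follow the SLEEP bucket, with the rate derived from
 * the bandwidth divided by the source node's bus width.
 */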
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
	struct icc_provider *provider;
	u64 sum_bw;
	u64 rate;
	u64 agg_avg[QCOM_ICC_NUM_BUCKETS], agg_peak[QCOM_ICC_NUM_BUCKETS];
	u64 max_agg_avg;
	int ret, i;
	int bucket;

	src_qn = src->data;
	if (dst)
		dst_qn = dst->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	qcom_icc_bus_aggregate(provider, agg_avg, agg_peak, &max_agg_avg);

	sum_bw = icc_units_to_bps(max_agg_avg);

	ret = __qcom_icc_set(src, src_qn, sum_bw);
	if (ret)
		return ret;
	if (dst_qn) {
		ret = __qcom_icc_set(dst, dst_qn, sum_bw);
		if (ret)
			return ret;
	}

	for (i = 0; i < qp->num_clks; i++) {
		/*
		 * Use the WAKE bucket for the active ("bus_a") clock and the
		 * SLEEP bucket for all other clocks. If a platform doesn't set
		 * interconnect path tags, the SLEEP bucket is used for all
		 * clocks by default.
		 *
		 * Note, the AMC bucket is not supported yet.
		 */
		if (!strcmp(qp->bus_clks[i].id, "bus_a"))
			bucket = QCOM_ICC_BUCKET_WAKE;
		else
			bucket = QCOM_ICC_BUCKET_SLEEP;

		rate = icc_units_to_bps(max(agg_avg[bucket], agg_peak[bucket]));
		do_div(rate, src_qn->buswidth);
		rate = min_t(u64, rate, LONG_MAX);

		if (qp->bus_clk_rate[i] == rate)
			continue;

		ret = clk_set_rate(qp->bus_clks[i].clk, rate);
		if (ret) {
			pr_err("%s clk_set_rate error: %d\n",
			       qp->bus_clks[i].id, ret);
			return ret;
		}
		qp->bus_clk_rate[i] = rate;
	}

	return 0;
}

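/* Default bus clock names, used when the descriptor does not list its own */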
static const char * const bus_clocks[] = {
	"bus", "bus_a",
};

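/*
 * qnoc_probe() and qnoc_remove() are exported so that the per-SoC
 * interconnect drivers can plug them straight into their platform_driver.
 * A minimal sketch of such a driver (the "qnoc-msm8916" name and the
 * msm8916_noc_of_match table are only illustrative placeholders):
 *
 *	static struct platform_driver msm8916_noc_driver = {
 *		.probe = qnoc_probe,
 *		.remove = qnoc_remove,
 *		.driver = {
 *			.name = "qnoc-msm8916",
 *			.of_match_table = msm8916_noc_of_match,
 *		},
 *	};
 *	module_platform_driver(msm8916_noc_driver);
 */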
int qnoc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct qcom_icc_desc *desc;
	struct icc_onecell_data *data;
	struct icc_provider *provider;
	struct qcom_icc_node * const *qnodes;
	struct qcom_icc_provider *qp;
	struct icc_node *node;
	size_t num_nodes, i;
	const char * const *cds;
	int cd_num;
	int ret;

	/* wait for the RPM proxy */
	if (!qcom_icc_rpm_smd_available())
		return -EPROBE_DEFER;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	qnodes = desc->nodes;
	num_nodes = desc->num_nodes;

	if (desc->num_clocks) {
		cds = desc->clocks;
		cd_num = desc->num_clocks;
	} else {
		cds = bus_clocks;
		cd_num = ARRAY_SIZE(bus_clocks);
	}

	qp = devm_kzalloc(dev, struct_size(qp, bus_clks, cd_num), GFP_KERNEL);
	if (!qp)
		return -ENOMEM;

	qp->bus_clk_rate = devm_kcalloc(dev, cd_num, sizeof(*qp->bus_clk_rate),
					GFP_KERNEL);
	if (!qp->bus_clk_rate)
		return -ENOMEM;

	data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	for (i = 0; i < cd_num; i++)
		qp->bus_clks[i].id = cds[i];
	qp->num_clks = cd_num;

	qp->type = desc->type;
	qp->qos_offset = desc->qos_offset;

	if (desc->regmap_cfg) {
		struct resource *res;
		void __iomem *mmio;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			/* Try parent's regmap */
			qp->regmap = dev_get_regmap(dev->parent, NULL);
			if (qp->regmap)
				goto regmap_done;
			return -ENODEV;
		}

		mmio = devm_ioremap_resource(dev, res);
		if (IS_ERR(mmio))
			return PTR_ERR(mmio);

		qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
		if (IS_ERR(qp->regmap)) {
			dev_err(dev, "Cannot regmap interconnect bus resource\n");
			return PTR_ERR(qp->regmap);
		}
	}

regmap_done:
	ret = devm_clk_bulk_get_optional(dev, qp->num_clks, qp->bus_clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
	if (ret)
		return ret;

	provider = &qp->provider;
	provider->dev = dev;
	provider->set = qcom_icc_set;
	provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
	provider->aggregate = qcom_icc_bw_aggregate;
	provider->xlate_extended = qcom_icc_xlate_extended;
	provider->data = data;

	icc_provider_init(provider);

	for (i = 0; i < num_nodes; i++) {
		size_t j;

		node = icc_node_create(qnodes[i]->id);
		if (IS_ERR(node)) {
			ret = PTR_ERR(node);
			goto err_remove_nodes;
		}

		node->name = qnodes[i]->name;
		node->data = qnodes[i];
		icc_node_add(node, provider);

		for (j = 0; j < qnodes[i]->num_links; j++)
			icc_link_create(node, qnodes[i]->links[j]);

		data->nodes[i] = node;
	}
	data->num_nodes = num_nodes;

	ret = icc_provider_register(provider);
	if (ret)
		goto err_remove_nodes;

	platform_set_drvdata(pdev, qp);

	/* Populate child NoC devices if any */
	if (of_get_child_count(dev->of_node) > 0) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret)
			goto err_deregister_provider;
	}

	return 0;

err_deregister_provider:
	icc_provider_deregister(provider);
err_remove_nodes:
	icc_nodes_remove(provider);
	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);

	return ret;
}
EXPORT_SYMBOL(qnoc_probe);

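/*
 * qnoc_remove() - tear down a provider set up by qnoc_probe(): deregister the
 * provider, remove its nodes and disable the bus clocks.
 */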
int qnoc_remove(struct platform_device *pdev)
{
	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);

	icc_provider_deregister(&qp->provider);
	icc_nodes_remove(&qp->provider);
	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);

	return 0;
}
EXPORT_SYMBOL(qnoc_remove);