// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Linaro Ltd
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "smd-rpm.h"
#include "icc-common.h"
#include "icc-rpm.h"

/* QNOC QoS */
#define QNOC_QOS_MCTL_LOWn_ADDR(n)	(0x8 + ((n) * 0x1000))
#define QNOC_QOS_MCTL_DFLT_PRIO_MASK	0x70
#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT	4
#define QNOC_QOS_MCTL_URGFWD_EN_MASK	0x8
#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT	3

/* BIMC QoS */
#define M_BKE_REG_BASE(n)		(0x300 + (0x4000 * (n)))
#define M_BKE_EN_ADDR(n)		(M_BKE_REG_BASE(n))
#define M_BKE_HEALTH_CFG_ADDR(i, n)	(M_BKE_REG_BASE(n) + 0x40 + (0x4 * (i)))

#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK	0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK	0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK	0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT	0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT 0x1f

#define M_BKE_EN_EN_BMASK		0x1

/* NoC QoS */
#define NOC_QOS_PRIORITYn_ADDR(n)	(0x8 + ((n) * 0x1000))
#define NOC_QOS_PRIORITY_P1_MASK	0xc
#define NOC_QOS_PRIORITY_P0_MASK	0x3
#define NOC_QOS_PRIORITY_P1_SHIFT	0x2

#define NOC_QOS_MODEn_ADDR(n)		(0xc + ((n) * 0x1000))
#define NOC_QOS_MODEn_MASK		0x3

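/*
 * Program the QNoC QoS registers for the port associated with @src:
 * the default priority field and the urgent-forwarding enable bit.
 */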
static int qcom_icc_set_qnoc_qos(struct icc_node *src, u64 max_bw)
{
	struct icc_provider *provider = src->provider;
	struct qcom_icc_provider *qp = to_qcom_provider(provider);
	struct qcom_icc_node *qn = src->data;
	struct qcom_icc_qos *qos = &qn->qos;
	int rc;

	rc = regmap_update_bits(qp->regmap,
			qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
			QNOC_QOS_MCTL_DFLT_PRIO_MASK,
			qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
			qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
			QNOC_QOS_MCTL_URGFWD_EN_MASK,
			!!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
}

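/*
 * Program one M_BKE_HEALTH_n register of a BIMC master port with the
 * priority levels (and, except for register 3, the LIMITCMDS setting)
 * described by @qos.
 */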
static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
					struct qcom_icc_qos *qos,
					int regnum)
{
	u32 val;
	u32 mask;

	val = qos->prio_level;
	mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;

	val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
	mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;

	/* LIMITCMDS is not present on M_BKE_HEALTH_3 */
	if (regnum != 3) {
		val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
		mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
				  mask, val);
}

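/*
 * Configure BIMC QoS for the port behind @src: program all four
 * M_BKE_HEALTH registers and enable the BKE whenever a non-bypass QoS
 * mode is requested.
 */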
static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS;
	u32 val = 0;
	int i, rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
		mode = qn->qos.qos_mode;

	/*
	 * QoS Priority: the QoS Health parameters are only considered
	 * when we are NOT in Bypass mode.
	 */
	if (mode != NOC_QOS_MODE_BYPASS) {
		for (i = 3; i >= 0; i--) {
			rc = qcom_icc_bimc_set_qos_health(qp,
							  &qn->qos, i);
			if (rc)
				return rc;
		}

		/* Set BKE_EN to 1 when in Fixed, Regulator or Limiter mode */
		val = 1;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
				  M_BKE_EN_EN_BMASK, val);
}

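/*
 * Program the NoC QoS priority register for a port. The P1 (areq_prio)
 * and P0 (prio_level) fields must be updated one at a time, P1 first
 * and P0 last.
 */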
static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
					 struct qcom_icc_qos *qos)
{
	u32 val;
	int rc;

	/* Must be updated one at a time, P1 first, P0 last */
	val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
	rc = regmap_update_bits(qp->regmap,
				qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				NOC_QOS_PRIORITY_P1_MASK, val);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				  NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}

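/*
 * Configure NoC QoS for the port behind @src: skip nodes without a QoS
 * port, program the priority fields when in Fixed mode, and finally
 * select the requested QoS mode.
 */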
static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS;
	int rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_port < 0) {
		dev_dbg(src->provider->dev,
			"NoC QoS: Skipping %s: vote aggregated on parent.\n",
			qn->name);
		return 0;
	}

	if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
		mode = qn->qos.qos_mode;

	if (mode == NOC_QOS_MODE_FIXED) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n",
			qn->name);
		rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
		if (rc)
			return rc;
	} else if (mode == NOC_QOS_MODE_BYPASS) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n",
			qn->name);
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
				  NOC_QOS_MODEn_MASK, mode);
}

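/* Dispatch to the QoS programming routine matching the bus type */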
static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
{
	struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
	struct qcom_icc_node *qn = node->data;

	dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);

	switch (qp->type) {
	case QCOM_ICC_BIMC:
		return qcom_icc_set_bimc_qos(node, sum_bw);
	case QCOM_ICC_QNOC:
		return qcom_icc_set_qnoc_qos(node, sum_bw);
	default:
		return qcom_icc_set_noc_qos(node, sum_bw);
	}
}

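/*
 * Send the aggregated bandwidth to the RPM for the master and/or slave
 * endpoint; an endpoint with an id of -1 is skipped.
 */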
static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
{
	int ret = 0;

	if (mas_rpm_id != -1) {
		ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
					    RPM_BUS_MASTER_REQ,
					    mas_rpm_id,
					    sum_bw);
		if (ret) {
			pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
			       mas_rpm_id, ret);
			return ret;
		}
	}

	if (slv_rpm_id != -1) {
		ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
					    RPM_BUS_SLAVE_REQ,
					    slv_rpm_id,
					    sum_bw);
		if (ret) {
			pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
			       slv_rpm_id, ret);
			return ret;
		}
	}

	return ret;
}

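/*
 * Apply the aggregated bandwidth for one node: RPM-owned nodes are
 * voted for over SMD, while AP-owned nodes with a valid QoS mode are
 * programmed directly.
 */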
static int __qcom_icc_set(struct icc_node *n, struct qcom_icc_node *qn,
			  u64 sum_bw)
{
	int ret;

	if (!qn->qos.ap_owned) {
		/* send bandwidth request message to the RPM processor */
		ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
		if (ret)
			return ret;
	} else if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID) {
		/* set bandwidth directly from the AP */
		ret = qcom_icc_qos_set(n, sum_bw);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * qcom_icc_pre_bw_aggregate - clean up values before re-aggregating requests
 * @node: icc node to operate on
 */
static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
{
	struct qcom_icc_node *qn;
	size_t i;

	qn = node->data;
	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		qn->sum_avg[i] = 0;
		qn->max_peak[i] = 0;
	}
}

/**
 * qcom_icc_bw_aggregate - aggregate bandwidth for the buckets indicated by tag
 * @node: node to aggregate
 * @tag: tag indicating which buckets to aggregate into
 * @avg_bw: new bandwidth to add to the aggregated average
 * @peak_bw: new bandwidth to fold into the aggregated peak
 * @agg_avg: existing aggregate average bandwidth value
 * @agg_peak: existing aggregate peak bandwidth value
 */
static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
				 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	size_t i;
	struct qcom_icc_node *qn;

	qn = node->data;

	if (!tag)
		tag = QCOM_ICC_TAG_ALWAYS;

	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		if (tag & BIT(i)) {
			qn->sum_avg[i] += avg_bw;
			qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
		}
	}

	*agg_avg += avg_bw;
	*agg_peak = max_t(u32, *agg_peak, peak_bw);
	return 0;
}

/**
 * qcom_icc_bus_aggregate - aggregate bandwidth by traversing all nodes
 * @provider: generic interconnect provider
 * @agg_avg: an array for aggregated average bandwidth of buckets
 * @agg_peak: an array for aggregated peak bandwidth of buckets
 * @max_agg_avg: pointer to max value of aggregated average bandwidth
 */
static void qcom_icc_bus_aggregate(struct icc_provider *provider,
				   u64 *agg_avg, u64 *agg_peak,
				   u64 *max_agg_avg)
{
	struct icc_node *node;
	struct qcom_icc_node *qn;
	int i;

	/* Initialise aggregate values */
	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		agg_avg[i] = 0;
		agg_peak[i] = 0;
	}

	*max_agg_avg = 0;

	/*
	 * Iterate nodes on the interconnect and aggregate bandwidth
	 * requests for every bucket.
	 */
	list_for_each_entry(node, &provider->nodes, node_list) {
		qn = node->data;
		for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
			agg_avg[i] += qn->sum_avg[i];
			agg_peak[i] = max_t(u64, agg_peak[i], qn->max_peak[i]);
		}
	}

	/* Find maximum values across all buckets */
	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++)
		*max_agg_avg = max_t(u64, *max_agg_avg, agg_avg[i]);
}

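/*
 * icc framework set() callback: aggregate the bandwidth over all nodes
 * of the provider, apply the result to the RPM and/or the QoS hardware
 * for the source and destination nodes, and scale the bus clocks
 * accordingly.
 */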
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
	struct icc_provider *provider;
	u64 sum_bw;
	u64 rate;
	u64 agg_avg[QCOM_ICC_NUM_BUCKETS], agg_peak[QCOM_ICC_NUM_BUCKETS];
	u64 max_agg_avg;
	int ret, i;
	int bucket;

	src_qn = src->data;
	if (dst)
		dst_qn = dst->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	qcom_icc_bus_aggregate(provider, agg_avg, agg_peak, &max_agg_avg);

	sum_bw = icc_units_to_bps(max_agg_avg);

	ret = __qcom_icc_set(src, src_qn, sum_bw);
	if (ret)
		return ret;
	if (dst_qn) {
		ret = __qcom_icc_set(dst, dst_qn, sum_bw);
		if (ret)
			return ret;
	}

	for (i = 0; i < qp->num_clks; i++) {
		/*
		 * Use the WAKE bucket for the active clock ("bus_a") and the
		 * SLEEP bucket for all other clocks. If a platform doesn't set
		 * interconnect path tags, the SLEEP bucket is used for all
		 * clocks by default.
		 *
		 * Note, the AMC bucket is not supported yet.
		 */
		if (!strcmp(qp->bus_clks[i].id, "bus_a"))
			bucket = QCOM_ICC_BUCKET_WAKE;
		else
			bucket = QCOM_ICC_BUCKET_SLEEP;

		rate = icc_units_to_bps(max(agg_avg[bucket], agg_peak[bucket]));
		do_div(rate, src_qn->buswidth);
		rate = min_t(u64, rate, LONG_MAX);

		if (qp->bus_clk_rate[i] == rate)
			continue;

		ret = clk_set_rate(qp->bus_clks[i].clk, rate);
		if (ret) {
			pr_err("%s clk_set_rate error: %d\n",
			       qp->bus_clks[i].id, ret);
			return ret;
		}
		qp->bus_clk_rate[i] = rate;
	}

	return 0;
}

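/* Default bus clock names used when the descriptor does not list any */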
static const char * const bus_clocks[] = {
	"bus", "bus_a",
};

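/*
 * Common probe routine shared by the RPM interconnect drivers: parse the
 * match data descriptor, set up the regmap and bus clocks, register the
 * icc provider and create all nodes and links.
 */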
int qnoc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct qcom_icc_desc *desc;
	struct icc_onecell_data *data;
	struct icc_provider *provider;
	struct qcom_icc_node * const *qnodes;
	struct qcom_icc_provider *qp;
	struct icc_node *node;
	size_t num_nodes, i;
	const char * const *cds;
	int cd_num;
	int ret;

	/* wait for the RPM proxy */
	if (!qcom_icc_rpm_smd_available())
		return -EPROBE_DEFER;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	qnodes = desc->nodes;
	num_nodes = desc->num_nodes;

	if (desc->num_clocks) {
		cds = desc->clocks;
		cd_num = desc->num_clocks;
	} else {
		cds = bus_clocks;
		cd_num = ARRAY_SIZE(bus_clocks);
	}

	qp = devm_kzalloc(dev, struct_size(qp, bus_clks, cd_num), GFP_KERNEL);
	if (!qp)
		return -ENOMEM;

	qp->bus_clk_rate = devm_kcalloc(dev, cd_num, sizeof(*qp->bus_clk_rate),
					GFP_KERNEL);
	if (!qp->bus_clk_rate)
		return -ENOMEM;

	data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	for (i = 0; i < cd_num; i++)
		qp->bus_clks[i].id = cds[i];
	qp->num_clks = cd_num;

	qp->type = desc->type;
	qp->qos_offset = desc->qos_offset;

	if (desc->regmap_cfg) {
		struct resource *res;
		void __iomem *mmio;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			/* Try parent's regmap */
			qp->regmap = dev_get_regmap(dev->parent, NULL);
			if (qp->regmap)
				goto regmap_done;
			return -ENODEV;
		}

		mmio = devm_ioremap_resource(dev, res);
		if (IS_ERR(mmio)) {
			dev_err(dev, "Cannot ioremap interconnect bus resource\n");
			return PTR_ERR(mmio);
		}

		qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
		if (IS_ERR(qp->regmap)) {
			dev_err(dev, "Cannot regmap interconnect bus resource\n");
			return PTR_ERR(qp->regmap);
		}
	}


regmap_done:
	ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
	if (ret)
		return ret;

	if (desc->has_bus_pd) {
		ret = dev_pm_domain_attach(dev, true);
		if (ret)
			return ret;
	}

	provider = &qp->provider;
	INIT_LIST_HEAD(&provider->nodes);
	provider->dev = dev;
	provider->set = qcom_icc_set;
	provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
	provider->aggregate = qcom_icc_bw_aggregate;
	provider->xlate_extended = qcom_icc_xlate_extended;
	provider->data = data;

	ret = icc_provider_add(provider);
	if (ret) {
		dev_err(dev, "error adding interconnect provider: %d\n", ret);
		clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
		return ret;
	}

	for (i = 0; i < num_nodes; i++) {
		size_t j;

		node = icc_node_create(qnodes[i]->id);
		if (IS_ERR(node)) {
			ret = PTR_ERR(node);
			goto err;
		}

		node->name = qnodes[i]->name;
		node->data = qnodes[i];
		icc_node_add(node, provider);

		for (j = 0; j < qnodes[i]->num_links; j++)
			icc_link_create(node, qnodes[i]->links[j]);

		data->nodes[i] = node;
	}
	data->num_nodes = num_nodes;

	platform_set_drvdata(pdev, qp);

	/* Populate child NoC devices if any */
	if (of_get_child_count(dev->of_node) > 0)
		return of_platform_populate(dev->of_node, NULL, NULL, dev);

	return 0;
err:
	icc_nodes_remove(provider);
	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
	icc_provider_del(provider);

	return ret;
}
EXPORT_SYMBOL(qnoc_probe);

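/* Common remove routine: tear down the nodes, bus clocks and provider */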
int qnoc_remove(struct platform_device *pdev)
{
	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);

	icc_nodes_remove(&qp->provider);
	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
	icc_provider_del(&qp->provider);

	return 0;
}
EXPORT_SYMBOL(qnoc_remove);