// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Linaro Ltd
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "smd-rpm.h"
#include "icc-rpm.h"

/* QNOC QoS */
#define QNOC_QOS_MCTL_LOWn_ADDR(n)	(0x8 + ((n) * 0x1000))
#define QNOC_QOS_MCTL_DFLT_PRIO_MASK	0x70
#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT	4
#define QNOC_QOS_MCTL_URGFWD_EN_MASK	0x8
#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT	3

/* BIMC QoS */
#define M_BKE_REG_BASE(n)		(0x300 + (0x4000 * (n)))
#define M_BKE_EN_ADDR(n)		(M_BKE_REG_BASE(n))
#define M_BKE_HEALTH_CFG_ADDR(i, n)	(M_BKE_REG_BASE(n) + 0x40 + (0x4 * (i)))

#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK	0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK	0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK	0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT	0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT 0x1f

#define M_BKE_EN_EN_BMASK		0x1

/* NoC QoS */
#define NOC_QOS_PRIORITYn_ADDR(n)	(0x8 + ((n) * 0x1000))
#define NOC_QOS_PRIORITY_P1_MASK	0xc
#define NOC_QOS_PRIORITY_P0_MASK	0x3
#define NOC_QOS_PRIORITY_P1_SHIFT	0x2

#define NOC_QOS_MODEn_ADDR(n)		(0xc + ((n) * 0x1000))
#define NOC_QOS_MODEn_MASK		0x3

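/*
 * Program the default priority and the urgent-forwarding enable bit for a
 * QNoC QoS port.
 */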
static int qcom_icc_set_qnoc_qos(struct icc_node *src, u64 max_bw)
{
	struct icc_provider *provider = src->provider;
	struct qcom_icc_provider *qp = to_qcom_provider(provider);
	struct qcom_icc_node *qn = src->data;
	struct qcom_icc_qos *qos = &qn->qos;
	int rc;

	rc = regmap_update_bits(qp->regmap,
			qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
			QNOC_QOS_MCTL_DFLT_PRIO_MASK,
			qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
			qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
			QNOC_QOS_MCTL_URGFWD_EN_MASK,
			!!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
}

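/*
 * Program one of the four M_BKE_HEALTH_CFG registers of a BIMC QoS port with
 * the priority level, areq priority and (where present) limit-commands bits.
 */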
static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
					struct qcom_icc_qos *qos,
					int regnum)
{
	u32 val;
	u32 mask;

	val = qos->prio_level;
	mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;

	val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
	mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;

	/* LIMITCMDS is not present on M_BKE_HEALTH_3 */
	if (regnum != 3) {
		val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
		mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
				  mask, val);
}

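/*
 * Configure QoS for a BIMC master: program the health registers and enable
 * the BKE whenever a mode other than bypass is requested.
 */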
static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS;
	u32 val = 0;
	int i, rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
		mode = qn->qos.qos_mode;

	/*
	 * QoS priority: the QoS health parameters are only considered
	 * when we are not in bypass mode.
	 */
	if (mode != NOC_QOS_MODE_BYPASS) {
		for (i = 3; i >= 0; i--) {
			rc = qcom_icc_bimc_set_qos_health(qp,
							  &qn->qos, i);
			if (rc)
				return rc;
		}

		/* Set BKE_EN to 1 for fixed, regulator or limiter mode */
		val = 1;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
				  M_BKE_EN_EN_BMASK, val);
}

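/* Program the P1 and P0 priority fields for a NoC QoS port. */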
static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
					 struct qcom_icc_qos *qos)
{
	u32 val;
	int rc;

	/* Must be updated one at a time, P1 first, P0 last */
	val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
	rc = regmap_update_bits(qp->regmap,
				qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				NOC_QOS_PRIORITY_P1_MASK, val);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				  NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}

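/*
 * Configure QoS for a NoC master: program the priority fields for fixed mode
 * and then select the requested QoS mode. Ports with a negative qos_port have
 * their votes aggregated on the parent and are skipped here.
 */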
static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS;
	int rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_port < 0) {
		dev_dbg(src->provider->dev,
			"NoC QoS: Skipping %s: vote aggregated on parent.\n",
			qn->name);
		return 0;
	}

	if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
		mode = qn->qos.qos_mode;

	if (mode == NOC_QOS_MODE_FIXED) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n",
			qn->name);
		rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
		if (rc)
			return rc;
	} else if (mode == NOC_QOS_MODE_BYPASS) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n",
			qn->name);
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
				  NOC_QOS_MODEn_MASK, mode);
}

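/* Dispatch QoS programming to the BIMC, QNoC or NoC handler for this provider. */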
static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
{
	struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
	struct qcom_icc_node *qn = node->data;

	dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);

	switch (qp->type) {
	case QCOM_ICC_BIMC:
		return qcom_icc_set_bimc_qos(node, sum_bw);
	case QCOM_ICC_QNOC:
		return qcom_icc_set_qnoc_qos(node, sum_bw);
	default:
		return qcom_icc_set_noc_qos(node, sum_bw);
	}
}

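/*
 * Send the aggregated bandwidth vote to the RPM for the master and/or slave
 * side of a node. An rpm_id of -1 means there is nothing to vote for on that
 * side.
 */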
static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
{
	int ret = 0;

	if (mas_rpm_id != -1) {
		ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
					    RPM_BUS_MASTER_REQ,
					    mas_rpm_id,
					    sum_bw);
		if (ret) {
			pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
			       mas_rpm_id, ret);
			return ret;
		}
	}

	if (slv_rpm_id != -1) {
		ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
					    RPM_BUS_SLAVE_REQ,
					    slv_rpm_id,
					    sum_bw);
		if (ret) {
			pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
			       slv_rpm_id, ret);
			return ret;
		}
	}

	return ret;
}

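/*
 * Aggregate the bandwidth requests of all nodes on the provider, forward the
 * result to the RPM (or program QoS directly for AP-owned nodes) and scale
 * the bus clocks accordingly.
 */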
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	struct icc_node *n;
	u64 sum_bw;
	u64 max_peak_bw;
	u64 rate;
	u32 agg_avg = 0;
	u32 agg_peak = 0;
	int ret, i;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	list_for_each_entry(n, &provider->nodes, node_list)
		provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
				    &agg_avg, &agg_peak);

	sum_bw = icc_units_to_bps(agg_avg);
	max_peak_bw = icc_units_to_bps(agg_peak);

	if (!qn->qos.ap_owned) {
		/* send bandwidth request message to the RPM processor */
		ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
		if (ret)
			return ret;
	} else if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID) {
		/* set bandwidth directly from the AP */
		ret = qcom_icc_qos_set(src, sum_bw);
		if (ret)
			return ret;
	}

	rate = max(sum_bw, max_peak_bw);

	do_div(rate, qn->buswidth);
	rate = min_t(u64, rate, LONG_MAX);

	if (qn->rate == rate)
		return 0;

	for (i = 0; i < qp->num_clks; i++) {
		ret = clk_set_rate(qp->bus_clks[i].clk, rate);
		if (ret) {
			pr_err("%s clk_set_rate error: %d\n",
			       qp->bus_clks[i].id, ret);
			return ret;
		}
	}

	qn->rate = rate;

	return 0;
}

static const char * const bus_clocks[] = {
	"bus", "bus_a",
};

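/**
 * qnoc_probe() - probe a Qualcomm SMD RPM interconnect provider
 * @pdev: platform device of the NoC described by the match data
 *
 * Common probe helper for the SMD RPM interconnect drivers: acquires the bus
 * clocks, sets up the optional QoS regmap and bus power domain, registers the
 * interconnect provider and creates its nodes and links, then populates any
 * child NoC devices.
 */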
int qnoc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct qcom_icc_desc *desc;
	struct icc_onecell_data *data;
	struct icc_provider *provider;
	struct qcom_icc_node **qnodes;
	struct qcom_icc_provider *qp;
	struct icc_node *node;
	size_t num_nodes, i;
	const char * const *cds;
	int cd_num;
	int ret;

	/* wait for the RPM proxy */
	if (!qcom_icc_rpm_smd_available())
		return -EPROBE_DEFER;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	qnodes = desc->nodes;
	num_nodes = desc->num_nodes;

	if (desc->num_clocks) {
		cds = desc->clocks;
		cd_num = desc->num_clocks;
	} else {
		cds = bus_clocks;
		cd_num = ARRAY_SIZE(bus_clocks);
	}

	qp = devm_kzalloc(dev, struct_size(qp, bus_clks, cd_num), GFP_KERNEL);
	if (!qp)
		return -ENOMEM;

	data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	for (i = 0; i < cd_num; i++)
		qp->bus_clks[i].id = cds[i];
	qp->num_clks = cd_num;

	qp->type = desc->type;
	qp->qos_offset = desc->qos_offset;

	if (desc->regmap_cfg) {
		struct resource *res;
		void __iomem *mmio;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			/* Try parent's regmap */
			qp->regmap = dev_get_regmap(dev->parent, NULL);
			if (qp->regmap)
				goto regmap_done;
			return -ENODEV;
		}

		mmio = devm_ioremap_resource(dev, res);
		if (IS_ERR(mmio)) {
			dev_err(dev, "Cannot ioremap interconnect bus resource\n");
			return PTR_ERR(mmio);
		}

		qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
		if (IS_ERR(qp->regmap)) {
			dev_err(dev, "Cannot regmap interconnect bus resource\n");
			return PTR_ERR(qp->regmap);
		}
	}

regmap_done:
	ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
	if (ret)
		return ret;

	if (desc->has_bus_pd) {
		ret = dev_pm_domain_attach(dev, true);
		if (ret) {
			clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
			return ret;
		}
	}

	provider = &qp->provider;
	INIT_LIST_HEAD(&provider->nodes);
	provider->dev = dev;
	provider->set = qcom_icc_set;
	provider->aggregate = icc_std_aggregate;
	provider->xlate = of_icc_xlate_onecell;
	provider->data = data;

	ret = icc_provider_add(provider);
	if (ret) {
		dev_err(dev, "error adding interconnect provider: %d\n", ret);
		clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
		return ret;
	}

	for (i = 0; i < num_nodes; i++) {
		size_t j;

		node = icc_node_create(qnodes[i]->id);
		if (IS_ERR(node)) {
			ret = PTR_ERR(node);
			goto err;
		}

		node->name = qnodes[i]->name;
		node->data = qnodes[i];
		icc_node_add(node, provider);

		for (j = 0; j < qnodes[i]->num_links; j++)
			icc_link_create(node, qnodes[i]->links[j]);

		data->nodes[i] = node;
	}
	data->num_nodes = num_nodes;

	platform_set_drvdata(pdev, qp);

	/* Populate child NoC devices if any */
	if (of_get_child_count(dev->of_node) > 0)
		return of_platform_populate(dev->of_node, NULL, NULL, dev);

	return 0;
err:
	icc_nodes_remove(provider);
	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
	icc_provider_del(provider);

	return ret;
}
EXPORT_SYMBOL(qnoc_probe);

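/**
 * qnoc_remove() - tear down a Qualcomm SMD RPM interconnect provider
 * @pdev: platform device previously set up by qnoc_probe()
 */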
int qnoc_remove(struct platform_device *pdev)
{
	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);

	icc_nodes_remove(&qp->provider);
	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
	return icc_provider_del(&qp->provider);
}
EXPORT_SYMBOL(qnoc_remove);