xref: /openbmc/linux/drivers/gpu/drm/msm/dp/dp_parser.c (revision 84cc6674)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/of_gpio.h>
#include <linux/phy/phy.h>

#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_bridge.h>

#include "dp_parser.h"
#include "dp_reg.h"

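/*
 * Default layout of the legacy single "reg" DP controller region: AHB, AUX,
 * link and P0 (pixel) sub-regions carved out of one mapping. Used as a
 * fallback in dp_parser_ctrl_res() when the DT describes only one region.
 */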
#define DP_DEFAULT_AHB_OFFSET	0x0000
#define DP_DEFAULT_AHB_SIZE	0x0200
#define DP_DEFAULT_AUX_OFFSET	0x0200
#define DP_DEFAULT_AUX_SIZE	0x0200
#define DP_DEFAULT_LINK_OFFSET	0x0400
#define DP_DEFAULT_LINK_SIZE	0x0C00
#define DP_DEFAULT_P0_OFFSET	0x1000
#define DP_DEFAULT_P0_SIZE	0x0400

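/*
 * Map the idx'th MMIO resource of the DP platform device and, on success,
 * report its length through *len.
 */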
static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
{
	struct resource *res;
	void __iomem *base;

	base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
	if (!IS_ERR(base))
		*len = resource_size(res);

	return base;
}

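/*
 * Map the AHB, AUX, link and P0 register regions and get the "dp" PHY.
 * When the DT provides only a single legacy region, the sub-regions are
 * derived from the DP_DEFAULT_* offsets instead.
 */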
static int dp_parser_ctrl_res(struct dp_parser *parser)
{
	struct platform_device *pdev = parser->pdev;
	struct dp_io *io = &parser->io;
	struct dss_io_data *dss = &io->dp_controller;

	dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len);
	if (IS_ERR(dss->ahb.base))
		return PTR_ERR(dss->ahb.base);

	dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len);
	if (IS_ERR(dss->aux.base)) {
		/*
		 * The initial binding had a single reg, but in order to
		 * support variation in the sub-region sizes this was split.
		 * dp_ioremap() will fail with -EINVAL here if only a single
		 * reg is specified, so fill in the sub-region offsets and
		 * lengths based on this single region.
		 */
		if (PTR_ERR(dss->aux.base) == -EINVAL) {
			if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
				DRM_ERROR("legacy memory region not large enough\n");
				return -EINVAL;
			}

			dss->ahb.len = DP_DEFAULT_AHB_SIZE;
			dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET;
			dss->aux.len = DP_DEFAULT_AUX_SIZE;
			dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET;
			dss->link.len = DP_DEFAULT_LINK_SIZE;
			dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET;
			dss->p0.len = DP_DEFAULT_P0_SIZE;
		} else {
			DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base);
			return PTR_ERR(dss->aux.base);
		}
	} else {
		dss->link.base = dp_ioremap(pdev, 2, &dss->link.len);
		if (IS_ERR(dss->link.base)) {
			DRM_ERROR("unable to remap link region: %pe\n", dss->link.base);
			return PTR_ERR(dss->link.base);
		}

		dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len);
		if (IS_ERR(dss->p0.base)) {
			DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base);
			return PTR_ERR(dss->p0.base);
		}
	}

	io->phy = devm_phy_get(&pdev->dev, "dp");
	if (IS_ERR(io->phy))
		return PTR_ERR(io->phy);

	return 0;
}

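/*
 * Return the highest rate listed in the dp_out endpoint's "link-frequencies"
 * property, scaled down to the link-rate value used for max_dp_link_rate,
 * or 0 if the property is missing.
 */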
static u32 dp_parser_link_frequencies(struct device_node *of_node)
{
	struct device_node *endpoint;
	u64 frequency = 0;
	int cnt;

	endpoint = of_graph_get_endpoint_by_regs(of_node, 1, 0); /* port@1 */
	if (!endpoint)
		return 0;

	cnt = of_property_count_u64_elems(endpoint, "link-frequencies");

	if (cnt > 0)
		of_property_read_u64_index(endpoint, "link-frequencies",
						cnt - 1, &frequency);
	of_node_put(endpoint);

	do_div(frequency,
		10 * /* from symbol rate to link rate */
		1000); /* kbytes */

	return frequency;
}

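/* Parse the lane count and maximum link rate, applying defaults when absent. */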
static int dp_parser_misc(struct dp_parser *parser)
{
	struct device_node *of_node = parser->pdev->dev.of_node;
	int cnt;

	/* data-lanes is a property of the dp_out endpoint */
	cnt = drm_of_get_data_lanes_count_ep(of_node, 1, 0, 1, DP_MAX_NUM_DP_LANES);
	if (cnt < 0) {
		/* legacy binding: data-lanes is a property of the mdss_dp node */
		cnt = drm_of_get_data_lanes_count(of_node, 1, DP_MAX_NUM_DP_LANES);
	}

	if (cnt > 0)
		parser->max_dp_lanes = cnt;
	else
		parser->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */

	parser->max_dp_link_rate = dp_parser_link_frequencies(of_node);
	if (!parser->max_dp_link_rate)
		parser->max_dp_link_rate = DP_LINK_RATE_HBR2;

	return 0;
}

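/* Return true if clk_name starts with clk_prefix. */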
static inline bool dp_parser_check_prefix(const char *clk_prefix,
						const char *clk_name)
{
	return !strncmp(clk_prefix, clk_name, strlen(clk_prefix));
}

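/*
 * Count the "core", "ctrl" and "stream" clocks listed in clock-names and
 * allocate the clk_bulk_data arrays of the matching power modules.
 */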
static int dp_parser_init_clk_data(struct dp_parser *parser)
{
	int num_clk, i, rc;
	int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
	const char *clk_name;
	struct device *dev = &parser->pdev->dev;
	struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
	struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
	struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];

	num_clk = of_property_count_strings(dev->of_node, "clock-names");
	if (num_clk <= 0) {
		DRM_ERROR("no clocks are defined\n");
		return -EINVAL;
	}

	for (i = 0; i < num_clk; i++) {
		rc = of_property_read_string_index(dev->of_node,
				"clock-names", i, &clk_name);
		if (rc < 0)
			return rc;

		if (dp_parser_check_prefix("core", clk_name))
			core_clk_count++;

		if (dp_parser_check_prefix("ctrl", clk_name))
			ctrl_clk_count++;

		if (dp_parser_check_prefix("stream", clk_name))
			stream_clk_count++;
	}

	/* Initialize the CORE power module */
	if (core_clk_count == 0) {
		DRM_ERROR("no core clocks are defined\n");
		return -EINVAL;
	}

	core_power->num_clk = core_clk_count;
	core_power->clocks = devm_kcalloc(dev,
			core_power->num_clk, sizeof(struct clk_bulk_data),
			GFP_KERNEL);
	if (!core_power->clocks)
		return -ENOMEM;

	/* Initialize the CTRL power module */
	if (ctrl_clk_count == 0) {
		DRM_ERROR("no ctrl clocks are defined\n");
		return -EINVAL;
	}

	ctrl_power->num_clk = ctrl_clk_count;
	ctrl_power->clocks = devm_kcalloc(dev,
			ctrl_power->num_clk, sizeof(struct clk_bulk_data),
			GFP_KERNEL);
	if (!ctrl_power->clocks) {
		ctrl_power->num_clk = 0;
		return -ENOMEM;
	}

	/* Initialize the STREAM power module */
	if (stream_clk_count == 0) {
		DRM_ERROR("no stream (pixel) clocks are defined\n");
		return -EINVAL;
	}

	stream_power->num_clk = stream_clk_count;
	stream_power->clocks = devm_kcalloc(dev,
			stream_power->num_clk, sizeof(struct clk_bulk_data),
			GFP_KERNEL);
	if (!stream_power->clocks) {
		stream_power->num_clk = 0;
		return -ENOMEM;
	}

	return 0;
}

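/*
 * Populate the per-module clk_bulk_data arrays with the names from
 * clock-names, grouped by their "core", "ctrl" and "stream" prefixes.
 */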
static int dp_parser_clock(struct dp_parser *parser)
{
	int rc = 0, i = 0;
	int num_clk = 0;
	int core_clk_index = 0, ctrl_clk_index = 0, stream_clk_index = 0;
	int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
	const char *clk_name;
	struct device *dev = &parser->pdev->dev;
	struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
	struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
	struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];

	rc = dp_parser_init_clk_data(parser);
	if (rc) {
		DRM_ERROR("failed to initialize power data %d\n", rc);
		return -EINVAL;
	}

	core_clk_count = core_power->num_clk;
	ctrl_clk_count = ctrl_power->num_clk;
	stream_clk_count = stream_power->num_clk;

	num_clk = core_clk_count + ctrl_clk_count + stream_clk_count;

	for (i = 0; i < num_clk; i++) {
		rc = of_property_read_string_index(dev->of_node, "clock-names",
				i, &clk_name);
		if (rc) {
			DRM_ERROR("error reading clock-names %d\n", rc);
			return rc;
		}
		if (dp_parser_check_prefix("core", clk_name) &&
				core_clk_index < core_clk_count) {
			core_power->clocks[core_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
			core_clk_index++;
		} else if (dp_parser_check_prefix("stream", clk_name) &&
				stream_clk_index < stream_clk_count) {
			stream_power->clocks[stream_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
			stream_clk_index++;
		} else if (dp_parser_check_prefix("ctrl", clk_name) &&
			   ctrl_clk_index < ctrl_clk_count) {
			ctrl_power->clocks[ctrl_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
			ctrl_clk_index++;
		}
	}

	return 0;
}

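/**
 * devm_dp_parser_find_next_bridge() - find the bridge connected to the DP
 * output port (port@1) and record it in the parser
 * @dev: device to manage the bridge lookup with
 * @parser: dp_parser in which to store the bridge
 *
 * Return: 0 on success, otherwise a negative error code.
 */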
int devm_dp_parser_find_next_bridge(struct device *dev, struct dp_parser *parser)
{
	struct platform_device *pdev = parser->pdev;
	struct drm_bridge *bridge;

	bridge = devm_drm_of_get_bridge(dev, pdev->dev.of_node, 1, 0);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	parser->next_bridge = bridge;

	return 0;
}

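/* Top-level parse: controller resources, misc DT properties and clocks. */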
static int dp_parser_parse(struct dp_parser *parser)
{
	int rc = 0;

	if (!parser) {
		DRM_ERROR("invalid input\n");
		return -EINVAL;
	}

	rc = dp_parser_ctrl_res(parser);
	if (rc)
		return rc;

	rc = dp_parser_misc(parser);
	if (rc)
		return rc;

	rc = dp_parser_clock(parser);
	if (rc)
		return rc;

	return 0;
}

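/* Allocate a dp_parser for @pdev and hook up its parse callback. */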
struct dp_parser *dp_parser_get(struct platform_device *pdev)
{
	struct dp_parser *parser;

	parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
	if (!parser)
		return ERR_PTR(-ENOMEM);

	parser->parse = dp_parser_parse;
	parser->pdev = pdev;

	return parser;
}