xref: /openbmc/linux/drivers/gpu/drm/msm/dp/dp_parser.c (revision 47010c04)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/of_gpio.h>
#include <linux/phy/phy.h>

#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_bridge.h>

#include "dp_parser.h"
#include "dp_reg.h"

#define DP_DEFAULT_AHB_OFFSET	0x0000
#define DP_DEFAULT_AHB_SIZE	0x0200
#define DP_DEFAULT_AUX_OFFSET	0x0200
#define DP_DEFAULT_AUX_SIZE	0x0200
#define DP_DEFAULT_LINK_OFFSET	0x0400
#define DP_DEFAULT_LINK_SIZE	0x0C00
#define DP_DEFAULT_P0_OFFSET	0x1000
#define DP_DEFAULT_P0_SIZE	0x0400

static const struct dp_regulator_cfg sdm845_dp_reg_cfg = {
	.num = 2,
	.regs = {
		{"vdda-1p2", 21800, 4 },	/* 1.2 V */
		{"vdda-0p9", 36000, 32 },	/* 0.9 V */
	},
};

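/*
 * Map the idx'th MEM resource of @pdev and, on success, return its length
 * through @len. On failure an ERR_PTR() is returned and *len is untouched.
 */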
static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
{
	struct resource *res;
	void __iomem *base;

	base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
	if (!IS_ERR(base))
		*len = resource_size(res);

	return base;
}

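/*
 * Map the DP controller register regions (AHB, AUX, link, P0) and look up
 * the "dp" PHY. If the DT only provides the legacy single "reg" entry, the
 * sub-regions are carved out of that one mapping at the default offsets.
 */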
static int dp_parser_ctrl_res(struct dp_parser *parser)
{
	struct platform_device *pdev = parser->pdev;
	struct dp_io *io = &parser->io;
	struct dss_io_data *dss = &io->dp_controller;

	dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len);
	if (IS_ERR(dss->ahb.base))
		return PTR_ERR(dss->ahb.base);

	dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len);
	if (IS_ERR(dss->aux.base)) {
		/*
		 * The initial binding had a single reg, but in order to
		 * support variation in the sub-region sizes this was split.
		 * dp_ioremap() will fail with -EINVAL here if only a single
		 * reg is specified, so fill in the sub-region offsets and
		 * lengths based on this single region.
		 */
		if (PTR_ERR(dss->aux.base) == -EINVAL) {
			if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
				DRM_ERROR("legacy memory region not large enough\n");
				return -EINVAL;
			}

			dss->ahb.len = DP_DEFAULT_AHB_SIZE;
			dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET;
			dss->aux.len = DP_DEFAULT_AUX_SIZE;
			dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET;
			dss->link.len = DP_DEFAULT_LINK_SIZE;
			dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET;
			dss->p0.len = DP_DEFAULT_P0_SIZE;
		} else {
			DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base);
			return PTR_ERR(dss->aux.base);
		}
	} else {
		dss->link.base = dp_ioremap(pdev, 2, &dss->link.len);
		if (IS_ERR(dss->link.base)) {
			DRM_ERROR("unable to remap link region: %pe\n", dss->link.base);
			return PTR_ERR(dss->link.base);
		}

		dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len);
		if (IS_ERR(dss->p0.base)) {
			DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base);
			return PTR_ERR(dss->p0.base);
		}
	}

	io->phy = devm_phy_get(&pdev->dev, "dp");
	if (IS_ERR(io->phy))
		return PTR_ERR(io->phy);

	return 0;
}

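/*
 * Determine the maximum number of DP lanes from the element count of the
 * "data-lanes" DT property (e.g. data-lanes = <0 1 2 3> means four lanes).
 * If the property is missing or malformed, fall back to DP_MAX_NUM_DP_LANES.
 */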
static int dp_parser_misc(struct dp_parser *parser)
{
	struct device_node *of_node = parser->pdev->dev.of_node;
	int len = 0;
	const char *data_lane_property = "data-lanes";

	len = of_property_count_elems_of_size(of_node,
			 data_lane_property, sizeof(u32));
	if (len < 0) {
		DRM_WARN("Invalid property %s, default max DP lanes = %d\n",
				data_lane_property, DP_MAX_NUM_DP_LANES);
		len = DP_MAX_NUM_DP_LANES;
	}

	parser->max_dp_lanes = len;
	return 0;
}

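/* Return true if @clk_name starts with @clk_prefix. */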
static inline bool dp_parser_check_prefix(const char *clk_prefix,
						const char *clk_name)
{
	return !strncmp(clk_prefix, clk_name, strlen(clk_prefix));
}

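/*
 * Count the entries in "clock-names" whose names start with "core", "ctrl"
 * and "stream", and allocate matching clk_config arrays for the CORE, CTRL
 * and STREAM power modules. Each clock class must have at least one entry.
 */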
static int dp_parser_init_clk_data(struct dp_parser *parser)
{
	int num_clk, i, rc;
	int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
	const char *clk_name;
	struct device *dev = &parser->pdev->dev;
	struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
	struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
	struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];

	num_clk = of_property_count_strings(dev->of_node, "clock-names");
	if (num_clk <= 0) {
		DRM_ERROR("no clocks are defined\n");
		return -EINVAL;
	}

	for (i = 0; i < num_clk; i++) {
		rc = of_property_read_string_index(dev->of_node,
				"clock-names", i, &clk_name);
		if (rc < 0)
			return rc;

		if (dp_parser_check_prefix("core", clk_name))
			core_clk_count++;

		if (dp_parser_check_prefix("ctrl", clk_name))
			ctrl_clk_count++;

		if (dp_parser_check_prefix("stream", clk_name))
			stream_clk_count++;
	}

	/* Initialize the CORE power module */
	if (core_clk_count == 0) {
		DRM_ERROR("no core clocks are defined\n");
		return -EINVAL;
	}

	core_power->num_clk = core_clk_count;
	core_power->clk_config = devm_kzalloc(dev,
			sizeof(struct dss_clk) * core_power->num_clk,
			GFP_KERNEL);
	if (!core_power->clk_config)
		return -EINVAL;

	/* Initialize the CTRL power module */
	if (ctrl_clk_count == 0) {
		DRM_ERROR("no ctrl clocks are defined\n");
		return -EINVAL;
	}

	ctrl_power->num_clk = ctrl_clk_count;
	ctrl_power->clk_config = devm_kzalloc(dev,
			sizeof(struct dss_clk) * ctrl_power->num_clk,
			GFP_KERNEL);
	if (!ctrl_power->clk_config) {
		ctrl_power->num_clk = 0;
		return -EINVAL;
	}

	/* Initialize the STREAM power module */
	if (stream_clk_count == 0) {
		DRM_ERROR("no stream (pixel) clocks are defined\n");
		return -EINVAL;
	}

	stream_power->num_clk = stream_clk_count;
	stream_power->clk_config = devm_kzalloc(dev,
			sizeof(struct dss_clk) * stream_power->num_clk,
			GFP_KERNEL);
	if (!stream_power->clk_config) {
		stream_power->num_clk = 0;
		return -EINVAL;
	}

	return 0;
}

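/*
 * Walk "clock-names" again and fill in the per-module clock tables built by
 * dp_parser_init_clk_data(). Clocks matching "ctrl_link" or "stream_pixel"
 * are tagged DSS_CLK_PCLK (rate-settable), everything else DSS_CLK_AHB.
 * A typical DT node lists e.g. "core_iface", "core_aux", "ctrl_link",
 * "ctrl_link_iface" and "stream_pixel".
 */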
static int dp_parser_clock(struct dp_parser *parser)
{
	int rc = 0, i = 0;
	int num_clk = 0;
	int core_clk_index = 0, ctrl_clk_index = 0, stream_clk_index = 0;
	int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
	const char *clk_name;
	struct device *dev = &parser->pdev->dev;
	struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
	struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
	struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];

	rc = dp_parser_init_clk_data(parser);
	if (rc) {
		DRM_ERROR("failed to initialize power data %d\n", rc);
		return -EINVAL;
	}

	core_clk_count = core_power->num_clk;
	ctrl_clk_count = ctrl_power->num_clk;
	stream_clk_count = stream_power->num_clk;

	num_clk = core_clk_count + ctrl_clk_count + stream_clk_count;

	for (i = 0; i < num_clk; i++) {
		rc = of_property_read_string_index(dev->of_node, "clock-names",
				i, &clk_name);
		if (rc) {
			DRM_ERROR("error reading clock-names %d\n", rc);
			return rc;
		}
		if (dp_parser_check_prefix("core", clk_name) &&
				core_clk_index < core_clk_count) {
			struct dss_clk *clk =
				&core_power->clk_config[core_clk_index];
			strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
			clk->type = DSS_CLK_AHB;
			core_clk_index++;
		} else if (dp_parser_check_prefix("stream", clk_name) &&
				stream_clk_index < stream_clk_count) {
			struct dss_clk *clk =
				&stream_power->clk_config[stream_clk_index];
			strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
			clk->type = DSS_CLK_PCLK;
			stream_clk_index++;
		} else if (dp_parser_check_prefix("ctrl", clk_name) &&
			   ctrl_clk_index < ctrl_clk_count) {
			struct dss_clk *clk =
				&ctrl_power->clk_config[ctrl_clk_index];
			strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
			ctrl_clk_index++;
			if (dp_parser_check_prefix("ctrl_link", clk_name) ||
			    dp_parser_check_prefix("stream_pixel", clk_name))
				clk->type = DSS_CLK_PCLK;
			else
				clk->type = DSS_CLK_AHB;
		}
	}

	DRM_DEBUG_DP("clock parsing successful\n");

	return 0;
}

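/*
 * Look up the bridge (or panel, wrapped into a panel-bridge) connected to
 * output port 1, endpoint 0 of the DP controller node.
 */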
static int dp_parser_find_next_bridge(struct dp_parser *parser)
{
	struct device *dev = &parser->pdev->dev;
	struct drm_bridge *bridge;

	bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	parser->next_bridge = bridge;

	return 0;
}

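/*
 * Top-level parser: map the controller registers, read the misc DT
 * properties, build the clock tables and find the downstream bridge. This is
 * exposed to the rest of the driver through the parser->parse() callback.
 */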
static int dp_parser_parse(struct dp_parser *parser, int connector_type)
{
	int rc = 0;

	if (!parser) {
		DRM_ERROR("invalid input\n");
		return -EINVAL;
	}

	rc = dp_parser_ctrl_res(parser);
	if (rc)
		return rc;

	rc = dp_parser_misc(parser);
	if (rc)
		return rc;

	rc = dp_parser_clock(parser);
	if (rc)
		return rc;

	/*
	 * External bridges are mandatory for eDP interfaces: one has to
	 * provide at least an eDP panel (which gets wrapped into panel-bridge).
	 *
	 * For DisplayPort interfaces external bridges are optional, so
	 * silently ignore an error if one is not present (-ENODEV).
	 */
	rc = dp_parser_find_next_bridge(parser);
	if (rc == -ENODEV) {
		if (connector_type == DRM_MODE_CONNECTOR_eDP) {
			DRM_ERROR("eDP: next bridge is not present\n");
			return rc;
		}
	} else if (rc) {
		if (rc != -EPROBE_DEFER)
			DRM_ERROR("DP: error parsing next bridge: %d\n", rc);
		return rc;
	}

	/*
	 * Map the regulator information according to the hardware version.
	 * Since only one platform is currently supported, point directly at
	 * the sdm845 regulator config.
	 */
	parser->regulator_cfg = &sdm845_dp_reg_cfg;

	return 0;
}

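/*
 * Allocate a struct dp_parser for @pdev and hook up the parse callback.
 *
 * A minimal usage sketch from the caller's side (error handling elided,
 * @connector_type is whatever connector type the caller registers):
 *
 *	parser = dp_parser_get(pdev);
 *	if (IS_ERR(parser))
 *		return PTR_ERR(parser);
 *
 *	rc = parser->parse(parser, connector_type);
 */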
struct dp_parser *dp_parser_get(struct platform_device *pdev)
{
	struct dp_parser *parser;

	parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
	if (!parser)
		return ERR_PTR(-ENOMEM);

	parser->parse = dp_parser_parse;
	parser->pdev = pdev;

	return parser;
}