xref: /openbmc/linux/drivers/gpu/drm/msm/dp/dp_parser.c (revision 5e8bf00e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
4  */
5 
6 #include <linux/of_gpio.h>
7 #include <linux/phy/phy.h>
8 
9 #include <drm/drm_of.h>
10 #include <drm/drm_print.h>
11 #include <drm/drm_bridge.h>
12 
13 #include "dp_parser.h"
14 #include "dp_reg.h"
15 
16 #define DP_DEFAULT_AHB_OFFSET	0x0000
17 #define DP_DEFAULT_AHB_SIZE	0x0200
18 #define DP_DEFAULT_AUX_OFFSET	0x0200
19 #define DP_DEFAULT_AUX_SIZE	0x0200
20 #define DP_DEFAULT_LINK_OFFSET	0x0400
21 #define DP_DEFAULT_LINK_SIZE	0x0C00
22 #define DP_DEFAULT_P0_OFFSET	0x1000
23 #define DP_DEFAULT_P0_SIZE	0x0400
24 
/*
 * Regulator set for the SDM845-class DP controller: supply name plus two
 * per-supply numeric parameters (presumably enable/disable load values —
 * TODO confirm against the dp_reg_entry definition in dp_parser.h).
 * dp_parser_parse() currently hard-wires every platform to this table.
 */
static const struct dp_regulator_cfg sdm845_dp_reg_cfg = {
	.num = 2,
	.regs = {
		{"vdda-1p2", 21800, 4 },	/* 1.2 V */
		{"vdda-0p9", 36000, 32 },	/* 0.9 V */
	},
};
32 
33 static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
34 {
35 	struct resource *res;
36 	void __iomem *base;
37 
38 	base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
39 	if (!IS_ERR(base))
40 		*len = resource_size(res);
41 
42 	return base;
43 }
44 
/*
 * dp_parser_ctrl_res() - map the controller MMIO regions and get the DP PHY
 *
 * The modern binding provides four reg entries (ahb, aux, link, p0); the
 * legacy binding provided a single region covering all four at fixed
 * offsets.  Returns 0 on success or a negative errno.
 */
static int dp_parser_ctrl_res(struct dp_parser *parser)
{
	struct platform_device *pdev = parser->pdev;
	struct dp_io *io = &parser->io;
	struct dss_io_data *dss = &io->dp_controller;

	dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len);
	if (IS_ERR(dss->ahb.base))
		return PTR_ERR(dss->ahb.base);

	dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len);
	if (IS_ERR(dss->aux.base)) {
		/*
		 * The initial binding had a single reg, but in order to
		 * support variation in the sub-region sizes this was split.
		 * dp_ioremap() will fail with -EINVAL here if only a single
		 * reg is specified, so fill in the sub-region offsets and
		 * lengths based on this single region.
		 */
		if (PTR_ERR(dss->aux.base) == -EINVAL) {
			/*
			 * Validate against the full legacy span while
			 * ahb.len still holds the single region's size;
			 * it is shrunk to the AHB sub-window just below.
			 */
			if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
				DRM_ERROR("legacy memory region not large enough\n");
				return -EINVAL;
			}

			/* Carve the single mapping into the four sub-regions. */
			dss->ahb.len = DP_DEFAULT_AHB_SIZE;
			dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET;
			dss->aux.len = DP_DEFAULT_AUX_SIZE;
			dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET;
			dss->link.len = DP_DEFAULT_LINK_SIZE;
			dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET;
			dss->p0.len = DP_DEFAULT_P0_SIZE;
		} else {
			DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base);
			return PTR_ERR(dss->aux.base);
		}
	} else {
		/* Modern binding: link and p0 come from their own reg entries. */
		dss->link.base = dp_ioremap(pdev, 2, &dss->link.len);
		if (IS_ERR(dss->link.base)) {
			DRM_ERROR("unable to remap link region: %pe\n", dss->link.base);
			return PTR_ERR(dss->link.base);
		}

		dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len);
		if (IS_ERR(dss->p0.base)) {
			DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base);
			return PTR_ERR(dss->p0.base);
		}
	}

	/* Devm-managed PHY lookup by the "dp" phy-names entry. */
	io->phy = devm_phy_get(&pdev->dev, "dp");
	if (IS_ERR(io->phy))
		return PTR_ERR(io->phy);

	return 0;
}
101 
102 static int dp_parser_misc(struct dp_parser *parser)
103 {
104 	struct device_node *of_node = parser->pdev->dev.of_node;
105 	int len;
106 
107 	len = drm_of_get_data_lanes_count(of_node, 1, DP_MAX_NUM_DP_LANES);
108 	if (len < 0) {
109 		DRM_WARN("Invalid property \"data-lanes\", default max DP lanes = %d\n",
110 			 DP_MAX_NUM_DP_LANES);
111 		len = DP_MAX_NUM_DP_LANES;
112 	}
113 
114 	parser->max_dp_lanes = len;
115 	return 0;
116 }
117 
/* Return true when @clk_name begins with @clk_prefix. */
static inline bool dp_parser_check_prefix(const char *clk_prefix,
						const char *clk_name)
{
	size_t plen = strlen(clk_prefix);

	return strncmp(clk_prefix, clk_name, plen) == 0;
}
123 
124 static int dp_parser_init_clk_data(struct dp_parser *parser)
125 {
126 	int num_clk, i, rc;
127 	int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
128 	const char *clk_name;
129 	struct device *dev = &parser->pdev->dev;
130 	struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
131 	struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
132 	struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
133 
134 	num_clk = of_property_count_strings(dev->of_node, "clock-names");
135 	if (num_clk <= 0) {
136 		DRM_ERROR("no clocks are defined\n");
137 		return -EINVAL;
138 	}
139 
140 	for (i = 0; i < num_clk; i++) {
141 		rc = of_property_read_string_index(dev->of_node,
142 				"clock-names", i, &clk_name);
143 		if (rc < 0)
144 			return rc;
145 
146 		if (dp_parser_check_prefix("core", clk_name))
147 			core_clk_count++;
148 
149 		if (dp_parser_check_prefix("ctrl", clk_name))
150 			ctrl_clk_count++;
151 
152 		if (dp_parser_check_prefix("stream", clk_name))
153 			stream_clk_count++;
154 	}
155 
156 	/* Initialize the CORE power module */
157 	if (core_clk_count == 0) {
158 		DRM_ERROR("no core clocks are defined\n");
159 		return -EINVAL;
160 	}
161 
162 	core_power->num_clk = core_clk_count;
163 	core_power->clocks = devm_kcalloc(dev,
164 			core_power->num_clk, sizeof(struct clk_bulk_data),
165 			GFP_KERNEL);
166 	if (!core_power->clocks)
167 		return -ENOMEM;
168 
169 	/* Initialize the CTRL power module */
170 	if (ctrl_clk_count == 0) {
171 		DRM_ERROR("no ctrl clocks are defined\n");
172 		return -EINVAL;
173 	}
174 
175 	ctrl_power->num_clk = ctrl_clk_count;
176 	ctrl_power->clocks = devm_kcalloc(dev,
177 			ctrl_power->num_clk, sizeof(struct clk_bulk_data),
178 			GFP_KERNEL);
179 	if (!ctrl_power->clocks) {
180 		ctrl_power->num_clk = 0;
181 		return -ENOMEM;
182 	}
183 
184 	/* Initialize the STREAM power module */
185 	if (stream_clk_count == 0) {
186 		DRM_ERROR("no stream (pixel) clocks are defined\n");
187 		return -EINVAL;
188 	}
189 
190 	stream_power->num_clk = stream_clk_count;
191 	stream_power->clocks = devm_kcalloc(dev,
192 			stream_power->num_clk, sizeof(struct clk_bulk_data),
193 			GFP_KERNEL);
194 	if (!stream_power->clocks) {
195 		stream_power->num_clk = 0;
196 		return -ENOMEM;
197 	}
198 
199 	return 0;
200 }
201 
202 static int dp_parser_clock(struct dp_parser *parser)
203 {
204 	int rc = 0, i = 0;
205 	int num_clk = 0;
206 	int core_clk_index = 0, ctrl_clk_index = 0, stream_clk_index = 0;
207 	int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
208 	const char *clk_name;
209 	struct device *dev = &parser->pdev->dev;
210 	struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
211 	struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
212 	struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
213 
214 	rc =  dp_parser_init_clk_data(parser);
215 	if (rc) {
216 		DRM_ERROR("failed to initialize power data %d\n", rc);
217 		return -EINVAL;
218 	}
219 
220 	core_clk_count = core_power->num_clk;
221 	ctrl_clk_count = ctrl_power->num_clk;
222 	stream_clk_count = stream_power->num_clk;
223 
224 	num_clk = core_clk_count + ctrl_clk_count + stream_clk_count;
225 
226 	for (i = 0; i < num_clk; i++) {
227 		rc = of_property_read_string_index(dev->of_node, "clock-names",
228 				i, &clk_name);
229 		if (rc) {
230 			DRM_ERROR("error reading clock-names %d\n", rc);
231 			return rc;
232 		}
233 		if (dp_parser_check_prefix("core", clk_name) &&
234 				core_clk_index < core_clk_count) {
235 			core_power->clocks[core_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
236 			core_clk_index++;
237 		} else if (dp_parser_check_prefix("stream", clk_name) &&
238 				stream_clk_index < stream_clk_count) {
239 			stream_power->clocks[stream_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
240 			stream_clk_index++;
241 		} else if (dp_parser_check_prefix("ctrl", clk_name) &&
242 			   ctrl_clk_index < ctrl_clk_count) {
243 			ctrl_power->clocks[ctrl_clk_index].id = devm_kstrdup(dev, clk_name, GFP_KERNEL);
244 			ctrl_clk_index++;
245 		}
246 	}
247 
248 	return 0;
249 }
250 
251 int dp_parser_find_next_bridge(struct dp_parser *parser)
252 {
253 	struct device *dev = &parser->pdev->dev;
254 	struct drm_bridge *bridge;
255 
256 	bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
257 	if (IS_ERR(bridge))
258 		return PTR_ERR(bridge);
259 
260 	parser->next_bridge = bridge;
261 
262 	return 0;
263 }
264 
265 static int dp_parser_parse(struct dp_parser *parser)
266 {
267 	int rc = 0;
268 
269 	if (!parser) {
270 		DRM_ERROR("invalid input\n");
271 		return -EINVAL;
272 	}
273 
274 	rc = dp_parser_ctrl_res(parser);
275 	if (rc)
276 		return rc;
277 
278 	rc = dp_parser_misc(parser);
279 	if (rc)
280 		return rc;
281 
282 	rc = dp_parser_clock(parser);
283 	if (rc)
284 		return rc;
285 
286 	/* Map the corresponding regulator information according to
287 	 * version. Currently, since we only have one supported platform,
288 	 * mapping the regulator directly.
289 	 */
290 	parser->regulator_cfg = &sdm845_dp_reg_cfg;
291 
292 	return 0;
293 }
294 
295 struct dp_parser *dp_parser_get(struct platform_device *pdev)
296 {
297 	struct dp_parser *parser;
298 
299 	parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
300 	if (!parser)
301 		return ERR_PTR(-ENOMEM);
302 
303 	parser->parse = dp_parser_parse;
304 	parser->pdev = pdev;
305 
306 	return parser;
307 }
308