xref: /openbmc/linux/drivers/ufs/host/ufshcd-pltfrm.c (revision 517f8eb3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Universal Flash Storage Host controller Platform bus based glue driver
4  * Copyright (C) 2011-2013 Samsung India Software Operations
5  *
6  * Authors:
7  *	Santosh Yaraganavi <santosh.sy@samsung.com>
8  *	Vinayak Holikatti <h.vinayak@samsung.com>
9  */
10 
11 #include <linux/module.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_runtime.h>
14 #include <linux/of.h>
15 
16 #include <ufs/ufshcd.h>
17 #include "ufshcd-pltfrm.h"
18 #include <ufs/unipro.h>
19 
20 #define UFSHCD_DEFAULT_LANES_PER_DIRECTION		2
21 
22 static int ufshcd_parse_clock_info(struct ufs_hba *hba)
23 {
24 	int ret = 0;
25 	int cnt;
26 	int i;
27 	struct device *dev = hba->dev;
28 	struct device_node *np = dev->of_node;
29 	const char *name;
30 	u32 *clkfreq = NULL;
31 	struct ufs_clk_info *clki;
32 	int len = 0;
33 	size_t sz = 0;
34 
35 	if (!np)
36 		goto out;
37 
38 	cnt = of_property_count_strings(np, "clock-names");
39 	if (!cnt || (cnt == -EINVAL)) {
40 		dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
41 				__func__);
42 	} else if (cnt < 0) {
43 		dev_err(dev, "%s: count clock strings failed, err %d\n",
44 				__func__, cnt);
45 		ret = cnt;
46 	}
47 
48 	if (cnt <= 0)
49 		goto out;
50 
51 	if (!of_get_property(np, "freq-table-hz", &len)) {
52 		dev_info(dev, "freq-table-hz property not specified\n");
53 		goto out;
54 	}
55 
56 	if (len <= 0)
57 		goto out;
58 
59 	sz = len / sizeof(*clkfreq);
60 	if (sz != 2 * cnt) {
61 		dev_err(dev, "%s len mismatch\n", "freq-table-hz");
62 		ret = -EINVAL;
63 		goto out;
64 	}
65 
66 	clkfreq = devm_kcalloc(dev, sz, sizeof(*clkfreq),
67 			       GFP_KERNEL);
68 	if (!clkfreq) {
69 		ret = -ENOMEM;
70 		goto out;
71 	}
72 
73 	ret = of_property_read_u32_array(np, "freq-table-hz",
74 			clkfreq, sz);
75 	if (ret && (ret != -EINVAL)) {
76 		dev_err(dev, "%s: error reading array %d\n",
77 				"freq-table-hz", ret);
78 		return ret;
79 	}
80 
81 	for (i = 0; i < sz; i += 2) {
82 		ret = of_property_read_string_index(np,	"clock-names", i/2,
83 						    &name);
84 		if (ret)
85 			goto out;
86 
87 		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
88 		if (!clki) {
89 			ret = -ENOMEM;
90 			goto out;
91 		}
92 
93 		clki->min_freq = clkfreq[i];
94 		clki->max_freq = clkfreq[i+1];
95 		clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
96 		if (!clki->name) {
97 			ret = -ENOMEM;
98 			goto out;
99 		}
100 
101 		if (!strcmp(name, "ref_clk"))
102 			clki->keep_link_active = true;
103 		dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
104 				clki->min_freq, clki->max_freq, clki->name);
105 		list_add_tail(&clki->list, &hba->clk_list_head);
106 	}
107 out:
108 	return ret;
109 }
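/*
 * Editor's sketch, not part of this file: a minimal example of how a
 * variant driver could walk the clock list that ufshcd_parse_clock_info()
 * builds above from the "clock-names" and "freq-table-hz" properties. It
 * assumes only the headers this file already includes; the function name
 * is illustrative.
 */
static void example_dump_ufs_clocks(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;

	/* each entry carries the clock name and the parsed min/max frequency */
	list_for_each_entry(clki, &hba->clk_list_head, list)
		dev_dbg(hba->dev, "clk %s: %u..%u Hz%s\n", clki->name,
			clki->min_freq, clki->max_freq,
			clki->keep_link_active ? " (keeps link active)" : "");
}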
110 
111 static bool phandle_exists(const struct device_node *np,
112 			   const char *phandle_name, int index)
113 {
114 	struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);
115 
116 	if (parse_np)
117 		of_node_put(parse_np);
118 
119 	return parse_np != NULL;
120 }
121 
122 #define MAX_PROP_SIZE 32
123 int ufshcd_populate_vreg(struct device *dev, const char *name,
124 			 struct ufs_vreg **out_vreg)
125 {
126 	char prop_name[MAX_PROP_SIZE];
127 	struct ufs_vreg *vreg = NULL;
128 	struct device_node *np = dev->of_node;
129 
130 	if (!np) {
131 		dev_err(dev, "%s: non DT initialization\n", __func__);
132 		goto out;
133 	}
134 
135 	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
136 	if (!phandle_exists(np, prop_name, 0)) {
137 		dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
138 				__func__, prop_name);
139 		goto out;
140 	}
141 
142 	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
143 	if (!vreg)
144 		return -ENOMEM;
145 
146 	vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
147 	if (!vreg->name)
148 		return -ENOMEM;
149 
150 	snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
151 	if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
152 		dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
153 		vreg->max_uA = 0;
154 	}
155 out:
156 	*out_vreg = vreg;
157 	return 0;
158 }
159 EXPORT_SYMBOL_GPL(ufshcd_populate_vreg);
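/*
 * Editor's sketch, not part of this file: ufshcd_populate_vreg() is exported
 * so platform glue drivers can parse additional, vendor-specific supplies the
 * same way. The "vdd-phy" supply name and the wrapper below are hypothetical.
 */
static int example_parse_extra_supply(struct ufs_hba *hba,
				      struct ufs_vreg **vreg)
{
	/*
	 * looks for "vdd-phy-supply" and "vdd-phy-max-microamp" in the node;
	 * on success *vreg may still be NULL if the supply is absent, which
	 * is treated as an always-on regulator.
	 */
	return ufshcd_populate_vreg(hba->dev, "vdd-phy", vreg);
}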
160 
161 /**
162  * ufshcd_parse_regulator_info - get regulator info from device tree
163  * @hba: per adapter instance
164  *
165  * Get regulator info from device tree for vcc, vccq, vccq2 power supplies.
166  * If any of the supplies are not defined, they are assumed to be always-on
167  * and zero is returned. If a property is defined but parsing it fails,
168  * the corresponding error is returned.
169  *
170  * Return: 0 upon success; < 0 upon failure.
171  */
172 static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
173 {
174 	int err;
175 	struct device *dev = hba->dev;
176 	struct ufs_vreg_info *info = &hba->vreg_info;
177 
178 	err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
179 	if (err)
180 		goto out;
181 
182 	err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
183 	if (err)
184 		goto out;
185 
186 	err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
187 	if (err)
188 		goto out;
189 
190 	err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
191 out:
192 	return err;
193 }
194 
195 static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
196 {
197 	struct device *dev = hba->dev;
198 	int ret;
199 
200 	ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
201 		&hba->lanes_per_direction);
202 	if (ret) {
203 		dev_dbg(hba->dev,
204 			"%s: failed to read lanes-per-direction, ret=%d\n",
205 			__func__, ret);
206 		hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
207 	}
208 }
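/*
 * Editor's note, illustrative only and not part of this file: the parsers
 * above look for these properties in the UFS controller's device-tree node.
 * A board fragment could look roughly like this (phandles and frequencies
 * are made up):
 *
 *	&ufshc {
 *		vcc-supply = <&ufs_vcc_reg>;
 *		vcc-max-microamp = <500000>;
 *		vccq-supply = <&ufs_vccq_reg>;
 *		vccq2-supply = <&ufs_vccq2_reg>;
 *		clock-names = "core_clk", "ref_clk";
 *		freq-table-hz = <100000000 200000000>, <19200000 19200000>;
 *		lanes-per-direction = <2>;
 *	};
 *
 * A missing supply is treated as always-on, and a missing
 * "lanes-per-direction" falls back to UFSHCD_DEFAULT_LANES_PER_DIRECTION.
 */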
209 
210 /**
211  * ufshcd_get_pwr_dev_param - get the final agreed attributes for a
212  *                            power mode change
213  * @pltfrm_param: pointer to platform parameters
214  * @dev_max: pointer to device attributes
215  * @agreed_pwr: returned agreed attributes
216  *
217  * Return: 0 on success, non-zero value on failure.
218  */
219 int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
220 			     const struct ufs_pa_layer_attr *dev_max,
221 			     struct ufs_pa_layer_attr *agreed_pwr)
222 {
223 	int min_pltfrm_gear;
224 	int min_dev_gear;
225 	bool is_dev_sup_hs = false;
226 	bool is_pltfrm_max_hs = false;
227 
228 	if (dev_max->pwr_rx == FAST_MODE)
229 		is_dev_sup_hs = true;
230 
231 	if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
232 		is_pltfrm_max_hs = true;
233 		min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
234 					pltfrm_param->hs_tx_gear);
235 	} else {
236 		min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
237 					pltfrm_param->pwm_tx_gear);
238 	}
239 
240 	/*
241 	 * device doesn't support HS but
242 	 * pltfrm_param->desired_working_mode is HS,
243 	 * thus device and pltfrm_param don't agree
244 	 */
245 	if (!is_dev_sup_hs && is_pltfrm_max_hs) {
246 		pr_info("%s: device doesn't support HS\n",
247 			__func__);
248 		return -ENOTSUPP;
249 	} else if (is_dev_sup_hs && is_pltfrm_max_hs) {
250 		/*
251 		 * since the device supports HS, it supports FAST_MODE.
252 		 * since pltfrm_param->desired_working_mode is also HS,
253 		 * the final decision (FAST/FASTAUTO) is made according
254 		 * to pltfrm_params as it is the restricting factor
255 		 */
256 		agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
257 		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
258 	} else {
259 		/*
260 		 * here pltfrm_param->desired_working_mode is PWM.
261 		 * it doesn't matter whether device supports HS or PWM,
262 		 * in both cases pltfrm_param->desired_working_mode will
263 		 * determine the mode
264 		 */
265 		agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
266 		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
267 	}
268 
269 	/*
270 	 * we would like tx to work in the minimum number of lanes
271 	 * between device capability and vendor preferences.
272 	 * the same decision will be made for rx
273 	 */
274 	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
275 				    pltfrm_param->tx_lanes);
276 	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
277 				    pltfrm_param->rx_lanes);
278 
279 	/* device maximum gear is the minimum between device rx and tx gears */
280 	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
281 
282 	/*
283 	 * if the device capabilities and the vendor pre-defined preferences are
284 	 * both HS or both PWM then set the minimum gear to be the chosen
285 	 * working gear.
286 	 * if one is PWM and one is HS then the one that is PWM gets to decide
287 	 * the gear, as it is the one that also decided previously which pwr
288 	 * mode the device will be configured to.
289 	 */
290 	if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
291 	    (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
292 		agreed_pwr->gear_rx =
293 			min_t(u32, min_dev_gear, min_pltfrm_gear);
294 	} else if (!is_dev_sup_hs) {
295 		agreed_pwr->gear_rx = min_dev_gear;
296 	} else {
297 		agreed_pwr->gear_rx = min_pltfrm_gear;
298 	}
299 	agreed_pwr->gear_tx = agreed_pwr->gear_rx;
300 
301 	agreed_pwr->hs_rate = pltfrm_param->hs_rate;
302 
303 	return 0;
304 }
305 EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
306 
307 void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
308 {
309 	*dev_param = (struct ufs_dev_params){
310 		.tx_lanes = UFS_LANE_2,
311 		.rx_lanes = UFS_LANE_2,
312 		.hs_rx_gear = UFS_HS_G3,
313 		.hs_tx_gear = UFS_HS_G3,
314 		.pwm_rx_gear = UFS_PWM_G4,
315 		.pwm_tx_gear = UFS_PWM_G4,
316 		.rx_pwr_pwm = SLOW_MODE,
317 		.tx_pwr_pwm = SLOW_MODE,
318 		.rx_pwr_hs = FAST_MODE,
319 		.tx_pwr_hs = FAST_MODE,
320 		.hs_rate = PA_HS_MODE_B,
321 		.desired_working_mode = UFS_HS_MODE,
322 	};
323 }
324 EXPORT_SYMBOL_GPL(ufshcd_init_pwr_dev_param);
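/*
 * Editor's sketch, not part of this file: a variant driver's power mode
 * change path typically combines the two helpers above by starting from the
 * platform defaults, optionally tightening them, and then letting
 * ufshcd_get_pwr_dev_param() negotiate against the device maximums. The
 * function name and the HS-G1 restriction below are illustrative.
 */
static int example_negotiate_pwr(struct ufs_hba *hba,
				 const struct ufs_pa_layer_attr *dev_max,
				 struct ufs_pa_layer_attr *dev_req)
{
	struct ufs_dev_params host_cap;
	int ret;

	ufshcd_init_pwr_dev_param(&host_cap);
	/* hypothetical controller that cannot go beyond HS gear 1 */
	host_cap.hs_rx_gear = UFS_HS_G1;
	host_cap.hs_tx_gear = UFS_HS_G1;

	ret = ufshcd_get_pwr_dev_param(&host_cap, dev_max, dev_req);
	if (ret)
		dev_err(hba->dev, "%s: failed to determine capabilities %d\n",
			__func__, ret);

	return ret;
}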
325 
326 /**
327  * ufshcd_pltfrm_init - probe routine of the driver
328  * @pdev: pointer to Platform device handle
329  * @vops: pointer to variant ops
330  *
331  * Return: 0 on success, non-zero value on failure.
332  */
333 int ufshcd_pltfrm_init(struct platform_device *pdev,
334 		       const struct ufs_hba_variant_ops *vops)
335 {
336 	struct ufs_hba *hba;
337 	void __iomem *mmio_base;
338 	int irq, err;
339 	struct device *dev = &pdev->dev;
340 
341 	mmio_base = devm_platform_ioremap_resource(pdev, 0);
342 	if (IS_ERR(mmio_base)) {
343 		err = PTR_ERR(mmio_base);
344 		goto out;
345 	}
346 
347 	irq = platform_get_irq(pdev, 0);
348 	if (irq < 0) {
349 		err = irq;
350 		goto out;
351 	}
352 
353 	err = ufshcd_alloc_host(dev, &hba);
354 	if (err) {
355 		dev_err(dev, "Allocation failed\n");
356 		goto out;
357 	}
358 
359 	hba->vops = vops;
360 
361 	err = ufshcd_parse_clock_info(hba);
362 	if (err) {
363 		dev_err(dev, "%s: clock parse failed %d\n",
364 				__func__, err);
365 		goto dealloc_host;
366 	}
367 	err = ufshcd_parse_regulator_info(hba);
368 	if (err) {
369 		dev_err(dev, "%s: regulator init failed %d\n",
370 				__func__, err);
371 		goto dealloc_host;
372 	}
373 
374 	ufshcd_init_lanes_per_dir(hba);
375 
376 	err = ufshcd_init(hba, mmio_base, irq);
377 	if (err) {
378 		dev_err_probe(dev, err, "Initialization failed with error %d\n",
379 			      err);
380 		goto dealloc_host;
381 	}
382 
383 	pm_runtime_set_active(dev);
384 	pm_runtime_enable(dev);
385 
386 	return 0;
387 
388 dealloc_host:
389 	ufshcd_dealloc_host(hba);
390 out:
391 	return err;
392 }
393 EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
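/*
 * Editor's sketch, not part of this file: the usual shape of a platform glue
 * driver built on ufshcd_pltfrm_init(). The "example,ufshc" compatible, the
 * names and the empty variant ops are hypothetical; a real driver fills
 * struct ufs_hba_variant_ops with its callbacks.
 */
static const struct ufs_hba_variant_ops example_ufs_hba_vops = {
	.name = "example-ufs",
};

static int example_ufs_probe(struct platform_device *pdev)
{
	/* maps the registers, parses DT, allocates the hba and runs ufshcd_init() */
	return ufshcd_pltfrm_init(pdev, &example_ufs_hba_vops);
}

static const struct of_device_id example_ufs_of_match[] = {
	{ .compatible = "example,ufshc" },
	{ },
};
MODULE_DEVICE_TABLE(of, example_ufs_of_match);

static struct platform_driver example_ufs_driver = {
	.probe	= example_ufs_probe,
	.driver	= {
		.name = "example-ufshcd",
		.of_match_table = example_ufs_of_match,
	},
};
module_platform_driver(example_ufs_driver);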
394 
395 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
396 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
397 MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
398 MODULE_LICENSE("GPL");
399