/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 *  Copyright (C) 2014 Samsung Electronics
 *  Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 *  Lukasz Majewski <l.majewski@samsung.com>
 *
 *  Copyright (C) 2011 Samsung Electronics
 *  Donggeun Kim <dg77.kim@samsung.com>
 *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "exynos_tmu.h"
#include "../thermal_core.h"

/* Exynos generic registers */
#define EXYNOS_TMU_REG_TRIMINFO		0x0
#define EXYNOS_TMU_REG_CONTROL		0x20
#define EXYNOS_TMU_REG_STATUS		0x28
#define EXYNOS_TMU_REG_CURRENT_TEMP	0x40
#define EXYNOS_TMU_REG_INTEN		0x70
#define EXYNOS_TMU_REG_INTSTAT		0x74
#define EXYNOS_TMU_REG_INTCLEAR		0x78

#define EXYNOS_TMU_TEMP_MASK		0xff
#define EXYNOS_TMU_REF_VOLTAGE_SHIFT	24
#define EXYNOS_TMU_REF_VOLTAGE_MASK	0x1f
#define EXYNOS_TMU_BUF_SLOPE_SEL_MASK	0xf
#define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT	8
#define EXYNOS_TMU_CORE_EN_SHIFT	0

/* Exynos3250 specific registers */
#define EXYNOS_TMU_TRIMINFO_CON1	0x10

/* Exynos4210 specific registers */
#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP	0x44
#define EXYNOS4210_TMU_REG_TRIG_LEVEL0	0x50

/* Exynos5250, Exynos4412, Exynos3250 specific registers */
#define EXYNOS_TMU_TRIMINFO_CON2	0x14
#define EXYNOS_THD_TEMP_RISE		0x50
#define EXYNOS_THD_TEMP_FALL		0x54
#define EXYNOS_EMUL_CON		0x80

#define EXYNOS_TRIMINFO_RELOAD_ENABLE	1
#define EXYNOS_TRIMINFO_25_SHIFT	0
#define EXYNOS_TRIMINFO_85_SHIFT	8
#define EXYNOS_TMU_TRIP_MODE_SHIFT	13
#define EXYNOS_TMU_TRIP_MODE_MASK	0x7
#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT	12

#define EXYNOS_TMU_INTEN_RISE0_SHIFT	0
#define EXYNOS_TMU_INTEN_RISE1_SHIFT	4
#define EXYNOS_TMU_INTEN_RISE2_SHIFT	8
#define EXYNOS_TMU_INTEN_RISE3_SHIFT	12
#define EXYNOS_TMU_INTEN_FALL0_SHIFT	16

#define EXYNOS_EMUL_TIME	0x57F0
#define EXYNOS_EMUL_TIME_MASK	0xffff
#define EXYNOS_EMUL_TIME_SHIFT	16
#define EXYNOS_EMUL_DATA_SHIFT	8
#define EXYNOS_EMUL_DATA_MASK	0xFF
#define EXYNOS_EMUL_ENABLE	0x1

/* Exynos5260 specific */
#define EXYNOS5260_TMU_REG_INTEN		0xC0
#define EXYNOS5260_TMU_REG_INTSTAT		0xC4
#define EXYNOS5260_TMU_REG_INTCLEAR		0xC8
#define EXYNOS5260_EMUL_CON			0x100

/* Exynos4412 specific */
#define EXYNOS4412_MUX_ADDR_VALUE          6
#define EXYNOS4412_MUX_ADDR_SHIFT          20

/* Exynos5433 specific registers */
#define EXYNOS5433_TMU_REG_CONTROL1		0x024
#define EXYNOS5433_TMU_SAMPLING_INTERVAL	0x02c
#define EXYNOS5433_TMU_COUNTER_VALUE0		0x030
#define EXYNOS5433_TMU_COUNTER_VALUE1		0x034
#define EXYNOS5433_TMU_REG_CURRENT_TEMP1	0x044
#define EXYNOS5433_THD_TEMP_RISE3_0		0x050
#define EXYNOS5433_THD_TEMP_RISE7_4		0x054
#define EXYNOS5433_THD_TEMP_FALL3_0		0x060
#define EXYNOS5433_THD_TEMP_FALL7_4		0x064
#define EXYNOS5433_TMU_REG_INTEN		0x0c0
#define EXYNOS5433_TMU_REG_INTPEND		0x0c8
#define EXYNOS5433_TMU_EMUL_CON			0x110
#define EXYNOS5433_TMU_PD_DET_EN		0x130

#define EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT	16
#define EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT	23
#define EXYNOS5433_TRIMINFO_SENSOR_ID_MASK	\
			(0xf << EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT)
#define EXYNOS5433_TRIMINFO_CALIB_SEL_MASK	BIT(23)

#define EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING	0
#define EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING	1

#define EXYNOS5433_PD_DET_EN			1

/* Exynos5440 specific registers */
#define EXYNOS5440_TMU_S0_7_TRIM		0x000
#define EXYNOS5440_TMU_S0_7_CTRL		0x020
#define EXYNOS5440_TMU_S0_7_DEBUG		0x040
#define EXYNOS5440_TMU_S0_7_TEMP		0x0f0
#define EXYNOS5440_TMU_S0_7_TH0			0x110
#define EXYNOS5440_TMU_S0_7_TH1			0x130
#define EXYNOS5440_TMU_S0_7_TH2			0x150
#define EXYNOS5440_TMU_S0_7_IRQEN		0x210
#define EXYNOS5440_TMU_S0_7_IRQ			0x230
/* Exynos5440 common registers */
#define EXYNOS5440_TMU_IRQ_STATUS		0x000
#define EXYNOS5440_TMU_PMIN			0x004

#define EXYNOS5440_TMU_INTEN_RISE0_SHIFT	0
#define EXYNOS5440_TMU_INTEN_RISE1_SHIFT	1
#define EXYNOS5440_TMU_INTEN_RISE2_SHIFT	2
#define EXYNOS5440_TMU_INTEN_RISE3_SHIFT	3
#define EXYNOS5440_TMU_INTEN_FALL0_SHIFT	4
#define EXYNOS5440_TMU_TH_RISE4_SHIFT		24
#define EXYNOS5440_EFUSE_SWAP_OFFSET		8

/* Exynos7 specific registers */
#define EXYNOS7_THD_TEMP_RISE7_6		0x50
#define EXYNOS7_THD_TEMP_FALL7_6		0x60
#define EXYNOS7_TMU_REG_INTEN			0x110
#define EXYNOS7_TMU_REG_INTPEND			0x118
#define EXYNOS7_TMU_REG_EMUL_CON		0x160

#define EXYNOS7_TMU_TEMP_MASK			0x1ff
#define EXYNOS7_PD_DET_EN_SHIFT			23
#define EXYNOS7_TMU_INTEN_RISE0_SHIFT		0
#define EXYNOS7_TMU_INTEN_RISE1_SHIFT		1
#define EXYNOS7_TMU_INTEN_RISE2_SHIFT		2
#define EXYNOS7_TMU_INTEN_RISE3_SHIFT		3
#define EXYNOS7_TMU_INTEN_RISE4_SHIFT		4
#define EXYNOS7_TMU_INTEN_RISE5_SHIFT		5
#define EXYNOS7_TMU_INTEN_RISE6_SHIFT		6
#define EXYNOS7_TMU_INTEN_RISE7_SHIFT		7
#define EXYNOS7_EMUL_DATA_SHIFT			7
#define EXYNOS7_EMUL_DATA_MASK			0x1ff

#define MCELSIUS	1000
/**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
 *	driver
 * @id: identifier of the one instance of the TMU controller.
 * @pdata: pointer to the tmu platform/configuration data
 * @base: base address of the single instance of the TMU controller.
 * @base_second: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SOC type.
 * @irq_work: pointer to the irq work structure.
 * @lock: lock to implement synchronization.
 * @clk: pointer to the clock structure.
 * @clk_sec: pointer to the clock structure for accessing the base_second.
 * @sclk: pointer to the clock structure for accessing the tmu special clk.
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @tzd: pointer to the thermal zone device registered with the thermal core.
 * @ntrip: number of supported trip points.
 * @tmu_initialize: SoC specific TMU initialization method
 * @tmu_control: SoC specific TMU control method
 * @tmu_read: SoC specific TMU temperature read method
 * @tmu_set_emulation: SoC specific TMU emulation setting method
 * @tmu_clear_irqs: SoC specific TMU interrupts clearing method
 */
struct exynos_tmu_data {
	int id;
	struct exynos_tmu_platform_data *pdata;
	void __iomem *base;
	void __iomem *base_second;
	int irq;
	enum soc_type soc;
	struct work_struct irq_work;
	struct mutex lock;
	struct clk *clk, *clk_sec, *sclk;
	u16 temp_error1, temp_error2;
	struct regulator *regulator;
	struct thermal_zone_device *tzd;
	unsigned int ntrip;

	int (*tmu_initialize)(struct platform_device *pdev);
	void (*tmu_control)(struct platform_device *pdev, bool on);
	int (*tmu_read)(struct exynos_tmu_data *data);
	void (*tmu_set_emulation)(struct exynos_tmu_data *data, int temp);
	void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
};

static void exynos_report_trigger(struct exynos_tmu_data *p)
{
	char data[10], *envp[] = { data, NULL };
	struct thermal_zone_device *tz = p->tzd;
	int temp;
	unsigned int i;

	if (!tz) {
		pr_err("No thermal zone device defined\n");
		return;
	}

	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);

	mutex_lock(&tz->lock);
	/* Find the level for which trip happened */
	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		tz->ops->get_trip_temp(tz, i, &temp);
		if (tz->last_temperature < temp)
			break;
	}

	snprintf(data, sizeof(data), "%u", i);
	kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, envp);
	mutex_unlock(&tz->lock);
}

/*
 * TMU treats temperature as a mapped temperature code.
 * The temperature is converted differently depending on the calibration type.
 */
static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp_code;

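	/*
	 * Two-point trimming linearly interpolates between the two fused
	 * calibration codes (temp_error1/temp_error2), which correspond to
	 * the first and second trim temperatures from the platform data.
	 */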
	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp_code = (temp - pdata->first_point_trim) *
			(data->temp_error2 - data->temp_error1) /
			(pdata->second_point_trim - pdata->first_point_trim) +
			data->temp_error1;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp_code = temp + data->temp_error1 - pdata->first_point_trim;
		break;
	default:
		temp_code = temp + pdata->default_temp_offset;
		break;
	}

	return temp_code;
}

/*
 * Calculate a temperature value from a temperature code.
 * The unit of the temperature is degree Celsius.
 */
static int code_to_temp(struct exynos_tmu_data *data, u16 temp_code)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp;

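	/* Inverse of temp_to_code(): map a raw sensor code back to Celsius. */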
	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp = (temp_code - data->temp_error1) *
			(pdata->second_point_trim - pdata->first_point_trim) /
			(data->temp_error2 - data->temp_error1) +
			pdata->first_point_trim;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp = temp_code - data->temp_error1 + pdata->first_point_trim;
		break;
	default:
		temp = temp_code - pdata->default_temp_offset;
		break;
	}

	return temp;
}

static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
	data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
				EXYNOS_TMU_TEMP_MASK);

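	/*
	 * Fall back to the default eFuse value from the platform data when
	 * the fused trim codes are missing or outside the expected range.
	 */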
	if (!data->temp_error1 ||
		(pdata->min_efuse_value > data->temp_error1) ||
		(data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	if (!data->temp_error2)
		data->temp_error2 =
			(pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
			EXYNOS_TMU_TEMP_MASK;
}

static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling)
{
	struct thermal_zone_device *tz = data->tzd;
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(tz);
	unsigned long temp;
	int i;

	if (!trips) {
		pr_err("%s: Cannot get trip points from of-thermal.c!\n",
		       __func__);
		return 0;
	}

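	/*
	 * Each trip level occupies one byte of the threshold register;
	 * critical trips are programmed separately and skipped here.
	 */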
	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		if (trips[i].type == THERMAL_TRIP_CRITICAL)
			continue;

		temp = trips[i].temperature / MCELSIUS;
		if (falling)
			temp -= (trips[i].hysteresis / MCELSIUS);
		else
			threshold &= ~(0xff << 8 * i);

		threshold |= temp_to_code(data, temp) << 8 * i;
	}

	return threshold;
}

static int exynos_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	int ret;

	if (of_thermal_get_ntrips(data->tzd) > data->ntrip) {
		dev_info(&pdev->dev,
			 "More trip points than supported by this TMU.\n");
		dev_info(&pdev->dev,
			 "%d trip points should be configured in polling mode.\n",
			 (of_thermal_get_ntrips(data->tzd) - data->ntrip));
	}

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	ret = data->tmu_initialize(pdev);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	return ret;
}

static u32 get_con_reg(struct exynos_tmu_data *data, u32 con)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;

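	/*
	 * Program the sensing mux (where applicable), reference voltage,
	 * buffer slope (gain) and optional noise cancellation mode into
	 * the control register value.
	 */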
	if (data->soc == SOC_ARCH_EXYNOS4412 ||
	    data->soc == SOC_ARCH_EXYNOS3250)
		con |= (EXYNOS4412_MUX_ADDR_VALUE << EXYNOS4412_MUX_ADDR_SHIFT);

	con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
	con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;

	con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
	con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);

	if (pdata->noise_cancel_mode) {
		con &= ~(EXYNOS_TMU_TRIP_MODE_MASK << EXYNOS_TMU_TRIP_MODE_SHIFT);
		con |= (pdata->noise_cancel_mode << EXYNOS_TMU_TRIP_MODE_SHIFT);
	}

	return con;
}

static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	data->tmu_control(pdev, on);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
}

static int exynos4210_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(tz);
	int ret = 0, threshold_code, i;
	unsigned long reference, temp;
	unsigned int status;

	if (!trips) {
		pr_err("%s: Cannot get trip points from of-thermal.c!\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));

	/* Write temperature code for threshold */
	reference = trips[0].temperature / MCELSIUS;
	threshold_code = temp_to_code(data, reference);
	if (threshold_code < 0) {
		ret = threshold_code;
		goto out;
	}
	writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);

	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		temp = trips[i].temperature / MCELSIUS;
		writeb(temp - reference, data->base +
		       EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

static int exynos4412_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	const struct thermal_trip * const trips =
		of_thermal_get_trip_points(data->tzd);
	unsigned int status, trim_info, con, ctrl, rising_threshold;
	int ret = 0, threshold_code, i;
	unsigned long crit_temp = 0;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	if (data->soc == SOC_ARCH_EXYNOS3250 ||
	    data->soc == SOC_ARCH_EXYNOS4412 ||
	    data->soc == SOC_ARCH_EXYNOS5250) {
		if (data->soc == SOC_ARCH_EXYNOS3250) {
			ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
			ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
			writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
		}
		ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
		ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
		writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
	}

	/* On exynos5420 the triminfo register is in the shared space */
	if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
		trim_info = readl(data->base_second + EXYNOS_TMU_REG_TRIMINFO);
	else
		trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);

	sanitize_temp_error(data, trim_info);

	/* Write temperature code for rising and falling threshold */
	rising_threshold = readl(data->base + EXYNOS_THD_TEMP_RISE);
	rising_threshold = get_th_reg(data, rising_threshold, false);
	writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
	writel(get_th_reg(data, 0, true), data->base + EXYNOS_THD_TEMP_FALL);

	data->tmu_clear_irqs(data);

	/* if last threshold limit is also present */
	for (i = 0; i < of_thermal_get_ntrips(data->tzd); i++) {
		if (trips[i].type == THERMAL_TRIP_CRITICAL) {
			crit_temp = trips[i].temperature;
			break;
		}
	}

	if (i == of_thermal_get_ntrips(data->tzd)) {
		pr_err("%s: No CRITICAL trip point defined at of-thermal.c!\n",
		       __func__);
		ret = -EINVAL;
		goto out;
	}

	threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
	/* 1-4 level to be assigned in th0 reg */
	rising_threshold &= ~(0xff << 8 * i);
	rising_threshold |= threshold_code << 8 * i;
	writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
	con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
	con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);

out:
	return ret;
}

static int exynos5433_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	struct thermal_zone_device *tz = data->tzd;
	unsigned int status, trim_info;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int temp, temp_hist;
	int ret = 0, threshold_code, i, sensor_id, cal_type;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
	sanitize_temp_error(data, trim_info);

	/* Read the temperature sensor id */
	sensor_id = (trim_info & EXYNOS5433_TRIMINFO_SENSOR_ID_MASK)
				>> EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT;
	dev_info(&pdev->dev, "Temperature sensor ID: 0x%x\n", sensor_id);

	/* Read the calibration mode */
	writel(trim_info, data->base + EXYNOS_TMU_REG_TRIMINFO);
	cal_type = (trim_info & EXYNOS5433_TRIMINFO_CALIB_SEL_MASK)
				>> EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT;

	switch (cal_type) {
	case EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING:
		pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
		break;
	case EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING:
		pdata->cal_type = TYPE_TWO_POINT_TRIMMING;
		break;
	default:
		pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
		break;
	}

	dev_info(&pdev->dev, "Calibration type is %d-point calibration\n",
			cal_type ?  2 : 1);

	/* Write temperature code for rising and falling threshold */
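	/*
	 * Trip levels 0-3 are packed into THD_TEMP_RISE3_0/FALL3_0 and
	 * levels 4-7 into THD_TEMP_RISE7_4/FALL7_4, one byte per level.
	 */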
	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
		int rising_reg_offset, falling_reg_offset;
		int j = 0;

		switch (i) {
		case 0:
		case 1:
		case 2:
		case 3:
			rising_reg_offset = EXYNOS5433_THD_TEMP_RISE3_0;
			falling_reg_offset = EXYNOS5433_THD_TEMP_FALL3_0;
			j = i;
			break;
		case 4:
		case 5:
		case 6:
		case 7:
			rising_reg_offset = EXYNOS5433_THD_TEMP_RISE7_4;
			falling_reg_offset = EXYNOS5433_THD_TEMP_FALL7_4;
			j = i - 4;
			break;
		default:
			continue;
		}

		/* Write temperature code for rising threshold */
		tz->ops->get_trip_temp(tz, i, &temp);
		temp /= MCELSIUS;
		threshold_code = temp_to_code(data, temp);

		rising_threshold = readl(data->base + rising_reg_offset);
		rising_threshold |= (threshold_code << j * 8);
		writel(rising_threshold, data->base + rising_reg_offset);

		/* Write temperature code for falling threshold */
		tz->ops->get_trip_hyst(tz, i, &temp_hist);
		temp_hist = temp - (temp_hist / MCELSIUS);
		threshold_code = temp_to_code(data, temp_hist);

		falling_threshold = readl(data->base + falling_reg_offset);
		falling_threshold &= ~(0xff << j * 8);
		falling_threshold |= (threshold_code << j * 8);
		writel(falling_threshold, data->base + falling_reg_offset);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

static int exynos5440_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	unsigned int trim_info = 0, con, rising_threshold;
	int threshold_code;
	int crit_temp = 0;

	/*
	 * For exynos5440 soc triminfo value is swapped between TMU0 and
	 * TMU2, so the below logic is needed.
	 */
	switch (data->id) {
	case 0:
		trim_info = readl(data->base + EXYNOS5440_EFUSE_SWAP_OFFSET +
				 EXYNOS5440_TMU_S0_7_TRIM);
		break;
	case 1:
		trim_info = readl(data->base + EXYNOS5440_TMU_S0_7_TRIM);
		break;
	case 2:
		trim_info = readl(data->base - EXYNOS5440_EFUSE_SWAP_OFFSET +
				  EXYNOS5440_TMU_S0_7_TRIM);
	}
	sanitize_temp_error(data, trim_info);

	/* Write temperature code for rising and falling threshold */
	rising_threshold = readl(data->base + EXYNOS5440_TMU_S0_7_TH0);
	rising_threshold = get_th_reg(data, rising_threshold, false);
	writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH0);
	writel(0, data->base + EXYNOS5440_TMU_S0_7_TH1);

	data->tmu_clear_irqs(data);

	/* if last threshold limit is also present */
	if (!data->tzd->ops->get_crit_temp(data->tzd, &crit_temp)) {
		threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
		/* 5th level to be assigned in th2 reg */
		rising_threshold =
			threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
		writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH2);
		con = readl(data->base + EXYNOS5440_TMU_S0_7_CTRL);
		con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
		writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
	}
	/* Clear the PMIN in the common TMU register */
	if (!data->id)
		writel(0, data->base_second + EXYNOS5440_TMU_PMIN);

	return 0;
}

static int exynos7_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	struct exynos_tmu_platform_data *pdata = data->pdata;
	unsigned int status, trim_info;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int ret = 0, threshold_code, i;
	int temp, temp_hist;
	unsigned int reg_off, bit_off;

	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
	if (!status) {
		ret = -EBUSY;
		goto out;
	}

	trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);

	data->temp_error1 = trim_info & EXYNOS7_TMU_TEMP_MASK;
	if (!data->temp_error1 ||
	    (pdata->min_efuse_value > data->temp_error1) ||
	    (data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	/* Write temperature code for rising and falling threshold */
	for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) {
		/*
		 * On exynos7 there are 4 rising and 4 falling threshold
		 * registers (0x50-0x5c and 0x60-0x6c respectively). Each
		 * register holds the value of two threshold levels (at bit
		 * offsets 0 and 16). Based on the fact that there are at most
		 * eight possible trigger levels, calculate the register and
		 * bit offsets where the threshold levels are to be written.
		 *
		 * e.g. EXYNOS7_THD_TEMP_RISE7_6 (0x50)
		 * [24:16] - Threshold level 7
		 * [8:0] - Threshold level 6
		 * e.g. EXYNOS7_THD_TEMP_RISE5_4 (0x54)
		 * [24:16] - Threshold level 5
		 * [8:0] - Threshold level 4
		 *
		 * and similarly for falling thresholds.
		 *
		 * Based on the above, calculate the register and bit offsets
		 * for rising/falling threshold levels and populate them.
		 */
		reg_off = ((7 - i) / 2) * 4;
		bit_off = ((8 - i) % 2);

		tz->ops->get_trip_temp(tz, i, &temp);
		temp /= MCELSIUS;

		tz->ops->get_trip_hyst(tz, i, &temp_hist);
		temp_hist = temp - (temp_hist / MCELSIUS);

		/* Set 9-bit temperature code for rising threshold levels */
		threshold_code = temp_to_code(data, temp);
		rising_threshold = readl(data->base +
			EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
		rising_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
		rising_threshold |= threshold_code << (16 * bit_off);
		writel(rising_threshold,
		       data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);

		/* Set 9-bit temperature code for falling threshold levels */
		threshold_code = temp_to_code(data, temp_hist);
		falling_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
		falling_threshold |= threshold_code << (16 * bit_off);
		writel(falling_threshold,
		       data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
	}

	data->tmu_clear_irqs(data);
out:
	return ret;
}

static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS_TMU_INTEN_RISE0_SHIFT);

		if (data->soc != SOC_ARCH_EXYNOS4210)
			interrupt_en |=
				interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

static void exynos5433_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en, pd_det_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 7)
			<< EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
			(of_thermal_is_trip_valid(tz, 6)
			<< EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
			(of_thermal_is_trip_valid(tz, 5)
			<< EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
			(of_thermal_is_trip_valid(tz, 4)
			<< EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
			(of_thermal_is_trip_valid(tz, 3)
			<< EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			<< EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			<< EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			<< EXYNOS7_TMU_INTEN_RISE0_SHIFT);

		interrupt_en |=
			interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}

	pd_det_en = on ? EXYNOS5433_PD_DET_EN : 0;

	writel(pd_det_en, data->base + EXYNOS5433_TMU_PD_DET_EN);
	writel(interrupt_en, data->base + EXYNOS5433_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 3)
			 << EXYNOS5440_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			 << EXYNOS5440_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			 << EXYNOS5440_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			 << EXYNOS5440_TMU_INTEN_RISE0_SHIFT);
		interrupt_en |=
			interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + EXYNOS5440_TMU_S0_7_IRQEN);
	writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
}

static void exynos7_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tz = data->tzd;
	unsigned int con, interrupt_en;

	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));

	if (on) {
		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
		con |= (1 << EXYNOS7_PD_DET_EN_SHIFT);
		interrupt_en =
			(of_thermal_is_trip_valid(tz, 7)
			<< EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
			(of_thermal_is_trip_valid(tz, 6)
			<< EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
			(of_thermal_is_trip_valid(tz, 5)
			<< EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
			(of_thermal_is_trip_valid(tz, 4)
			<< EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
			(of_thermal_is_trip_valid(tz, 3)
			<< EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
			(of_thermal_is_trip_valid(tz, 2)
			<< EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
			(of_thermal_is_trip_valid(tz, 1)
			<< EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
			(of_thermal_is_trip_valid(tz, 0)
			<< EXYNOS7_TMU_INTEN_RISE0_SHIFT);

		interrupt_en |=
			interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
	} else {
		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
		con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT);
		interrupt_en = 0; /* Disable all interrupts */
	}

	writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
}

static int exynos_get_temp(void *p, int *temp)
{
	struct exynos_tmu_data *data = p;

	if (!data || !data->tmu_read)
		return -EINVAL;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	*temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;

	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return 0;
}

#ifdef CONFIG_THERMAL_EMULATION
static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
			    int temp)
{
	if (temp) {
		temp /= MCELSIUS;

		if (data->soc != SOC_ARCH_EXYNOS5440) {
			val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
			val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
		}
		if (data->soc == SOC_ARCH_EXYNOS7) {
			val &= ~(EXYNOS7_EMUL_DATA_MASK <<
				EXYNOS7_EMUL_DATA_SHIFT);
			val |= (temp_to_code(data, temp) <<
				EXYNOS7_EMUL_DATA_SHIFT) |
				EXYNOS_EMUL_ENABLE;
		} else {
			val &= ~(EXYNOS_EMUL_DATA_MASK <<
				EXYNOS_EMUL_DATA_SHIFT);
			val |= (temp_to_code(data, temp) <<
				EXYNOS_EMUL_DATA_SHIFT) |
				EXYNOS_EMUL_ENABLE;
		}
	} else {
		val &= ~EXYNOS_EMUL_ENABLE;
	}

	return val;
}

static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
					 int temp)
{
	unsigned int val;
	u32 emul_con;

	if (data->soc == SOC_ARCH_EXYNOS5260)
		emul_con = EXYNOS5260_EMUL_CON;
	else if (data->soc == SOC_ARCH_EXYNOS5433)
		emul_con = EXYNOS5433_TMU_EMUL_CON;
	else if (data->soc == SOC_ARCH_EXYNOS7)
		emul_con = EXYNOS7_TMU_REG_EMUL_CON;
	else
		emul_con = EXYNOS_EMUL_CON;

	val = readl(data->base + emul_con);
	val = get_emul_con_reg(data, val, temp);
	writel(val, data->base + emul_con);
}

static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data,
					 int temp)
{
	unsigned int val;

	val = readl(data->base + EXYNOS5440_TMU_S0_7_DEBUG);
	val = get_emul_con_reg(data, val, temp);
	writel(val, data->base + EXYNOS5440_TMU_S0_7_DEBUG);
}

static int exynos_tmu_set_emulation(void *drv_data, int temp)
{
	struct exynos_tmu_data *data = drv_data;
	int ret = -EINVAL;

	if (data->soc == SOC_ARCH_EXYNOS4210)
		goto out;

	if (temp && temp < MCELSIUS)
		goto out;

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	data->tmu_set_emulation(data, temp);
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	return 0;
out:
	return ret;
}
#else
#define exynos4412_tmu_set_emulation NULL
#define exynos5440_tmu_set_emulation NULL
static int exynos_tmu_set_emulation(void *drv_data, int temp)
	{ return -EINVAL; }
#endif /* CONFIG_THERMAL_EMULATION */

static int exynos4210_tmu_read(struct exynos_tmu_data *data)
{
	int ret = readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);

	/* "temp_code" should range between 75 and 175 */
	return (ret < 75 || ret > 175) ? -ENODATA : ret;
}

static int exynos4412_tmu_read(struct exynos_tmu_data *data)
{
	return readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
}

static int exynos5440_tmu_read(struct exynos_tmu_data *data)
{
	return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP);
}

static int exynos7_tmu_read(struct exynos_tmu_data *data)
{
	return readw(data->base + EXYNOS_TMU_REG_CURRENT_TEMP) &
		EXYNOS7_TMU_TEMP_MASK;
}

static void exynos_tmu_work(struct work_struct *work)
{
	struct exynos_tmu_data *data = container_of(work,
			struct exynos_tmu_data, irq_work);
	unsigned int val_type;

	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	/* Find which sensor generated this interrupt */
	if (data->soc == SOC_ARCH_EXYNOS5440) {
		val_type = readl(data->base_second + EXYNOS5440_TMU_IRQ_STATUS);
		if (!((val_type >> data->id) & 0x1))
			goto out;
	}
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	exynos_report_trigger(data);
	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* TODO: take action based on particular interrupt */
	data->tmu_clear_irqs(data);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
out:
	enable_irq(data->irq);
}

static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
{
	unsigned int val_irq;
	u32 tmu_intstat, tmu_intclear;

	if (data->soc == SOC_ARCH_EXYNOS5260) {
		tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT;
		tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR;
	} else if (data->soc == SOC_ARCH_EXYNOS7) {
		tmu_intstat = EXYNOS7_TMU_REG_INTPEND;
		tmu_intclear = EXYNOS7_TMU_REG_INTPEND;
	} else if (data->soc == SOC_ARCH_EXYNOS5433) {
		tmu_intstat = EXYNOS5433_TMU_REG_INTPEND;
		tmu_intclear = EXYNOS5433_TMU_REG_INTPEND;
	} else {
		tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
		tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
	}

	val_irq = readl(data->base + tmu_intstat);
	/*
	 * Clear the interrupts.  Please note that the documentation for
	 * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
	 * states that INTCLEAR register has a different placing of bits
	 * responsible for FALL IRQs than INTSTAT register.  Exynos5420
	 * and Exynos5440 documentation is correct (Exynos4210 doesn't
	 * support FALL IRQs at all).
	 */
	writel(val_irq, data->base + tmu_intclear);
}

static void exynos5440_tmu_clear_irqs(struct exynos_tmu_data *data)
{
	unsigned int val_irq;

	val_irq = readl(data->base + EXYNOS5440_TMU_S0_7_IRQ);
	/* clear the interrupts */
	writel(val_irq, data->base + EXYNOS5440_TMU_S0_7_IRQ);
}

static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
	struct exynos_tmu_data *data = id;

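	/*
	 * Defer handling to process context; the IRQ line is re-enabled
	 * at the end of exynos_tmu_work().
	 */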
	disable_irq_nosync(irq);
	schedule_work(&data->irq_work);

	return IRQ_HANDLED;
}

static const struct of_device_id exynos_tmu_match[] = {
	{ .compatible = "samsung,exynos3250-tmu", },
	{ .compatible = "samsung,exynos4210-tmu", },
	{ .compatible = "samsung,exynos4412-tmu", },
	{ .compatible = "samsung,exynos5250-tmu", },
	{ .compatible = "samsung,exynos5260-tmu", },
	{ .compatible = "samsung,exynos5420-tmu", },
	{ .compatible = "samsung,exynos5420-tmu-ext-triminfo", },
	{ .compatible = "samsung,exynos5433-tmu", },
	{ .compatible = "samsung,exynos5440-tmu", },
	{ .compatible = "samsung,exynos7-tmu", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);

static int exynos_of_get_soc_type(struct device_node *np)
{
	if (of_device_is_compatible(np, "samsung,exynos3250-tmu"))
		return SOC_ARCH_EXYNOS3250;
	else if (of_device_is_compatible(np, "samsung,exynos4210-tmu"))
		return SOC_ARCH_EXYNOS4210;
	else if (of_device_is_compatible(np, "samsung,exynos4412-tmu"))
		return SOC_ARCH_EXYNOS4412;
	else if (of_device_is_compatible(np, "samsung,exynos5250-tmu"))
		return SOC_ARCH_EXYNOS5250;
	else if (of_device_is_compatible(np, "samsung,exynos5260-tmu"))
		return SOC_ARCH_EXYNOS5260;
	else if (of_device_is_compatible(np, "samsung,exynos5420-tmu"))
		return SOC_ARCH_EXYNOS5420;
	else if (of_device_is_compatible(np,
					 "samsung,exynos5420-tmu-ext-triminfo"))
		return SOC_ARCH_EXYNOS5420_TRIMINFO;
	else if (of_device_is_compatible(np, "samsung,exynos5433-tmu"))
		return SOC_ARCH_EXYNOS5433;
	else if (of_device_is_compatible(np, "samsung,exynos5440-tmu"))
		return SOC_ARCH_EXYNOS5440;
	else if (of_device_is_compatible(np, "samsung,exynos7-tmu"))
		return SOC_ARCH_EXYNOS7;

	return -EINVAL;
}

static int exynos_of_sensor_conf(struct device_node *np,
				 struct exynos_tmu_platform_data *pdata)
{
	u32 value;
	int ret;

	of_node_get(np);

	ret = of_property_read_u32(np, "samsung,tmu_gain", &value);
	pdata->gain = (u8)value;
	of_property_read_u32(np, "samsung,tmu_reference_voltage", &value);
	pdata->reference_voltage = (u8)value;
	of_property_read_u32(np, "samsung,tmu_noise_cancel_mode", &value);
	pdata->noise_cancel_mode = (u8)value;

	of_property_read_u32(np, "samsung,tmu_efuse_value",
			     &pdata->efuse_value);
	of_property_read_u32(np, "samsung,tmu_min_efuse_value",
			     &pdata->min_efuse_value);
	of_property_read_u32(np, "samsung,tmu_max_efuse_value",
			     &pdata->max_efuse_value);

	of_property_read_u32(np, "samsung,tmu_first_point_trim", &value);
	pdata->first_point_trim = (u8)value;
	of_property_read_u32(np, "samsung,tmu_second_point_trim", &value);
	pdata->second_point_trim = (u8)value;
	of_property_read_u32(np, "samsung,tmu_default_temp_offset", &value);
	pdata->default_temp_offset = (u8)value;

	of_property_read_u32(np, "samsung,tmu_cal_type", &pdata->cal_type);
	of_property_read_u32(np, "samsung,tmu_cal_mode", &pdata->cal_mode);

	of_node_put(np);
	return 0;
}

static int exynos_map_dt_data(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata;
	struct resource res;

	if (!data || !pdev->dev.of_node)
		return -ENODEV;

	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
	if (data->id < 0)
		data->id = 0;

	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (data->irq <= 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENODEV;
	}

	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 0\n");
		return -ENODEV;
	}

	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
	if (!data->base) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -EADDRNOTAVAIL;
	}

	pdata = devm_kzalloc(&pdev->dev,
			     sizeof(struct exynos_tmu_platform_data),
			     GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	exynos_of_sensor_conf(pdev->dev.of_node, pdata);
	data->pdata = pdata;
	data->soc = exynos_of_get_soc_type(pdev->dev.of_node);

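	/*
	 * Hook up the SoC specific init/control/read/emulation/irq-clear
	 * helpers and the number of hardware trip points for this TMU.
	 */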
	switch (data->soc) {
	case SOC_ARCH_EXYNOS4210:
		data->tmu_initialize = exynos4210_tmu_initialize;
		data->tmu_control = exynos4210_tmu_control;
		data->tmu_read = exynos4210_tmu_read;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		data->ntrip = 4;
		break;
	case SOC_ARCH_EXYNOS3250:
	case SOC_ARCH_EXYNOS4412:
	case SOC_ARCH_EXYNOS5250:
	case SOC_ARCH_EXYNOS5260:
	case SOC_ARCH_EXYNOS5420:
	case SOC_ARCH_EXYNOS5420_TRIMINFO:
		data->tmu_initialize = exynos4412_tmu_initialize;
		data->tmu_control = exynos4210_tmu_control;
		data->tmu_read = exynos4412_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		data->ntrip = 4;
		break;
	case SOC_ARCH_EXYNOS5433:
		data->tmu_initialize = exynos5433_tmu_initialize;
		data->tmu_control = exynos5433_tmu_control;
		data->tmu_read = exynos4412_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		data->ntrip = 8;
		break;
	case SOC_ARCH_EXYNOS5440:
		data->tmu_initialize = exynos5440_tmu_initialize;
		data->tmu_control = exynos5440_tmu_control;
		data->tmu_read = exynos5440_tmu_read;
		data->tmu_set_emulation = exynos5440_tmu_set_emulation;
		data->tmu_clear_irqs = exynos5440_tmu_clear_irqs;
		data->ntrip = 4;
		break;
	case SOC_ARCH_EXYNOS7:
		data->tmu_initialize = exynos7_tmu_initialize;
		data->tmu_control = exynos7_tmu_control;
		data->tmu_read = exynos7_tmu_read;
		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
		data->ntrip = 8;
		break;
	default:
		dev_err(&pdev->dev, "Platform not supported\n");
		return -EINVAL;
	}

	/*
	 * Check if the TMU shares some registers and then try to map the
	 * memory of common registers.
	 */
	if (data->soc != SOC_ARCH_EXYNOS5420_TRIMINFO &&
	    data->soc != SOC_ARCH_EXYNOS5440)
		return 0;

	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 1\n");
		return -ENODEV;
	}

	data->base_second = devm_ioremap(&pdev->dev, res.start,
					resource_size(&res));
	if (!data->base_second) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -ENOMEM;
	}

	return 0;
}

static struct thermal_zone_of_device_ops exynos_sensor_ops = {
	.get_temp = exynos_get_temp,
	.set_emul_temp = exynos_tmu_set_emulation,
};

static int exynos_tmu_probe(struct platform_device *pdev)
{
	struct exynos_tmu_data *data;
	int ret;

	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
					GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	/*
	 * Try enabling the regulator if found
	 * TODO: Add regulator as an SOC feature, so that regulator enable
	 * is a compulsory call.
	 */
	data->regulator = devm_regulator_get_optional(&pdev->dev, "vtmu");
	if (!IS_ERR(data->regulator)) {
		ret = regulator_enable(data->regulator);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable vtmu\n");
			return ret;
		}
	} else {
		if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
	}

	ret = exynos_map_dt_data(pdev);
	if (ret)
		goto err_sensor;

	INIT_WORK(&data->irq_work, exynos_tmu_work);

	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
	if (IS_ERR(data->clk)) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		ret = PTR_ERR(data->clk);
		goto err_sensor;
	}

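	/*
	 * The tmu_triminfo_apbif clock is optional; it is only mandatory on
	 * Exynos5420 where the TRIMINFO register lives in the shared
	 * (second) register bank.
	 */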
	data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
	if (IS_ERR(data->clk_sec)) {
		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
			dev_err(&pdev->dev, "Failed to get triminfo clock\n");
			ret = PTR_ERR(data->clk_sec);
			goto err_sensor;
		}
	} else {
		ret = clk_prepare(data->clk_sec);
		if (ret) {
			dev_err(&pdev->dev, "Failed to prepare triminfo clock\n");
			goto err_sensor;
		}
	}

	ret = clk_prepare(data->clk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to prepare clock\n");
		goto err_clk_sec;
	}

	switch (data->soc) {
	case SOC_ARCH_EXYNOS5433:
	case SOC_ARCH_EXYNOS7:
		data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
		if (IS_ERR(data->sclk)) {
			dev_err(&pdev->dev, "Failed to get sclk\n");
			ret = PTR_ERR(data->sclk);
			goto err_clk;
		} else {
			ret = clk_prepare_enable(data->sclk);
			if (ret) {
				dev_err(&pdev->dev, "Failed to enable sclk\n");
				goto err_clk;
			}
		}
		break;
	default:
		break;
	}

	/*
	 * data->tzd must be registered before calling exynos_tmu_initialize(),
	 * requesting irq and calling exynos_tmu_control().
	 */
	data->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
						    &exynos_sensor_ops);
	if (IS_ERR(data->tzd)) {
		ret = PTR_ERR(data->tzd);
		dev_err(&pdev->dev, "Failed to register sensor: %d\n", ret);
		goto err_sclk;
	}

	ret = exynos_tmu_initialize(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize TMU\n");
		goto err_thermal;
	}

	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
		IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
		goto err_thermal;
	}

	exynos_tmu_control(pdev, true);
	return 0;

err_thermal:
	thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
err_sclk:
	clk_disable_unprepare(data->sclk);
err_clk:
	clk_unprepare(data->clk);
err_clk_sec:
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);
err_sensor:
	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return ret;
}

static int exynos_tmu_remove(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct thermal_zone_device *tzd = data->tzd;

	thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
	exynos_tmu_control(pdev, false);

	clk_disable_unprepare(data->sclk);
	clk_unprepare(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);

	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif

static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name   = "exynos-tmu",
		.pm     = EXYNOS_TMU_PM,
		.of_match_table = exynos_tmu_match,
	},
	.probe = exynos_tmu_probe,
	.remove	= exynos_tmu_remove,
};

module_platform_driver(exynos_tmu_driver);

MODULE_DESCRIPTION("EXYNOS TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");