1 /*
2  * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
3  *
4  *  Copyright (C) 2014 Samsung Electronics
5  *  Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
6  *  Lukasz Majewski <l.majewski@samsung.com>
7  *
8  *  Copyright (C) 2011 Samsung Electronics
9  *  Donggeun Kim <dg77.kim@samsung.com>
10  *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with this program; if not, write to the Free Software
24  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25  *
26  */
27 
28 #include <linux/clk.h>
29 #include <linux/io.h>
30 #include <linux/interrupt.h>
31 #include <linux/module.h>
32 #include <linux/of.h>
33 #include <linux/of_address.h>
34 #include <linux/of_irq.h>
35 #include <linux/platform_device.h>
36 #include <linux/regulator/consumer.h>
37 
38 #include "exynos_tmu.h"
39 #include "../thermal_core.h"
40 
41 /* Exynos generic registers */
42 #define EXYNOS_TMU_REG_TRIMINFO		0x0
43 #define EXYNOS_TMU_REG_CONTROL		0x20
44 #define EXYNOS_TMU_REG_STATUS		0x28
45 #define EXYNOS_TMU_REG_CURRENT_TEMP	0x40
46 #define EXYNOS_TMU_REG_INTEN		0x70
47 #define EXYNOS_TMU_REG_INTSTAT		0x74
48 #define EXYNOS_TMU_REG_INTCLEAR		0x78
49 
50 #define EXYNOS_TMU_TEMP_MASK		0xff
51 #define EXYNOS_TMU_REF_VOLTAGE_SHIFT	24
52 #define EXYNOS_TMU_REF_VOLTAGE_MASK	0x1f
53 #define EXYNOS_TMU_BUF_SLOPE_SEL_MASK	0xf
54 #define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT	8
55 #define EXYNOS_TMU_CORE_EN_SHIFT	0
56 
57 /* Exynos3250 specific registers */
58 #define EXYNOS_TMU_TRIMINFO_CON1	0x10
59 
60 /* Exynos4210 specific registers */
61 #define EXYNOS4210_TMU_REG_THRESHOLD_TEMP	0x44
62 #define EXYNOS4210_TMU_REG_TRIG_LEVEL0	0x50
63 
64 /* Exynos5250, Exynos4412, Exynos3250 specific registers */
65 #define EXYNOS_TMU_TRIMINFO_CON2	0x14
66 #define EXYNOS_THD_TEMP_RISE		0x50
67 #define EXYNOS_THD_TEMP_FALL		0x54
68 #define EXYNOS_EMUL_CON		0x80
69 
70 #define EXYNOS_TRIMINFO_RELOAD_ENABLE	1
71 #define EXYNOS_TRIMINFO_25_SHIFT	0
72 #define EXYNOS_TRIMINFO_85_SHIFT	8
73 #define EXYNOS_TMU_TRIP_MODE_SHIFT	13
74 #define EXYNOS_TMU_TRIP_MODE_MASK	0x7
75 #define EXYNOS_TMU_THERM_TRIP_EN_SHIFT	12
76 
77 #define EXYNOS_TMU_INTEN_RISE0_SHIFT	0
78 #define EXYNOS_TMU_INTEN_RISE1_SHIFT	4
79 #define EXYNOS_TMU_INTEN_RISE2_SHIFT	8
80 #define EXYNOS_TMU_INTEN_RISE3_SHIFT	12
81 #define EXYNOS_TMU_INTEN_FALL0_SHIFT	16
82 
83 #define EXYNOS_EMUL_TIME	0x57F0
84 #define EXYNOS_EMUL_TIME_MASK	0xffff
85 #define EXYNOS_EMUL_TIME_SHIFT	16
86 #define EXYNOS_EMUL_DATA_SHIFT	8
87 #define EXYNOS_EMUL_DATA_MASK	0xFF
88 #define EXYNOS_EMUL_ENABLE	0x1
89 
90 /* Exynos5260 specific */
91 #define EXYNOS5260_TMU_REG_INTEN		0xC0
92 #define EXYNOS5260_TMU_REG_INTSTAT		0xC4
93 #define EXYNOS5260_TMU_REG_INTCLEAR		0xC8
94 #define EXYNOS5260_EMUL_CON			0x100
95 
96 /* Exynos4412 specific */
97 #define EXYNOS4412_MUX_ADDR_VALUE          6
98 #define EXYNOS4412_MUX_ADDR_SHIFT          20
99 
100 /* Exynos5433 specific registers */
101 #define EXYNOS5433_TMU_REG_CONTROL1		0x024
102 #define EXYNOS5433_TMU_SAMPLING_INTERVAL	0x02c
103 #define EXYNOS5433_TMU_COUNTER_VALUE0		0x030
104 #define EXYNOS5433_TMU_COUNTER_VALUE1		0x034
105 #define EXYNOS5433_TMU_REG_CURRENT_TEMP1	0x044
106 #define EXYNOS5433_THD_TEMP_RISE3_0		0x050
107 #define EXYNOS5433_THD_TEMP_RISE7_4		0x054
108 #define EXYNOS5433_THD_TEMP_FALL3_0		0x060
109 #define EXYNOS5433_THD_TEMP_FALL7_4		0x064
110 #define EXYNOS5433_TMU_REG_INTEN		0x0c0
111 #define EXYNOS5433_TMU_REG_INTPEND		0x0c8
112 #define EXYNOS5433_TMU_EMUL_CON			0x110
113 #define EXYNOS5433_TMU_PD_DET_EN		0x130
114 
115 #define EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT	16
116 #define EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT	23
117 #define EXYNOS5433_TRIMINFO_SENSOR_ID_MASK	\
118 			(0xf << EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT)
119 #define EXYNOS5433_TRIMINFO_CALIB_SEL_MASK	BIT(23)
120 
121 #define EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING	0
122 #define EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING	1
123 
124 #define EXYNOS5433_PD_DET_EN			1
125 
/* exynos5440 specific registers */
127 #define EXYNOS5440_TMU_S0_7_TRIM		0x000
128 #define EXYNOS5440_TMU_S0_7_CTRL		0x020
129 #define EXYNOS5440_TMU_S0_7_DEBUG		0x040
130 #define EXYNOS5440_TMU_S0_7_TEMP		0x0f0
131 #define EXYNOS5440_TMU_S0_7_TH0			0x110
132 #define EXYNOS5440_TMU_S0_7_TH1			0x130
133 #define EXYNOS5440_TMU_S0_7_TH2			0x150
134 #define EXYNOS5440_TMU_S0_7_IRQEN		0x210
135 #define EXYNOS5440_TMU_S0_7_IRQ			0x230
136 /* exynos5440 common registers */
137 #define EXYNOS5440_TMU_IRQ_STATUS		0x000
138 #define EXYNOS5440_TMU_PMIN			0x004
139 
140 #define EXYNOS5440_TMU_INTEN_RISE0_SHIFT	0
141 #define EXYNOS5440_TMU_INTEN_RISE1_SHIFT	1
142 #define EXYNOS5440_TMU_INTEN_RISE2_SHIFT	2
143 #define EXYNOS5440_TMU_INTEN_RISE3_SHIFT	3
144 #define EXYNOS5440_TMU_INTEN_FALL0_SHIFT	4
145 #define EXYNOS5440_TMU_TH_RISE4_SHIFT		24
146 #define EXYNOS5440_EFUSE_SWAP_OFFSET		8
147 
148 /* Exynos7 specific registers */
149 #define EXYNOS7_THD_TEMP_RISE7_6		0x50
150 #define EXYNOS7_THD_TEMP_FALL7_6		0x60
151 #define EXYNOS7_TMU_REG_INTEN			0x110
152 #define EXYNOS7_TMU_REG_INTPEND			0x118
153 #define EXYNOS7_TMU_REG_EMUL_CON		0x160
154 
155 #define EXYNOS7_TMU_TEMP_MASK			0x1ff
156 #define EXYNOS7_PD_DET_EN_SHIFT			23
157 #define EXYNOS7_TMU_INTEN_RISE0_SHIFT		0
158 #define EXYNOS7_TMU_INTEN_RISE1_SHIFT		1
159 #define EXYNOS7_TMU_INTEN_RISE2_SHIFT		2
160 #define EXYNOS7_TMU_INTEN_RISE3_SHIFT		3
161 #define EXYNOS7_TMU_INTEN_RISE4_SHIFT		4
162 #define EXYNOS7_TMU_INTEN_RISE5_SHIFT		5
163 #define EXYNOS7_TMU_INTEN_RISE6_SHIFT		6
164 #define EXYNOS7_TMU_INTEN_RISE7_SHIFT		7
165 #define EXYNOS7_EMUL_DATA_SHIFT			7
166 #define EXYNOS7_EMUL_DATA_MASK			0x1ff
167 
168 #define MCELSIUS	1000
169 /**
 * struct exynos_tmu_data : A structure to hold the private data of the TMU
 *	driver
172  * @id: identifier of the one instance of the TMU controller.
173  * @pdata: pointer to the tmu platform/configuration data
174  * @base: base address of the single instance of the TMU controller.
175  * @base_second: base address of the common registers of the TMU controller.
176  * @irq: irq number of the TMU controller.
177  * @soc: id of the SOC type.
178  * @irq_work: pointer to the irq work structure.
179  * @lock: lock to implement synchronization.
180  * @clk: pointer to the clock structure.
181  * @clk_sec: pointer to the clock structure for accessing the base_second.
182  * @sclk: pointer to the clock structure for accessing the tmu special clk.
183  * @temp_error1: fused value of the first point trim.
184  * @temp_error2: fused value of the second point trim.
185  * @regulator: pointer to the TMU regulator structure.
 * @tzd: pointer to the registered thermal_zone_device structure.
187  * @ntrip: number of supported trip points.
188  * @enabled: current status of TMU device
189  * @tmu_initialize: SoC specific TMU initialization method
190  * @tmu_control: SoC specific TMU control method
191  * @tmu_read: SoC specific TMU temperature read method
192  * @tmu_set_emulation: SoC specific TMU emulation setting method
193  * @tmu_clear_irqs: SoC specific TMU interrupts clearing method
194  */
195 struct exynos_tmu_data {
196 	int id;
197 	struct exynos_tmu_platform_data *pdata;
198 	void __iomem *base;
199 	void __iomem *base_second;
200 	int irq;
201 	enum soc_type soc;
202 	struct work_struct irq_work;
203 	struct mutex lock;
204 	struct clk *clk, *clk_sec, *sclk;
205 	u16 temp_error1, temp_error2;
206 	struct regulator *regulator;
207 	struct thermal_zone_device *tzd;
208 	unsigned int ntrip;
209 	bool enabled;
210 
211 	int (*tmu_initialize)(struct platform_device *pdev);
212 	void (*tmu_control)(struct platform_device *pdev, bool on);
213 	int (*tmu_read)(struct exynos_tmu_data *data);
214 	void (*tmu_set_emulation)(struct exynos_tmu_data *data, int temp);
215 	void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
216 };
217 
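/*
 * Notify the thermal core that a trip point has been crossed and send a
 * uevent carrying the index of the trip level that fired.
 */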
218 static void exynos_report_trigger(struct exynos_tmu_data *p)
219 {
220 	char data[10], *envp[] = { data, NULL };
221 	struct thermal_zone_device *tz = p->tzd;
222 	int temp;
223 	unsigned int i;
224 
225 	if (!tz) {
226 		pr_err("No thermal zone device defined\n");
227 		return;
228 	}
229 
230 	thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
231 
232 	mutex_lock(&tz->lock);
233 	/* Find the level for which trip happened */
234 	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
235 		tz->ops->get_trip_temp(tz, i, &temp);
236 		if (tz->last_temperature < temp)
237 			break;
238 	}
239 
240 	snprintf(data, sizeof(data), "%u", i);
241 	kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, envp);
242 	mutex_unlock(&tz->lock);
243 }
244 
245 /*
246  * TMU treats temperature as a mapped temperature code.
247  * The temperature is converted differently depending on the calibration type.
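 *
 * With two-point trimming the code is linearly interpolated between the two
 * fused trim codes (typically calibrated at 25 and 85 degC); with one-point
 * trimming only a constant offset derived from the first trim code is applied.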
248  */
249 static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
250 {
251 	struct exynos_tmu_platform_data *pdata = data->pdata;
252 	int temp_code;
253 
254 	switch (pdata->cal_type) {
255 	case TYPE_TWO_POINT_TRIMMING:
256 		temp_code = (temp - pdata->first_point_trim) *
257 			(data->temp_error2 - data->temp_error1) /
258 			(pdata->second_point_trim - pdata->first_point_trim) +
259 			data->temp_error1;
260 		break;
261 	case TYPE_ONE_POINT_TRIMMING:
262 		temp_code = temp + data->temp_error1 - pdata->first_point_trim;
263 		break;
264 	default:
265 		temp_code = temp + pdata->default_temp_offset;
266 		break;
267 	}
268 
269 	return temp_code;
270 }
271 
272 /*
273  * Calculate a temperature value from a temperature code.
274  * The unit of the temperature is degree Celsius.
275  */
276 static int code_to_temp(struct exynos_tmu_data *data, u16 temp_code)
277 {
278 	struct exynos_tmu_platform_data *pdata = data->pdata;
279 	int temp;
280 
281 	switch (pdata->cal_type) {
282 	case TYPE_TWO_POINT_TRIMMING:
283 		temp = (temp_code - data->temp_error1) *
284 			(pdata->second_point_trim - pdata->first_point_trim) /
285 			(data->temp_error2 - data->temp_error1) +
286 			pdata->first_point_trim;
287 		break;
288 	case TYPE_ONE_POINT_TRIMMING:
289 		temp = temp_code - data->temp_error1 + pdata->first_point_trim;
290 		break;
291 	default:
292 		temp = temp_code - pdata->default_temp_offset;
293 		break;
294 	}
295 
296 	return temp;
297 }
298 
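/*
 * Extract the two fused trim codes from TRIMINFO.  Fall back to the
 * platform-provided efuse_value when the first code is blank or outside
 * [min_efuse_value, max_efuse_value], or when the second code is blank.
 */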
299 static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
300 {
301 	struct exynos_tmu_platform_data *pdata = data->pdata;
302 
303 	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
304 	data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
305 				EXYNOS_TMU_TEMP_MASK);
306 
307 	if (!data->temp_error1 ||
308 		(pdata->min_efuse_value > data->temp_error1) ||
309 		(data->temp_error1 > pdata->max_efuse_value))
310 		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
311 
312 	if (!data->temp_error2)
313 		data->temp_error2 =
314 			(pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
315 			EXYNOS_TMU_TEMP_MASK;
316 }
317 
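/*
 * Pack one 8-bit temperature code per (non-critical) trip point into the
 * threshold register value; for the falling thresholds the trip hysteresis
 * is subtracted first.
 */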
318 static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling)
319 {
320 	struct thermal_zone_device *tz = data->tzd;
321 	const struct thermal_trip * const trips =
322 		of_thermal_get_trip_points(tz);
323 	unsigned long temp;
324 	int i;
325 
326 	if (!trips) {
327 		pr_err("%s: Cannot get trip points from of-thermal.c!\n",
328 		       __func__);
329 		return 0;
330 	}
331 
332 	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
333 		if (trips[i].type == THERMAL_TRIP_CRITICAL)
334 			continue;
335 
336 		temp = trips[i].temperature / MCELSIUS;
337 		if (falling)
338 			temp -= (trips[i].hysteresis / MCELSIUS);
339 		else
340 			threshold &= ~(0xff << 8 * i);
341 
342 		threshold |= temp_to_code(data, temp) << 8 * i;
343 	}
344 
345 	return threshold;
346 }
347 
348 static int exynos_tmu_initialize(struct platform_device *pdev)
349 {
350 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
351 	int ret;
352 
353 	if (of_thermal_get_ntrips(data->tzd) > data->ntrip) {
354 		dev_info(&pdev->dev,
355 			 "More trip points than supported by this TMU.\n");
356 		dev_info(&pdev->dev,
357 			 "%d trip points should be configured in polling mode.\n",
358 			 (of_thermal_get_ntrips(data->tzd) - data->ntrip));
359 	}
360 
361 	mutex_lock(&data->lock);
362 	clk_enable(data->clk);
363 	if (!IS_ERR(data->clk_sec))
364 		clk_enable(data->clk_sec);
365 	ret = data->tmu_initialize(pdev);
366 	clk_disable(data->clk);
367 	mutex_unlock(&data->lock);
368 	if (!IS_ERR(data->clk_sec))
369 		clk_disable(data->clk_sec);
370 
371 	return ret;
372 }
373 
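/*
 * Build the CONTROL register value from platform data: reference voltage,
 * buffer slope (gain), the optional noise-cancellation trip mode and, on
 * Exynos3250/4412, the sensing probe mux selection.
 */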
374 static u32 get_con_reg(struct exynos_tmu_data *data, u32 con)
375 {
376 	struct exynos_tmu_platform_data *pdata = data->pdata;
377 
378 	if (data->soc == SOC_ARCH_EXYNOS4412 ||
379 	    data->soc == SOC_ARCH_EXYNOS3250)
380 		con |= (EXYNOS4412_MUX_ADDR_VALUE << EXYNOS4412_MUX_ADDR_SHIFT);
381 
382 	con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
383 	con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;
384 
385 	con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
386 	con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
387 
388 	if (pdata->noise_cancel_mode) {
389 		con &= ~(EXYNOS_TMU_TRIP_MODE_MASK << EXYNOS_TMU_TRIP_MODE_SHIFT);
390 		con |= (pdata->noise_cancel_mode << EXYNOS_TMU_TRIP_MODE_SHIFT);
391 	}
392 
393 	return con;
394 }
395 
396 static void exynos_tmu_control(struct platform_device *pdev, bool on)
397 {
398 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
399 
400 	mutex_lock(&data->lock);
401 	clk_enable(data->clk);
402 	data->tmu_control(pdev, on);
403 	data->enabled = on;
404 	clk_disable(data->clk);
405 	mutex_unlock(&data->lock);
406 }
407 
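/*
 * Exynos4210 takes one absolute threshold temperature plus per-trip offsets
 * relative to it: program THRESHOLD_TEMP with the first trip point and the
 * TRIG_LEVELx registers with the offsets of the remaining trip points.
 */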
408 static int exynos4210_tmu_initialize(struct platform_device *pdev)
409 {
410 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
411 	struct thermal_zone_device *tz = data->tzd;
412 	const struct thermal_trip * const trips =
413 		of_thermal_get_trip_points(tz);
414 	int ret = 0, threshold_code, i;
415 	unsigned long reference, temp;
416 	unsigned int status;
417 
418 	if (!trips) {
419 		pr_err("%s: Cannot get trip points from of-thermal.c!\n",
420 		       __func__);
421 		ret = -ENODEV;
422 		goto out;
423 	}
424 
425 	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
426 	if (!status) {
427 		ret = -EBUSY;
428 		goto out;
429 	}
430 
431 	sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));
432 
433 	/* Write temperature code for threshold */
434 	reference = trips[0].temperature / MCELSIUS;
435 	threshold_code = temp_to_code(data, reference);
436 	if (threshold_code < 0) {
437 		ret = threshold_code;
438 		goto out;
439 	}
440 	writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
441 
442 	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
443 		temp = trips[i].temperature / MCELSIUS;
444 		writeb(temp - reference, data->base +
445 		       EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
446 	}
447 
448 	data->tmu_clear_irqs(data);
449 out:
450 	return ret;
451 }
452 
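/*
 * Shared init path for Exynos3250/4412/5250/5260/5420: reload the trim info
 * where supported, program the packed rising/falling thresholds and arm the
 * hardware thermal trip at the critical trip point.
 */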
453 static int exynos4412_tmu_initialize(struct platform_device *pdev)
454 {
455 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
456 	const struct thermal_trip * const trips =
457 		of_thermal_get_trip_points(data->tzd);
458 	unsigned int status, trim_info, con, ctrl, rising_threshold;
459 	int ret = 0, threshold_code, i;
460 	unsigned long crit_temp = 0;
461 
462 	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
463 	if (!status) {
464 		ret = -EBUSY;
465 		goto out;
466 	}
467 
468 	if (data->soc == SOC_ARCH_EXYNOS3250 ||
469 	    data->soc == SOC_ARCH_EXYNOS4412 ||
470 	    data->soc == SOC_ARCH_EXYNOS5250) {
471 		if (data->soc == SOC_ARCH_EXYNOS3250) {
472 			ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON1);
473 			ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
474 			writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON1);
475 		}
476 		ctrl = readl(data->base + EXYNOS_TMU_TRIMINFO_CON2);
477 		ctrl |= EXYNOS_TRIMINFO_RELOAD_ENABLE;
478 		writel(ctrl, data->base + EXYNOS_TMU_TRIMINFO_CON2);
479 	}
480 
481 	/* On exynos5420 the triminfo register is in the shared space */
482 	if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
483 		trim_info = readl(data->base_second + EXYNOS_TMU_REG_TRIMINFO);
484 	else
485 		trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
486 
487 	sanitize_temp_error(data, trim_info);
488 
489 	/* Write temperature code for rising and falling threshold */
490 	rising_threshold = readl(data->base + EXYNOS_THD_TEMP_RISE);
491 	rising_threshold = get_th_reg(data, rising_threshold, false);
492 	writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
493 	writel(get_th_reg(data, 0, true), data->base + EXYNOS_THD_TEMP_FALL);
494 
495 	data->tmu_clear_irqs(data);
496 
	/*
	 * Find the critical trip so that it can also be programmed as the
	 * last threshold level with the hardware thermal trip enabled.
	 */
498 	for (i = 0; i < of_thermal_get_ntrips(data->tzd); i++) {
499 		if (trips[i].type == THERMAL_TRIP_CRITICAL) {
500 			crit_temp = trips[i].temperature;
501 			break;
502 		}
503 	}
504 
505 	if (i == of_thermal_get_ntrips(data->tzd)) {
506 		pr_err("%s: No CRITICAL trip point defined at of-thermal.c!\n",
507 		       __func__);
508 		ret = -EINVAL;
509 		goto out;
510 	}
511 
512 	threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
513 	/* 1-4 level to be assigned in th0 reg */
514 	rising_threshold &= ~(0xff << 8 * i);
515 	rising_threshold |= threshold_code << 8 * i;
516 	writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
517 	con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
518 	con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
519 	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
520 
521 out:
522 	return ret;
523 }
524 
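/*
 * Exynos5433 keeps the sensor id and calibration type in TRIMINFO and splits
 * the eight trip levels across two rising and two falling threshold
 * registers, four 8-bit codes each.
 */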
525 static int exynos5433_tmu_initialize(struct platform_device *pdev)
526 {
527 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
528 	struct exynos_tmu_platform_data *pdata = data->pdata;
529 	struct thermal_zone_device *tz = data->tzd;
530 	unsigned int status, trim_info;
531 	unsigned int rising_threshold = 0, falling_threshold = 0;
532 	int temp, temp_hist;
533 	int ret = 0, threshold_code, i, sensor_id, cal_type;
534 
535 	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
536 	if (!status) {
537 		ret = -EBUSY;
538 		goto out;
539 	}
540 
541 	trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
542 	sanitize_temp_error(data, trim_info);
543 
544 	/* Read the temperature sensor id */
545 	sensor_id = (trim_info & EXYNOS5433_TRIMINFO_SENSOR_ID_MASK)
546 				>> EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT;
547 	dev_info(&pdev->dev, "Temperature sensor ID: 0x%x\n", sensor_id);
548 
549 	/* Read the calibration mode */
550 	writel(trim_info, data->base + EXYNOS_TMU_REG_TRIMINFO);
551 	cal_type = (trim_info & EXYNOS5433_TRIMINFO_CALIB_SEL_MASK)
552 				>> EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT;
553 
554 	switch (cal_type) {
555 	case EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING:
556 		pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
557 		break;
558 	case EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING:
559 		pdata->cal_type = TYPE_TWO_POINT_TRIMMING;
560 		break;
561 	default:
562 		pdata->cal_type = TYPE_ONE_POINT_TRIMMING;
563 		break;
564 	}
565 
566 	dev_info(&pdev->dev, "Calibration type is %d-point calibration\n",
567 			cal_type ?  2 : 1);
568 
569 	/* Write temperature code for rising and falling threshold */
570 	for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
571 		int rising_reg_offset, falling_reg_offset;
572 		int j = 0;
573 
574 		switch (i) {
575 		case 0:
576 		case 1:
577 		case 2:
578 		case 3:
579 			rising_reg_offset = EXYNOS5433_THD_TEMP_RISE3_0;
580 			falling_reg_offset = EXYNOS5433_THD_TEMP_FALL3_0;
581 			j = i;
582 			break;
583 		case 4:
584 		case 5:
585 		case 6:
586 		case 7:
587 			rising_reg_offset = EXYNOS5433_THD_TEMP_RISE7_4;
588 			falling_reg_offset = EXYNOS5433_THD_TEMP_FALL7_4;
589 			j = i - 4;
590 			break;
591 		default:
592 			continue;
593 		}
594 
595 		/* Write temperature code for rising threshold */
596 		tz->ops->get_trip_temp(tz, i, &temp);
597 		temp /= MCELSIUS;
598 		threshold_code = temp_to_code(data, temp);
599 
		rising_threshold = readl(data->base + rising_reg_offset);
		rising_threshold &= ~(0xff << j * 8);
		rising_threshold |= (threshold_code << j * 8);
602 		writel(rising_threshold, data->base + rising_reg_offset);
603 
604 		/* Write temperature code for falling threshold */
605 		tz->ops->get_trip_hyst(tz, i, &temp_hist);
606 		temp_hist = temp - (temp_hist / MCELSIUS);
607 		threshold_code = temp_to_code(data, temp_hist);
608 
609 		falling_threshold = readl(data->base + falling_reg_offset);
610 		falling_threshold &= ~(0xff << j * 8);
611 		falling_threshold |= (threshold_code << j * 8);
612 		writel(falling_threshold, data->base + falling_reg_offset);
613 	}
614 
615 	data->tmu_clear_irqs(data);
616 out:
617 	return ret;
618 }
619 
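/*
 * Exynos5440: fetch the (possibly swapped) trim info for this sensor,
 * program the rising thresholds into TH0, clear TH1 and, if a critical trip
 * is defined, put it into TH2 as the hardware trip level.
 */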
620 static int exynos5440_tmu_initialize(struct platform_device *pdev)
621 {
622 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
623 	unsigned int trim_info = 0, con, rising_threshold;
624 	int threshold_code;
625 	int crit_temp = 0;
626 
627 	/*
	 * On the exynos5440 SoC the triminfo value is swapped between TMU0
	 * and TMU2, so the logic below is needed.
630 	 */
631 	switch (data->id) {
632 	case 0:
633 		trim_info = readl(data->base + EXYNOS5440_EFUSE_SWAP_OFFSET +
634 				 EXYNOS5440_TMU_S0_7_TRIM);
635 		break;
636 	case 1:
637 		trim_info = readl(data->base + EXYNOS5440_TMU_S0_7_TRIM);
638 		break;
639 	case 2:
640 		trim_info = readl(data->base - EXYNOS5440_EFUSE_SWAP_OFFSET +
641 				  EXYNOS5440_TMU_S0_7_TRIM);
642 	}
643 	sanitize_temp_error(data, trim_info);
644 
645 	/* Write temperature code for rising and falling threshold */
646 	rising_threshold = readl(data->base + EXYNOS5440_TMU_S0_7_TH0);
647 	rising_threshold = get_th_reg(data, rising_threshold, false);
648 	writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH0);
649 	writel(0, data->base + EXYNOS5440_TMU_S0_7_TH1);
650 
651 	data->tmu_clear_irqs(data);
652 
	/* If a critical trip is defined, program it as the hardware trip level */
654 	if (!data->tzd->ops->get_crit_temp(data->tzd, &crit_temp)) {
655 		threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
656 		/* 5th level to be assigned in th2 reg */
657 		rising_threshold =
658 			threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
659 		writel(rising_threshold, data->base + EXYNOS5440_TMU_S0_7_TH2);
660 		con = readl(data->base + EXYNOS5440_TMU_S0_7_CTRL);
661 		con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
662 		writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
663 	}
664 	/* Clear the PMIN in the common TMU register */
665 	if (!data->id)
666 		writel(0, data->base_second + EXYNOS5440_TMU_PMIN);
667 
668 	return 0;
669 }
670 
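/*
 * Exynos7 uses 9-bit temperature codes and packs two trip levels per 32-bit
 * threshold register (bit offsets 0 and 16), as detailed in the comment
 * inside the loop below.
 */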
671 static int exynos7_tmu_initialize(struct platform_device *pdev)
672 {
673 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
674 	struct thermal_zone_device *tz = data->tzd;
675 	struct exynos_tmu_platform_data *pdata = data->pdata;
676 	unsigned int status, trim_info;
677 	unsigned int rising_threshold = 0, falling_threshold = 0;
678 	int ret = 0, threshold_code, i;
679 	int temp, temp_hist;
680 	unsigned int reg_off, bit_off;
681 
682 	status = readb(data->base + EXYNOS_TMU_REG_STATUS);
683 	if (!status) {
684 		ret = -EBUSY;
685 		goto out;
686 	}
687 
688 	trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
689 
690 	data->temp_error1 = trim_info & EXYNOS7_TMU_TEMP_MASK;
691 	if (!data->temp_error1 ||
692 	    (pdata->min_efuse_value > data->temp_error1) ||
693 	    (data->temp_error1 > pdata->max_efuse_value))
694 		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
695 
696 	/* Write temperature code for rising and falling threshold */
697 	for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) {
698 		/*
699 		 * On exynos7 there are 4 rising and 4 falling threshold
700 		 * registers (0x50-0x5c and 0x60-0x6c respectively). Each
701 		 * register holds the value of two threshold levels (at bit
		 * offsets 0 and 16). Based on the fact that there are at most
703 		 * eight possible trigger levels, calculate the register and
704 		 * bit offsets where the threshold levels are to be written.
705 		 *
706 		 * e.g. EXYNOS7_THD_TEMP_RISE7_6 (0x50)
707 		 * [24:16] - Threshold level 7
708 		 * [8:0] - Threshold level 6
709 		 * e.g. EXYNOS7_THD_TEMP_RISE5_4 (0x54)
710 		 * [24:16] - Threshold level 5
711 		 * [8:0] - Threshold level 4
712 		 *
713 		 * and similarly for falling thresholds.
714 		 *
715 		 * Based on the above, calculate the register and bit offsets
716 		 * for rising/falling threshold levels and populate them.
717 		 */
718 		reg_off = ((7 - i) / 2) * 4;
719 		bit_off = ((8 - i) % 2);
720 
721 		tz->ops->get_trip_temp(tz, i, &temp);
722 		temp /= MCELSIUS;
723 
724 		tz->ops->get_trip_hyst(tz, i, &temp_hist);
725 		temp_hist = temp - (temp_hist / MCELSIUS);
726 
727 		/* Set 9-bit temperature code for rising threshold levels */
728 		threshold_code = temp_to_code(data, temp);
729 		rising_threshold = readl(data->base +
730 			EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
731 		rising_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
732 		rising_threshold |= threshold_code << (16 * bit_off);
733 		writel(rising_threshold,
734 		       data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
735 
736 		/* Set 9-bit temperature code for falling threshold levels */
737 		threshold_code = temp_to_code(data, temp_hist);
738 		falling_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
739 		falling_threshold |= threshold_code << (16 * bit_off);
740 		writel(falling_threshold,
741 		       data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
742 	}
743 
744 	data->tmu_clear_irqs(data);
745 out:
746 	return ret;
747 }
748 
749 static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
750 {
751 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
752 	struct thermal_zone_device *tz = data->tzd;
753 	unsigned int con, interrupt_en;
754 
755 	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
756 
757 	if (on) {
758 		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
759 		interrupt_en =
760 			(of_thermal_is_trip_valid(tz, 3)
761 			 << EXYNOS_TMU_INTEN_RISE3_SHIFT) |
762 			(of_thermal_is_trip_valid(tz, 2)
763 			 << EXYNOS_TMU_INTEN_RISE2_SHIFT) |
764 			(of_thermal_is_trip_valid(tz, 1)
765 			 << EXYNOS_TMU_INTEN_RISE1_SHIFT) |
766 			(of_thermal_is_trip_valid(tz, 0)
767 			 << EXYNOS_TMU_INTEN_RISE0_SHIFT);
768 
769 		if (data->soc != SOC_ARCH_EXYNOS4210)
770 			interrupt_en |=
771 				interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
772 	} else {
773 		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
774 		interrupt_en = 0; /* Disable all interrupts */
775 	}
776 	writel(interrupt_en, data->base + EXYNOS_TMU_REG_INTEN);
777 	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
778 }
779 
780 static void exynos5433_tmu_control(struct platform_device *pdev, bool on)
781 {
782 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
783 	struct thermal_zone_device *tz = data->tzd;
784 	unsigned int con, interrupt_en, pd_det_en;
785 
786 	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
787 
788 	if (on) {
789 		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
790 		interrupt_en =
791 			(of_thermal_is_trip_valid(tz, 7)
792 			<< EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
793 			(of_thermal_is_trip_valid(tz, 6)
794 			<< EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
795 			(of_thermal_is_trip_valid(tz, 5)
796 			<< EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
797 			(of_thermal_is_trip_valid(tz, 4)
798 			<< EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
799 			(of_thermal_is_trip_valid(tz, 3)
800 			<< EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
801 			(of_thermal_is_trip_valid(tz, 2)
802 			<< EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
803 			(of_thermal_is_trip_valid(tz, 1)
804 			<< EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
805 			(of_thermal_is_trip_valid(tz, 0)
806 			<< EXYNOS7_TMU_INTEN_RISE0_SHIFT);
807 
808 		interrupt_en |=
809 			interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
810 	} else {
811 		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
812 		interrupt_en = 0; /* Disable all interrupts */
813 	}
814 
815 	pd_det_en = on ? EXYNOS5433_PD_DET_EN : 0;
816 
817 	writel(pd_det_en, data->base + EXYNOS5433_TMU_PD_DET_EN);
818 	writel(interrupt_en, data->base + EXYNOS5433_TMU_REG_INTEN);
819 	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
820 }
821 
822 static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
823 {
824 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
825 	struct thermal_zone_device *tz = data->tzd;
826 	unsigned int con, interrupt_en;
827 
828 	con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL));
829 
830 	if (on) {
831 		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
832 		interrupt_en =
833 			(of_thermal_is_trip_valid(tz, 3)
834 			 << EXYNOS5440_TMU_INTEN_RISE3_SHIFT) |
835 			(of_thermal_is_trip_valid(tz, 2)
836 			 << EXYNOS5440_TMU_INTEN_RISE2_SHIFT) |
837 			(of_thermal_is_trip_valid(tz, 1)
838 			 << EXYNOS5440_TMU_INTEN_RISE1_SHIFT) |
839 			(of_thermal_is_trip_valid(tz, 0)
840 			 << EXYNOS5440_TMU_INTEN_RISE0_SHIFT);
841 		interrupt_en |=
842 			interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
843 	} else {
844 		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
845 		interrupt_en = 0; /* Disable all interrupts */
846 	}
847 	writel(interrupt_en, data->base + EXYNOS5440_TMU_S0_7_IRQEN);
848 	writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
849 }
850 
851 static void exynos7_tmu_control(struct platform_device *pdev, bool on)
852 {
853 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
854 	struct thermal_zone_device *tz = data->tzd;
855 	unsigned int con, interrupt_en;
856 
857 	con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
858 
859 	if (on) {
860 		con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
861 		con |= (1 << EXYNOS7_PD_DET_EN_SHIFT);
862 		interrupt_en =
863 			(of_thermal_is_trip_valid(tz, 7)
864 			<< EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
865 			(of_thermal_is_trip_valid(tz, 6)
866 			<< EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
867 			(of_thermal_is_trip_valid(tz, 5)
868 			<< EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
869 			(of_thermal_is_trip_valid(tz, 4)
870 			<< EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
871 			(of_thermal_is_trip_valid(tz, 3)
872 			<< EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
873 			(of_thermal_is_trip_valid(tz, 2)
874 			<< EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
875 			(of_thermal_is_trip_valid(tz, 1)
876 			<< EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
877 			(of_thermal_is_trip_valid(tz, 0)
878 			<< EXYNOS7_TMU_INTEN_RISE0_SHIFT);
879 
880 		interrupt_en |=
881 			interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
882 	} else {
883 		con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
884 		con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT);
885 		interrupt_en = 0; /* Disable all interrupts */
886 	}
887 
888 	writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
889 	writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
890 }
891 
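/*
 * .get_temp callback of exynos_sensor_ops; reports the current temperature
 * in millidegrees Celsius.
 */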
892 static int exynos_get_temp(void *p, int *temp)
893 {
894 	struct exynos_tmu_data *data = p;
895 	int value, ret = 0;
896 
897 	if (!data || !data->tmu_read || !data->enabled)
898 		return -EINVAL;
899 
900 	mutex_lock(&data->lock);
901 	clk_enable(data->clk);
902 
903 	value = data->tmu_read(data);
904 	if (value < 0)
905 		ret = value;
906 	else
907 		*temp = code_to_temp(data, value) * MCELSIUS;
908 
909 	clk_disable(data->clk);
910 	mutex_unlock(&data->lock);
911 
912 	return ret;
913 }
914 
915 #ifdef CONFIG_THERMAL_EMULATION
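/*
 * Build the EMUL_CON value: a non-zero @temp programs the emulated
 * temperature code (plus the emulation time field on SoCs that have it) and
 * sets the enable bit, while a zero @temp clears the enable bit.
 */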
916 static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
917 			    int temp)
918 {
919 	if (temp) {
920 		temp /= MCELSIUS;
921 
922 		if (data->soc != SOC_ARCH_EXYNOS5440) {
923 			val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
924 			val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
925 		}
926 		if (data->soc == SOC_ARCH_EXYNOS7) {
927 			val &= ~(EXYNOS7_EMUL_DATA_MASK <<
928 				EXYNOS7_EMUL_DATA_SHIFT);
929 			val |= (temp_to_code(data, temp) <<
930 				EXYNOS7_EMUL_DATA_SHIFT) |
931 				EXYNOS_EMUL_ENABLE;
932 		} else {
933 			val &= ~(EXYNOS_EMUL_DATA_MASK <<
934 				EXYNOS_EMUL_DATA_SHIFT);
935 			val |= (temp_to_code(data, temp) <<
936 				EXYNOS_EMUL_DATA_SHIFT) |
937 				EXYNOS_EMUL_ENABLE;
938 		}
939 	} else {
940 		val &= ~EXYNOS_EMUL_ENABLE;
941 	}
942 
943 	return val;
944 }
945 
946 static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
947 					 int temp)
948 {
949 	unsigned int val;
950 	u32 emul_con;
951 
952 	if (data->soc == SOC_ARCH_EXYNOS5260)
953 		emul_con = EXYNOS5260_EMUL_CON;
954 	else if (data->soc == SOC_ARCH_EXYNOS5433)
955 		emul_con = EXYNOS5433_TMU_EMUL_CON;
956 	else if (data->soc == SOC_ARCH_EXYNOS7)
957 		emul_con = EXYNOS7_TMU_REG_EMUL_CON;
958 	else
959 		emul_con = EXYNOS_EMUL_CON;
960 
961 	val = readl(data->base + emul_con);
962 	val = get_emul_con_reg(data, val, temp);
963 	writel(val, data->base + emul_con);
964 }
965 
966 static void exynos5440_tmu_set_emulation(struct exynos_tmu_data *data,
967 					 int temp)
968 {
969 	unsigned int val;
970 
971 	val = readl(data->base + EXYNOS5440_TMU_S0_7_DEBUG);
972 	val = get_emul_con_reg(data, val, temp);
973 	writel(val, data->base + EXYNOS5440_TMU_S0_7_DEBUG);
974 }
975 
976 static int exynos_tmu_set_emulation(void *drv_data, int temp)
977 {
978 	struct exynos_tmu_data *data = drv_data;
979 	int ret = -EINVAL;
980 
981 	if (data->soc == SOC_ARCH_EXYNOS4210)
982 		goto out;
983 
984 	if (temp && temp < MCELSIUS)
985 		goto out;
986 
987 	mutex_lock(&data->lock);
988 	clk_enable(data->clk);
989 	data->tmu_set_emulation(data, temp);
990 	clk_disable(data->clk);
991 	mutex_unlock(&data->lock);
992 	return 0;
993 out:
994 	return ret;
995 }
996 #else
997 #define exynos4412_tmu_set_emulation NULL
998 #define exynos5440_tmu_set_emulation NULL
999 static int exynos_tmu_set_emulation(void *drv_data, int temp)
1000 	{ return -EINVAL; }
1001 #endif /* CONFIG_THERMAL_EMULATION */
1002 
1003 static int exynos4210_tmu_read(struct exynos_tmu_data *data)
1004 {
1005 	int ret = readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
1006 
1007 	/* "temp_code" should range between 75 and 175 */
1008 	return (ret < 75 || ret > 175) ? -ENODATA : ret;
1009 }
1010 
1011 static int exynos4412_tmu_read(struct exynos_tmu_data *data)
1012 {
1013 	return readb(data->base + EXYNOS_TMU_REG_CURRENT_TEMP);
1014 }
1015 
1016 static int exynos5440_tmu_read(struct exynos_tmu_data *data)
1017 {
1018 	return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP);
1019 }
1020 
1021 static int exynos7_tmu_read(struct exynos_tmu_data *data)
1022 {
1023 	return readw(data->base + EXYNOS_TMU_REG_CURRENT_TEMP) &
1024 		EXYNOS7_TMU_TEMP_MASK;
1025 }
1026 
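/*
 * Bottom half of the TMU interrupt: on Exynos5440 first check whether this
 * instance raised the interrupt, then notify the thermal core, clear the
 * pending bits and re-enable the interrupt line disabled in the handler.
 */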
1027 static void exynos_tmu_work(struct work_struct *work)
1028 {
1029 	struct exynos_tmu_data *data = container_of(work,
1030 			struct exynos_tmu_data, irq_work);
1031 	unsigned int val_type;
1032 
1033 	if (!IS_ERR(data->clk_sec))
1034 		clk_enable(data->clk_sec);
1035 	/* Find which sensor generated this interrupt */
1036 	if (data->soc == SOC_ARCH_EXYNOS5440) {
1037 		val_type = readl(data->base_second + EXYNOS5440_TMU_IRQ_STATUS);
1038 		if (!((val_type >> data->id) & 0x1))
1039 			goto out;
1040 	}
1041 	if (!IS_ERR(data->clk_sec))
1042 		clk_disable(data->clk_sec);
1043 
1044 	exynos_report_trigger(data);
1045 	mutex_lock(&data->lock);
1046 	clk_enable(data->clk);
1047 
1048 	/* TODO: take action based on particular interrupt */
1049 	data->tmu_clear_irqs(data);
1050 
1051 	clk_disable(data->clk);
1052 	mutex_unlock(&data->lock);
1053 out:
1054 	enable_irq(data->irq);
1055 }
1056 
1057 static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
1058 {
1059 	unsigned int val_irq;
1060 	u32 tmu_intstat, tmu_intclear;
1061 
1062 	if (data->soc == SOC_ARCH_EXYNOS5260) {
1063 		tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT;
1064 		tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR;
1065 	} else if (data->soc == SOC_ARCH_EXYNOS7) {
1066 		tmu_intstat = EXYNOS7_TMU_REG_INTPEND;
1067 		tmu_intclear = EXYNOS7_TMU_REG_INTPEND;
1068 	} else if (data->soc == SOC_ARCH_EXYNOS5433) {
1069 		tmu_intstat = EXYNOS5433_TMU_REG_INTPEND;
1070 		tmu_intclear = EXYNOS5433_TMU_REG_INTPEND;
1071 	} else {
1072 		tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
1073 		tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
1074 	}
1075 
1076 	val_irq = readl(data->base + tmu_intstat);
1077 	/*
1078 	 * Clear the interrupts.  Please note that the documentation for
1079 	 * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
1080 	 * states that INTCLEAR register has a different placing of bits
1081 	 * responsible for FALL IRQs than INTSTAT register.  Exynos5420
1082 	 * and Exynos5440 documentation is correct (Exynos4210 doesn't
1083 	 * support FALL IRQs at all).
1084 	 */
1085 	writel(val_irq, data->base + tmu_intclear);
1086 }
1087 
1088 static void exynos5440_tmu_clear_irqs(struct exynos_tmu_data *data)
1089 {
1090 	unsigned int val_irq;
1091 
1092 	val_irq = readl(data->base + EXYNOS5440_TMU_S0_7_IRQ);
1093 	/* clear the interrupts */
1094 	writel(val_irq, data->base + EXYNOS5440_TMU_S0_7_IRQ);
1095 }
1096 
1097 static irqreturn_t exynos_tmu_irq(int irq, void *id)
1098 {
1099 	struct exynos_tmu_data *data = id;
1100 
1101 	disable_irq_nosync(irq);
1102 	schedule_work(&data->irq_work);
1103 
1104 	return IRQ_HANDLED;
1105 }
1106 
1107 static const struct of_device_id exynos_tmu_match[] = {
1108 	{ .compatible = "samsung,exynos3250-tmu", },
1109 	{ .compatible = "samsung,exynos4210-tmu", },
1110 	{ .compatible = "samsung,exynos4412-tmu", },
1111 	{ .compatible = "samsung,exynos5250-tmu", },
1112 	{ .compatible = "samsung,exynos5260-tmu", },
1113 	{ .compatible = "samsung,exynos5420-tmu", },
1114 	{ .compatible = "samsung,exynos5420-tmu-ext-triminfo", },
1115 	{ .compatible = "samsung,exynos5433-tmu", },
1116 	{ .compatible = "samsung,exynos5440-tmu", },
1117 	{ .compatible = "samsung,exynos7-tmu", },
1118 	{ /* sentinel */ },
1119 };
1120 MODULE_DEVICE_TABLE(of, exynos_tmu_match);
1121 
1122 static int exynos_of_get_soc_type(struct device_node *np)
1123 {
1124 	if (of_device_is_compatible(np, "samsung,exynos3250-tmu"))
1125 		return SOC_ARCH_EXYNOS3250;
1126 	else if (of_device_is_compatible(np, "samsung,exynos4210-tmu"))
1127 		return SOC_ARCH_EXYNOS4210;
1128 	else if (of_device_is_compatible(np, "samsung,exynos4412-tmu"))
1129 		return SOC_ARCH_EXYNOS4412;
1130 	else if (of_device_is_compatible(np, "samsung,exynos5250-tmu"))
1131 		return SOC_ARCH_EXYNOS5250;
1132 	else if (of_device_is_compatible(np, "samsung,exynos5260-tmu"))
1133 		return SOC_ARCH_EXYNOS5260;
1134 	else if (of_device_is_compatible(np, "samsung,exynos5420-tmu"))
1135 		return SOC_ARCH_EXYNOS5420;
1136 	else if (of_device_is_compatible(np,
1137 					 "samsung,exynos5420-tmu-ext-triminfo"))
1138 		return SOC_ARCH_EXYNOS5420_TRIMINFO;
1139 	else if (of_device_is_compatible(np, "samsung,exynos5433-tmu"))
1140 		return SOC_ARCH_EXYNOS5433;
1141 	else if (of_device_is_compatible(np, "samsung,exynos5440-tmu"))
1142 		return SOC_ARCH_EXYNOS5440;
1143 	else if (of_device_is_compatible(np, "samsung,exynos7-tmu"))
1144 		return SOC_ARCH_EXYNOS7;
1145 
1146 	return -EINVAL;
1147 }
1148 
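/*
 * Purely illustrative device tree fragment (property names taken from the
 * parsing below, values made up for the example; real numbers are SoC and
 * board specific):
 *
 *	tmu@100c0000 {
 *		samsung,tmu_gain = <8>;
 *		samsung,tmu_reference_voltage = <16>;
 *		samsung,tmu_noise_cancel_mode = <4>;
 *		samsung,tmu_efuse_value = <55>;
 *		samsung,tmu_min_efuse_value = <40>;
 *		samsung,tmu_max_efuse_value = <100>;
 *		samsung,tmu_first_point_trim = <25>;
 *		samsung,tmu_second_point_trim = <85>;
 *		samsung,tmu_default_temp_offset = <50>;
 *		samsung,tmu_cal_type = <1>;
 *	};
 */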
1149 static int exynos_of_sensor_conf(struct device_node *np,
1150 				 struct exynos_tmu_platform_data *pdata)
1151 {
1152 	u32 value;
1153 	int ret;
1154 
1155 	of_node_get(np);
1156 
1157 	ret = of_property_read_u32(np, "samsung,tmu_gain", &value);
1158 	pdata->gain = (u8)value;
1159 	of_property_read_u32(np, "samsung,tmu_reference_voltage", &value);
1160 	pdata->reference_voltage = (u8)value;
1161 	of_property_read_u32(np, "samsung,tmu_noise_cancel_mode", &value);
1162 	pdata->noise_cancel_mode = (u8)value;
1163 
1164 	of_property_read_u32(np, "samsung,tmu_efuse_value",
1165 			     &pdata->efuse_value);
1166 	of_property_read_u32(np, "samsung,tmu_min_efuse_value",
1167 			     &pdata->min_efuse_value);
1168 	of_property_read_u32(np, "samsung,tmu_max_efuse_value",
1169 			     &pdata->max_efuse_value);
1170 
1171 	of_property_read_u32(np, "samsung,tmu_first_point_trim", &value);
1172 	pdata->first_point_trim = (u8)value;
1173 	of_property_read_u32(np, "samsung,tmu_second_point_trim", &value);
1174 	pdata->second_point_trim = (u8)value;
1175 	of_property_read_u32(np, "samsung,tmu_default_temp_offset", &value);
1176 	pdata->default_temp_offset = (u8)value;
1177 
1178 	of_property_read_u32(np, "samsung,tmu_cal_type", &pdata->cal_type);
1179 
1180 	of_node_put(np);
1181 	return 0;
1182 }
1183 
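/*
 * Map the MMIO and IRQ resources, parse the sensor configuration from the
 * device tree and bind the SoC specific initialize/control/read/emulation/
 * irq-clear callbacks.
 */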
1184 static int exynos_map_dt_data(struct platform_device *pdev)
1185 {
1186 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
1187 	struct exynos_tmu_platform_data *pdata;
1188 	struct resource res;
1189 
1190 	if (!data || !pdev->dev.of_node)
1191 		return -ENODEV;
1192 
1193 	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
1194 	if (data->id < 0)
1195 		data->id = 0;
1196 
1197 	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1198 	if (data->irq <= 0) {
1199 		dev_err(&pdev->dev, "failed to get IRQ\n");
1200 		return -ENODEV;
1201 	}
1202 
1203 	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
1204 		dev_err(&pdev->dev, "failed to get Resource 0\n");
1205 		return -ENODEV;
1206 	}
1207 
1208 	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
1209 	if (!data->base) {
1210 		dev_err(&pdev->dev, "Failed to ioremap memory\n");
1211 		return -EADDRNOTAVAIL;
1212 	}
1213 
1214 	pdata = devm_kzalloc(&pdev->dev,
1215 			     sizeof(struct exynos_tmu_platform_data),
1216 			     GFP_KERNEL);
1217 	if (!pdata)
1218 		return -ENOMEM;
1219 
1220 	exynos_of_sensor_conf(pdev->dev.of_node, pdata);
1221 	data->pdata = pdata;
1222 	data->soc = exynos_of_get_soc_type(pdev->dev.of_node);
1223 
1224 	switch (data->soc) {
1225 	case SOC_ARCH_EXYNOS4210:
1226 		data->tmu_initialize = exynos4210_tmu_initialize;
1227 		data->tmu_control = exynos4210_tmu_control;
1228 		data->tmu_read = exynos4210_tmu_read;
1229 		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
1230 		data->ntrip = 4;
1231 		break;
1232 	case SOC_ARCH_EXYNOS3250:
1233 	case SOC_ARCH_EXYNOS4412:
1234 	case SOC_ARCH_EXYNOS5250:
1235 	case SOC_ARCH_EXYNOS5260:
1236 	case SOC_ARCH_EXYNOS5420:
1237 	case SOC_ARCH_EXYNOS5420_TRIMINFO:
1238 		data->tmu_initialize = exynos4412_tmu_initialize;
1239 		data->tmu_control = exynos4210_tmu_control;
1240 		data->tmu_read = exynos4412_tmu_read;
1241 		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
1242 		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
1243 		data->ntrip = 4;
1244 		break;
1245 	case SOC_ARCH_EXYNOS5433:
1246 		data->tmu_initialize = exynos5433_tmu_initialize;
1247 		data->tmu_control = exynos5433_tmu_control;
1248 		data->tmu_read = exynos4412_tmu_read;
1249 		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
1250 		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
1251 		data->ntrip = 8;
1252 		break;
1253 	case SOC_ARCH_EXYNOS5440:
1254 		data->tmu_initialize = exynos5440_tmu_initialize;
1255 		data->tmu_control = exynos5440_tmu_control;
1256 		data->tmu_read = exynos5440_tmu_read;
1257 		data->tmu_set_emulation = exynos5440_tmu_set_emulation;
1258 		data->tmu_clear_irqs = exynos5440_tmu_clear_irqs;
1259 		data->ntrip = 4;
1260 		break;
1261 	case SOC_ARCH_EXYNOS7:
1262 		data->tmu_initialize = exynos7_tmu_initialize;
1263 		data->tmu_control = exynos7_tmu_control;
1264 		data->tmu_read = exynos7_tmu_read;
1265 		data->tmu_set_emulation = exynos4412_tmu_set_emulation;
1266 		data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
1267 		data->ntrip = 8;
1268 		break;
1269 	default:
1270 		dev_err(&pdev->dev, "Platform not supported\n");
1271 		return -EINVAL;
1272 	}
1273 
1274 	/*
1275 	 * Check if the TMU shares some registers and then try to map the
1276 	 * memory of common registers.
1277 	 */
1278 	if (data->soc != SOC_ARCH_EXYNOS5420_TRIMINFO &&
1279 	    data->soc != SOC_ARCH_EXYNOS5440)
1280 		return 0;
1281 
1282 	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
1283 		dev_err(&pdev->dev, "failed to get Resource 1\n");
1284 		return -ENODEV;
1285 	}
1286 
1287 	data->base_second = devm_ioremap(&pdev->dev, res.start,
1288 					resource_size(&res));
1289 	if (!data->base_second) {
1290 		dev_err(&pdev->dev, "Failed to ioremap memory\n");
1291 		return -ENOMEM;
1292 	}
1293 
1294 	return 0;
1295 }
1296 
1297 static const struct thermal_zone_of_device_ops exynos_sensor_ops = {
1298 	.get_temp = exynos_get_temp,
1299 	.set_emul_temp = exynos_tmu_set_emulation,
1300 };
1301 
1302 static int exynos_tmu_probe(struct platform_device *pdev)
1303 {
1304 	struct exynos_tmu_data *data;
1305 	int ret;
1306 
1307 	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
1308 					GFP_KERNEL);
1309 	if (!data)
1310 		return -ENOMEM;
1311 
1312 	platform_set_drvdata(pdev, data);
1313 	mutex_init(&data->lock);
1314 
1315 	/*
	 * Try enabling the regulator if found.
	 * TODO: Add regulator as an SoC feature, so that enabling the
	 * regulator becomes a compulsory call.
1319 	 */
1320 	data->regulator = devm_regulator_get_optional(&pdev->dev, "vtmu");
1321 	if (!IS_ERR(data->regulator)) {
1322 		ret = regulator_enable(data->regulator);
1323 		if (ret) {
1324 			dev_err(&pdev->dev, "failed to enable vtmu\n");
1325 			return ret;
1326 		}
1327 	} else {
1328 		if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
1329 			return -EPROBE_DEFER;
1330 		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
1331 	}
1332 
1333 	ret = exynos_map_dt_data(pdev);
1334 	if (ret)
1335 		goto err_sensor;
1336 
1337 	INIT_WORK(&data->irq_work, exynos_tmu_work);
1338 
1339 	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
1340 	if (IS_ERR(data->clk)) {
1341 		dev_err(&pdev->dev, "Failed to get clock\n");
1342 		ret = PTR_ERR(data->clk);
1343 		goto err_sensor;
1344 	}
1345 
1346 	data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
1347 	if (IS_ERR(data->clk_sec)) {
1348 		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
1349 			dev_err(&pdev->dev, "Failed to get triminfo clock\n");
1350 			ret = PTR_ERR(data->clk_sec);
1351 			goto err_sensor;
1352 		}
1353 	} else {
1354 		ret = clk_prepare(data->clk_sec);
1355 		if (ret) {
			dev_err(&pdev->dev, "Failed to prepare triminfo clock\n");
1357 			goto err_sensor;
1358 		}
1359 	}
1360 
1361 	ret = clk_prepare(data->clk);
1362 	if (ret) {
		dev_err(&pdev->dev, "Failed to prepare clock\n");
1364 		goto err_clk_sec;
1365 	}
1366 
1367 	switch (data->soc) {
1368 	case SOC_ARCH_EXYNOS5433:
1369 	case SOC_ARCH_EXYNOS7:
1370 		data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
		if (IS_ERR(data->sclk)) {
			dev_err(&pdev->dev, "Failed to get sclk\n");
			ret = PTR_ERR(data->sclk);
			goto err_clk;
1374 		} else {
1375 			ret = clk_prepare_enable(data->sclk);
1376 			if (ret) {
1377 				dev_err(&pdev->dev, "Failed to enable sclk\n");
1378 				goto err_clk;
1379 			}
1380 		}
1381 		break;
1382 	default:
1383 		break;
1384 	}
1385 
1386 	/*
1387 	 * data->tzd must be registered before calling exynos_tmu_initialize(),
1388 	 * requesting irq and calling exynos_tmu_control().
1389 	 */
1390 	data->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
1391 						    &exynos_sensor_ops);
1392 	if (IS_ERR(data->tzd)) {
1393 		ret = PTR_ERR(data->tzd);
1394 		dev_err(&pdev->dev, "Failed to register sensor: %d\n", ret);
1395 		goto err_sclk;
1396 	}
1397 
1398 	ret = exynos_tmu_initialize(pdev);
1399 	if (ret) {
1400 		dev_err(&pdev->dev, "Failed to initialize TMU\n");
1401 		goto err_thermal;
1402 	}
1403 
1404 	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
1405 		IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
1406 	if (ret) {
1407 		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
1408 		goto err_thermal;
1409 	}
1410 
1411 	exynos_tmu_control(pdev, true);
1412 	return 0;
1413 
1414 err_thermal:
1415 	thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
1416 err_sclk:
1417 	clk_disable_unprepare(data->sclk);
1418 err_clk:
1419 	clk_unprepare(data->clk);
1420 err_clk_sec:
1421 	if (!IS_ERR(data->clk_sec))
1422 		clk_unprepare(data->clk_sec);
1423 err_sensor:
1424 	if (!IS_ERR(data->regulator))
1425 		regulator_disable(data->regulator);
1426 
1427 	return ret;
1428 }
1429 
1430 static int exynos_tmu_remove(struct platform_device *pdev)
1431 {
1432 	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
1433 	struct thermal_zone_device *tzd = data->tzd;
1434 
1435 	thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
1436 	exynos_tmu_control(pdev, false);
1437 
1438 	clk_disable_unprepare(data->sclk);
1439 	clk_unprepare(data->clk);
1440 	if (!IS_ERR(data->clk_sec))
1441 		clk_unprepare(data->clk_sec);
1442 
1443 	if (!IS_ERR(data->regulator))
1444 		regulator_disable(data->regulator);
1445 
1446 	return 0;
1447 }
1448 
1449 #ifdef CONFIG_PM_SLEEP
1450 static int exynos_tmu_suspend(struct device *dev)
1451 {
1452 	exynos_tmu_control(to_platform_device(dev), false);
1453 
1454 	return 0;
1455 }
1456 
1457 static int exynos_tmu_resume(struct device *dev)
1458 {
1459 	struct platform_device *pdev = to_platform_device(dev);
1460 
1461 	exynos_tmu_initialize(pdev);
1462 	exynos_tmu_control(pdev, true);
1463 
1464 	return 0;
1465 }
1466 
1467 static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
1468 			 exynos_tmu_suspend, exynos_tmu_resume);
1469 #define EXYNOS_TMU_PM	(&exynos_tmu_pm)
1470 #else
1471 #define EXYNOS_TMU_PM	NULL
1472 #endif
1473 
1474 static struct platform_driver exynos_tmu_driver = {
1475 	.driver = {
1476 		.name   = "exynos-tmu",
1477 		.pm     = EXYNOS_TMU_PM,
1478 		.of_match_table = exynos_tmu_match,
1479 	},
1480 	.probe = exynos_tmu_probe,
1481 	.remove	= exynos_tmu_remove,
1482 };
1483 
1484 module_platform_driver(exynos_tmu_driver);
1485 
1486 MODULE_DESCRIPTION("EXYNOS TMU Driver");
1487 MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
1488 MODULE_LICENSE("GPL");
1489 MODULE_ALIAS("platform:exynos-tmu");
1490