/*
 * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
 *
 *  Copyright (C) 2011 Samsung Electronics
 *  Donggeun Kim <dg77.kim@samsung.com>
 *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "exynos_thermal_common.h"
#include "exynos_tmu.h"
#include "exynos_tmu_data.h"

/**
 * struct exynos_tmu_data - A structure to hold the private data of the TMU
 *			    driver
 * @id: identifier of the one instance of the TMU controller.
 * @pdata: pointer to the tmu platform/configuration data
 * @base: base address of the single instance of the TMU controller.
 * @base_second: base address of the common registers of the TMU controller.
 * @irq: irq number of the TMU controller.
 * @soc: id of the SOC type.
 * @irq_work: pointer to the irq work structure.
 * @lock: lock to implement synchronization.
 * @clk: pointer to the clock structure.
 * @clk_sec: pointer to the clock structure for accessing the base_second.
 * @temp_error1: fused value of the first point trim.
 * @temp_error2: fused value of the second point trim.
 * @regulator: pointer to the TMU regulator structure.
 * @reg_conf: pointer to structure to register with core thermal.
 */
struct exynos_tmu_data {
	int id;
	struct exynos_tmu_platform_data *pdata;
	void __iomem *base;
	void __iomem *base_second;
	int irq;
	enum soc_type soc;
	struct work_struct irq_work;
	struct mutex lock;
	struct clk *clk, *clk_sec;
	u8 temp_error1, temp_error2;
	struct regulator *regulator;
	struct thermal_sensor_conf *reg_conf;
};

/*
 * TMU treats temperature as a mapped temperature code.
 * The temperature is converted differently depending on the calibration type.
 */
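/*
 * Worked example with purely illustrative fused values (not taken from any
 * specific part): if temp_error1 = 50 was fused at first_point_trim = 25
 * degC and temp_error2 = 85 at second_point_trim = 85 degC, two-point
 * trimming maps temp = 60 degC to the code
 * (60 - 25) * (85 - 50) / (85 - 25) + 50 = 70 (integer arithmetic).
 */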
static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp_code;

	if (pdata->cal_mode == HW_MODE)
		return temp;

	if (data->soc == SOC_ARCH_EXYNOS4210) {
		/* temp should range between 25 and 125 */
		if (temp < 25 || temp > 125) {
			temp_code = -EINVAL;
			goto out;
		}
	}

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp_code = (temp - pdata->first_point_trim) *
			(data->temp_error2 - data->temp_error1) /
			(pdata->second_point_trim - pdata->first_point_trim) +
			data->temp_error1;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp_code = temp + data->temp_error1 - pdata->first_point_trim;
		break;
	default:
		temp_code = temp + pdata->default_temp_offset;
		break;
	}
out:
	return temp_code;
}

/*
 * Calculate a temperature value from a temperature code.
 * The unit of the temperature is degree Celsius.
 */
static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	int temp;

	if (pdata->cal_mode == HW_MODE)
		return temp_code;

	if (data->soc == SOC_ARCH_EXYNOS4210) {
		/* temp_code should range between 75 and 175 */
		if (temp_code < 75 || temp_code > 175) {
			temp = -ENODATA;
			goto out;
		}
	}

	switch (pdata->cal_type) {
	case TYPE_TWO_POINT_TRIMMING:
		temp = (temp_code - data->temp_error1) *
			(pdata->second_point_trim - pdata->first_point_trim) /
			(data->temp_error2 - data->temp_error1) +
			pdata->first_point_trim;
		break;
	case TYPE_ONE_POINT_TRIMMING:
		temp = temp_code - data->temp_error1 + pdata->first_point_trim;
		break;
	default:
		temp = temp_code - pdata->default_temp_offset;
		break;
	}
out:
	return temp;
}

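/*
 * One-time hardware setup: check the TMU status register where supported,
 * read the fused trim codes used by temp_to_code()/code_to_temp(), and
 * program the rising/falling thresholds and the optional HW trip from the
 * platform data. Runs with the clock enabled and data->lock held.
 */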
static int exynos_tmu_initialize(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int status, trim_info = 0, con;
	unsigned int rising_threshold = 0, falling_threshold = 0;
	int ret = 0, threshold_code, i, trigger_levs = 0;

	mutex_lock(&data->lock);
	clk_enable(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);

	if (TMU_SUPPORTS(pdata, READY_STATUS)) {
		status = readb(data->base + reg->tmu_status);
		if (!status) {
			ret = -EBUSY;
			goto out;
		}
	}

	if (TMU_SUPPORTS(pdata, TRIM_RELOAD))
		__raw_writel(1, data->base + reg->triminfo_ctrl);

	if (pdata->cal_mode == HW_MODE)
		goto skip_calib_data;

	/* Save trimming info in order to perform calibration */
	if (data->soc == SOC_ARCH_EXYNOS5440) {
		/*
		 * On the Exynos5440 SoC the triminfo value is swapped between
		 * TMU0 and TMU2, so the logic below is needed.
		 */
		switch (data->id) {
		case 0:
			trim_info = readl(data->base +
					  EXYNOS5440_EFUSE_SWAP_OFFSET +
					  reg->triminfo_data);
			break;
		case 1:
			trim_info = readl(data->base + reg->triminfo_data);
			break;
		case 2:
			trim_info = readl(data->base -
					  EXYNOS5440_EFUSE_SWAP_OFFSET +
					  reg->triminfo_data);
		}
	} else {
		/* On exynos5420 the triminfo register is in the shared space */
		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO)
			trim_info = readl(data->base_second +
							reg->triminfo_data);
		else
			trim_info = readl(data->base + reg->triminfo_data);
	}
	data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
	data->temp_error2 = ((trim_info >> reg->triminfo_85_shift) &
				EXYNOS_TMU_TEMP_MASK);

	if (!data->temp_error1 ||
		(pdata->min_efuse_value > data->temp_error1) ||
		(data->temp_error1 > pdata->max_efuse_value))
		data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;

	if (!data->temp_error2)
		data->temp_error2 =
			(pdata->efuse_value >> reg->triminfo_85_shift) &
			EXYNOS_TMU_TEMP_MASK;

skip_calib_data:
	if (pdata->max_trigger_level > MAX_THRESHOLD_LEVS) {
		dev_err(&pdev->dev, "Invalid max trigger level\n");
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < pdata->max_trigger_level; i++) {
		if (!pdata->trigger_levels[i])
			continue;

		if ((pdata->trigger_type[i] == HW_TRIP) &&
		    (!pdata->trigger_levels[pdata->max_trigger_level - 1])) {
			dev_err(&pdev->dev, "Invalid hw trigger level\n");
			ret = -EINVAL;
			goto out;
		}

		/* Count trigger levels except the HW trip */
		if (pdata->trigger_type[i] != HW_TRIP)
			trigger_levs++;
	}

	rising_threshold = readl(data->base + reg->threshold_th0);

	if (data->soc == SOC_ARCH_EXYNOS4210) {
		/* Write temperature code for threshold */
		threshold_code = temp_to_code(data, pdata->threshold);
		if (threshold_code < 0) {
			ret = threshold_code;
			goto out;
		}
		writeb(threshold_code,
			data->base + reg->threshold_temp);
		for (i = 0; i < trigger_levs; i++)
			writeb(pdata->trigger_levels[i], data->base +
			reg->threshold_th0 + i * sizeof(reg->threshold_th0));

		writel(reg->intclr_rise_mask, data->base + reg->tmu_intclear);
	} else {
		/* Write temperature code for rising and falling threshold */
		for (i = 0;
		     i < trigger_levs && i < EXYNOS_MAX_TRIGGER_PER_REG; i++) {
			threshold_code = temp_to_code(data,
						pdata->trigger_levels[i]);
			if (threshold_code < 0) {
				ret = threshold_code;
				goto out;
			}
			rising_threshold &= ~(0xff << 8 * i);
			rising_threshold |= threshold_code << 8 * i;
			if (pdata->threshold_falling) {
				threshold_code = temp_to_code(data,
						pdata->trigger_levels[i] -
						pdata->threshold_falling);
				if (threshold_code > 0)
					falling_threshold |=
						threshold_code << 8 * i;
			}
		}

		writel(rising_threshold,
				data->base + reg->threshold_th0);
		writel(falling_threshold,
				data->base + reg->threshold_th1);

		writel((reg->intclr_rise_mask << reg->intclr_rise_shift) |
			(reg->intclr_fall_mask << reg->intclr_fall_shift),
				data->base + reg->tmu_intclear);

		/* If the last threshold limit is also present */
		i = pdata->max_trigger_level - 1;
		if (pdata->trigger_levels[i] &&
				(pdata->trigger_type[i] == HW_TRIP)) {
			threshold_code = temp_to_code(data,
						pdata->trigger_levels[i]);
			if (threshold_code < 0) {
				ret = threshold_code;
				goto out;
			}
			if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
				/* 1-4 level to be assigned in th0 reg */
				rising_threshold &= ~(0xff << 8 * i);
				rising_threshold |= threshold_code << 8 * i;
				writel(rising_threshold,
					data->base + reg->threshold_th0);
			} else if (i == EXYNOS_MAX_TRIGGER_PER_REG) {
				/* 5th level to be assigned in th2 reg */
				rising_threshold =
				threshold_code << reg->threshold_th3_l0_shift;
				writel(rising_threshold,
					data->base + reg->threshold_th2);
			}
			con = readl(data->base + reg->tmu_ctrl);
			con |= (1 << reg->therm_trip_en_shift);
			writel(con, data->base + reg->tmu_ctrl);
		}
	}
	/* Clear the PMIN in the common TMU register */
	if (reg->tmu_pmin && !data->id)
		writel(0, data->base_second + reg->tmu_pmin);
out:
	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	return ret;
}

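/*
 * Program TMU_CTRL (reference voltage, gain, noise-cancel mode and, in
 * HW_MODE, the calibration select bits) and enable or disable the core
 * together with its rising/falling trip interrupts.
 */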
static void exynos_tmu_control(struct platform_device *pdev, bool on)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int con, interrupt_en, cal_val;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	con = readl(data->base + reg->tmu_ctrl);

	if (pdata->test_mux)
		con |= (pdata->test_mux << reg->test_mux_addr_shift);

	if (pdata->reference_voltage) {
		con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
		con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
	}

	if (pdata->gain) {
		con &= ~(reg->buf_slope_sel_mask << reg->buf_slope_sel_shift);
		con |= (pdata->gain << reg->buf_slope_sel_shift);
	}

	if (pdata->noise_cancel_mode) {
		con &= ~(reg->therm_trip_mode_mask <<
					reg->therm_trip_mode_shift);
		con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift);
	}

	if (pdata->cal_mode == HW_MODE) {
		con &= ~(reg->calib_mode_mask << reg->calib_mode_shift);
		cal_val = 0;
		switch (pdata->cal_type) {
		case TYPE_TWO_POINT_TRIMMING:
			cal_val = 3;
			break;
		case TYPE_ONE_POINT_TRIMMING_85:
			cal_val = 2;
			break;
		case TYPE_ONE_POINT_TRIMMING_25:
			cal_val = 1;
			break;
		case TYPE_NONE:
			break;
		default:
			dev_err(&pdev->dev, "Invalid calibration type, using none\n");
		}
		con |= cal_val << reg->calib_mode_shift;
	}

	if (on) {
		con |= (1 << reg->core_en_shift);
		interrupt_en =
			pdata->trigger_enable[3] << reg->inten_rise3_shift |
			pdata->trigger_enable[2] << reg->inten_rise2_shift |
			pdata->trigger_enable[1] << reg->inten_rise1_shift |
			pdata->trigger_enable[0] << reg->inten_rise0_shift;
		if (TMU_SUPPORTS(pdata, FALLING_TRIP))
			interrupt_en |=
				interrupt_en << reg->inten_fall0_shift;
	} else {
		con &= ~(1 << reg->core_en_shift);
		interrupt_en = 0; /* Disable all interrupts */
	}
	writel(interrupt_en, data->base + reg->tmu_inten);
	writel(con, data->base + reg->tmu_ctrl);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
}

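/* Read the current temperature code and convert it to degrees Celsius. */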
static int exynos_tmu_read(struct exynos_tmu_data *data)
{
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	u8 temp_code;
	int temp;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	temp_code = readb(data->base + reg->tmu_cur_temp);
	temp = code_to_temp(data, temp_code);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);

	return temp;
}

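/*
 * Thermal emulation: a fake temperature code (plus, where EMUL_TIME is
 * supported, an update delay) is written to the emulation register so that
 * trip handling can be exercised without actually heating the SoC. Passing
 * temp == 0 disables emulation again.
 */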
#ifdef CONFIG_THERMAL_EMULATION
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
{
	struct exynos_tmu_data *data = drv_data;
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int val;
	int ret = -EINVAL;

	if (!TMU_SUPPORTS(pdata, EMULATION))
		goto out;

	if (temp && temp < MCELSIUS)
		goto out;

	mutex_lock(&data->lock);
	clk_enable(data->clk);

	val = readl(data->base + reg->emul_con);

	if (temp) {
		temp /= MCELSIUS;

		if (TMU_SUPPORTS(pdata, EMUL_TIME)) {
			val &= ~(EXYNOS_EMUL_TIME_MASK << reg->emul_time_shift);
			val |= (EXYNOS_EMUL_TIME << reg->emul_time_shift);
		}
		val &= ~(EXYNOS_EMUL_DATA_MASK << reg->emul_temp_shift);
		val |= (temp_to_code(data, temp) << reg->emul_temp_shift) |
			EXYNOS_EMUL_ENABLE;
	} else {
		val &= ~EXYNOS_EMUL_ENABLE;
	}

	writel(val, data->base + reg->emul_con);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
	return 0;
out:
	return ret;
}
#else
static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp)
{
	return -EINVAL;
}
#endif /* CONFIG_THERMAL_EMULATION */

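/*
 * Bottom half of the TMU interrupt: identify the sensor that fired (on SoCs
 * with a shared irq status register), report the trigger to the thermal
 * core, clear the interrupt source and re-enable the irq line.
 */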
static void exynos_tmu_work(struct work_struct *work)
{
	struct exynos_tmu_data *data = container_of(work,
			struct exynos_tmu_data, irq_work);
	struct exynos_tmu_platform_data *pdata = data->pdata;
	const struct exynos_tmu_registers *reg = pdata->registers;
	unsigned int val_irq, val_type;

	if (!IS_ERR(data->clk_sec))
		clk_enable(data->clk_sec);
	/* Find which sensor generated this interrupt */
	if (reg->tmu_irqstatus) {
		val_type = readl(data->base_second + reg->tmu_irqstatus);
		if (!((val_type >> data->id) & 0x1)) {
			/* Not ours: drop the clock reference before bailing */
			if (!IS_ERR(data->clk_sec))
				clk_disable(data->clk_sec);
			goto out;
		}
	}
	if (!IS_ERR(data->clk_sec))
		clk_disable(data->clk_sec);

	exynos_report_trigger(data->reg_conf);
	mutex_lock(&data->lock);
	clk_enable(data->clk);

	/* TODO: take action based on particular interrupt */
	val_irq = readl(data->base + reg->tmu_intstat);
	/* clear the interrupts */
	writel(val_irq, data->base + reg->tmu_intclear);

	clk_disable(data->clk);
	mutex_unlock(&data->lock);
out:
	enable_irq(data->irq);
}

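/* Hard irq handler: mask the line and defer all work to exynos_tmu_work(). */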
static irqreturn_t exynos_tmu_irq(int irq, void *id)
{
	struct exynos_tmu_data *data = id;

	disable_irq_nosync(irq);
	schedule_work(&data->irq_work);

	return IRQ_HANDLED;
}

static const struct of_device_id exynos_tmu_match[] = {
	{
		.compatible = "samsung,exynos3250-tmu",
		.data = (void *)EXYNOS3250_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos4210-tmu",
		.data = (void *)EXYNOS4210_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos4412-tmu",
		.data = (void *)EXYNOS4412_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5250-tmu",
		.data = (void *)EXYNOS5250_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5260-tmu",
		.data = (void *)EXYNOS5260_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5420-tmu",
		.data = (void *)EXYNOS5420_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5420-tmu-ext-triminfo",
		.data = (void *)EXYNOS5420_TMU_DRV_DATA,
	},
	{
		.compatible = "samsung,exynos5440-tmu",
		.data = (void *)EXYNOS5440_TMU_DRV_DATA,
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_tmu_match);

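/*
 * Look up the per-SoC init data attached to the matching compatible string
 * and return the platform data slot for this controller instance.
 */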
static inline struct exynos_tmu_platform_data *exynos_get_driver_data(
			struct platform_device *pdev, int id)
{
	struct exynos_tmu_init_data *data_table;
	struct exynos_tmu_platform_data *tmu_data;
	const struct of_device_id *match;

	match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
	if (!match)
		return NULL;
	data_table = (struct exynos_tmu_init_data *) match->data;
	if (!data_table || id >= data_table->tmu_count)
		return NULL;
	tmu_data = data_table->tmu_data;
	return (struct exynos_tmu_platform_data *) (tmu_data + id);
}

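/*
 * Parse the device tree node: the optional "vtmu" regulator, the "tmuctrl"
 * alias id, the interrupt, the per-instance register bank and, on SoCs with
 * shared registers, the second (common) register bank.
 */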
static int exynos_map_dt_data(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);
	struct exynos_tmu_platform_data *pdata;
	struct resource res;
	int ret;

	if (!data || !pdev->dev.of_node)
		return -ENODEV;

	/*
	 * Try enabling the regulator if found.
	 * TODO: Add regulator as an SoC feature, so that the regulator enable
	 * becomes a compulsory call.
	 */
	data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
	if (!IS_ERR(data->regulator)) {
		ret = regulator_enable(data->regulator);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable vtmu\n");
			return ret;
		}
	} else {
		dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
	}

	data->id = of_alias_get_id(pdev->dev.of_node, "tmuctrl");
	if (data->id < 0)
		data->id = 0;

	data->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (data->irq <= 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENODEV;
	}

	if (of_address_to_resource(pdev->dev.of_node, 0, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 0\n");
		return -ENODEV;
	}

	data->base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
	if (!data->base) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -EADDRNOTAVAIL;
	}

	pdata = exynos_get_driver_data(pdev, data->id);
	if (!pdata) {
		dev_err(&pdev->dev, "No platform init data supplied.\n");
		return -ENODEV;
	}
	data->pdata = pdata;
	/*
	 * Check if the TMU shares some registers and then try to map the
	 * memory of common registers.
	 */
	if (!TMU_SUPPORTS(pdata, ADDRESS_MULTIPLE))
		return 0;

	if (of_address_to_resource(pdev->dev.of_node, 1, &res)) {
		dev_err(&pdev->dev, "failed to get Resource 1\n");
		return -ENODEV;
	}

	data->base_second = devm_ioremap(&pdev->dev, res.start,
					 resource_size(&res));
	if (!data->base_second) {
		dev_err(&pdev->dev, "Failed to ioremap memory\n");
		return -ENOMEM;
	}

	return 0;
}

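/*
 * Probe: map DT resources, prepare the clocks, initialize and enable the
 * TMU, then register the sensor and its trip points with the Exynos core
 * thermal layer before requesting the interrupt.
 */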
static int exynos_tmu_probe(struct platform_device *pdev)
{
	struct exynos_tmu_data *data;
	struct exynos_tmu_platform_data *pdata;
	struct thermal_sensor_conf *sensor_conf;
	int ret, i;

	data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
					GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	ret = exynos_map_dt_data(pdev);
	if (ret)
		return ret;

	pdata = data->pdata;

	INIT_WORK(&data->irq_work, exynos_tmu_work);

	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
	if (IS_ERR(data->clk)) {
		dev_err(&pdev->dev, "Failed to get clock\n");
		return PTR_ERR(data->clk);
	}

	data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
	if (IS_ERR(data->clk_sec)) {
		if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
			dev_err(&pdev->dev, "Failed to get triminfo clock\n");
			return PTR_ERR(data->clk_sec);
		}
	} else {
		ret = clk_prepare(data->clk_sec);
		if (ret) {
			dev_err(&pdev->dev, "Failed to prepare triminfo clock\n");
			return ret;
		}
	}

	ret = clk_prepare(data->clk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to prepare clock\n");
		goto err_clk_sec;
	}

	if (pdata->type == SOC_ARCH_EXYNOS3250 ||
	    pdata->type == SOC_ARCH_EXYNOS4210 ||
	    pdata->type == SOC_ARCH_EXYNOS4412 ||
	    pdata->type == SOC_ARCH_EXYNOS5250 ||
	    pdata->type == SOC_ARCH_EXYNOS5260 ||
	    pdata->type == SOC_ARCH_EXYNOS5420_TRIMINFO ||
	    pdata->type == SOC_ARCH_EXYNOS5440) {
		data->soc = pdata->type;
	} else {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Platform not supported\n");
		goto err_clk;
	}

	ret = exynos_tmu_initialize(pdev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize TMU\n");
		goto err_clk;
	}

	exynos_tmu_control(pdev, true);

	/* Allocate a structure to register with the exynos core thermal */
	sensor_conf = devm_kzalloc(&pdev->dev,
				sizeof(struct thermal_sensor_conf), GFP_KERNEL);
	if (!sensor_conf) {
		ret = -ENOMEM;
		goto err_clk;
	}
	sprintf(sensor_conf->name, "therm_zone%d", data->id);
	sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
	sensor_conf->write_emul_temp =
		(int (*)(void *, unsigned long))exynos_tmu_set_emulation;
	sensor_conf->driver_data = data;
	sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
			pdata->trigger_enable[1] + pdata->trigger_enable[2] +
			pdata->trigger_enable[3];

	for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
		sensor_conf->trip_data.trip_val[i] =
			pdata->threshold + pdata->trigger_levels[i];
		sensor_conf->trip_data.trip_type[i] =
					pdata->trigger_type[i];
	}

	sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;

	sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
	for (i = 0; i < pdata->freq_tab_count; i++) {
		sensor_conf->cooling_data.freq_data[i].freq_clip_max =
					pdata->freq_tab[i].freq_clip_max;
		sensor_conf->cooling_data.freq_data[i].temp_level =
					pdata->freq_tab[i].temp_level;
	}
	sensor_conf->dev = &pdev->dev;
	/* Register the sensor with the thermal management interface */
	ret = exynos_register_thermal(sensor_conf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register thermal interface\n");
		goto err_clk;
	}
	data->reg_conf = sensor_conf;

	ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
		IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
		goto err_clk;
	}

	return 0;
err_clk:
	clk_unprepare(data->clk);
err_clk_sec:
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);
	return ret;
}

static int exynos_tmu_remove(struct platform_device *pdev)
{
	struct exynos_tmu_data *data = platform_get_drvdata(pdev);

	exynos_unregister_thermal(data->reg_conf);

	exynos_tmu_control(pdev, false);

	clk_unprepare(data->clk);
	if (!IS_ERR(data->clk_sec))
		clk_unprepare(data->clk_sec);

	if (!IS_ERR(data->regulator))
		regulator_disable(data->regulator);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
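/*
 * Register contents are not assumed to survive system suspend, so resume
 * re-runs exynos_tmu_initialize() before re-enabling the core.
 */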
static int exynos_tmu_suspend(struct device *dev)
{
	exynos_tmu_control(to_platform_device(dev), false);

	return 0;
}

static int exynos_tmu_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	exynos_tmu_initialize(pdev);
	exynos_tmu_control(pdev, true);

	return 0;
}

static SIMPLE_DEV_PM_OPS(exynos_tmu_pm,
			 exynos_tmu_suspend, exynos_tmu_resume);
#define EXYNOS_TMU_PM	(&exynos_tmu_pm)
#else
#define EXYNOS_TMU_PM	NULL
#endif

static struct platform_driver exynos_tmu_driver = {
	.driver = {
		.name   = "exynos-tmu",
		.owner  = THIS_MODULE,
		.pm     = EXYNOS_TMU_PM,
		.of_match_table = exynos_tmu_match,
	},
	.probe = exynos_tmu_probe,
	.remove	= exynos_tmu_remove,
};

module_platform_driver(exynos_tmu_driver);

MODULE_DESCRIPTION("EXYNOS TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:exynos-tmu");