// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 - 2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Author:
 *	Mikko Perttunen <mperttunen@nvidia.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/thermal.h>

#include <dt-bindings/thermal/tegra124-soctherm.h>

#include "../thermal_core.h"
#include "soctherm.h"

/* per-tsensor configuration registers (offsets from the sensor base) */
#define SENSOR_CONFIG0				0
#define SENSOR_CONFIG0_STOP			BIT(0)
#define SENSOR_CONFIG0_CPTR_OVER		BIT(2)
#define SENSOR_CONFIG0_OVER			BIT(3)
#define SENSOR_CONFIG0_TCALC_OVER		BIT(4)
#define SENSOR_CONFIG0_TALL_MASK		(0xfffff << 8)
#define SENSOR_CONFIG0_TALL_SHIFT		8

#define SENSOR_CONFIG1				4
#define SENSOR_CONFIG1_TSAMPLE_MASK		0x3ff
#define SENSOR_CONFIG1_TSAMPLE_SHIFT		0
#define SENSOR_CONFIG1_TIDDQ_EN_MASK		(0x3f << 15)
#define SENSOR_CONFIG1_TIDDQ_EN_SHIFT		15
#define SENSOR_CONFIG1_TEN_COUNT_MASK		(0x3f << 24)
#define SENSOR_CONFIG1_TEN_COUNT_SHIFT		24
#define SENSOR_CONFIG1_TEMP_ENABLE		BIT(31)

/*
 * SENSOR_CONFIG2 is defined in soctherm.h
 * because it will be used by tegra_soctherm_fuse.c
 */

#define SENSOR_STATUS0				0xc
#define SENSOR_STATUS0_VALID_MASK		BIT(31)
#define SENSOR_STATUS0_CAPTURE_MASK		0xffff

#define SENSOR_STATUS1				0x10
#define SENSOR_STATUS1_TEMP_VALID_MASK		BIT(31)
#define SENSOR_STATUS1_TEMP_MASK		0xffff

/* layout of a raw temperature readback word, see translate_temp() */
#define READBACK_VALUE_MASK			0xff00
#define READBACK_VALUE_SHIFT			8
#define READBACK_ADD_HALF			BIT(7)
#define READBACK_NEGATE				BIT(0)

/*
 * THERMCTL_LEVEL0_GROUP_CPU is defined in soctherm.h
 * because it will be used by tegraxxx_soctherm.c
 */
#define THERMCTL_LVL0_CPU0_EN_MASK		BIT(8)
#define THERMCTL_LVL0_CPU0_CPU_THROT_MASK	(0x3 << 5)
#define THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT	0x1
#define THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY	0x2
#define THERMCTL_LVL0_CPU0_GPU_THROT_MASK	(0x3 << 3)
#define THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT	0x1
#define THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY	0x2
#define THERMCTL_LVL0_CPU0_MEM_THROT_MASK	BIT(2)
#define THERMCTL_LVL0_CPU0_STATUS_MASK		0x3

#define THERMCTL_LVL0_UP_STATS			0x10
#define THERMCTL_LVL0_DN_STATS			0x14

#define THERMCTL_INTR_STATUS			0x84

/* thermal interrupt status bits: {M,G,C,P} = MEM/GPU/CPU/PLLX, {U,D} = up/down */
#define TH_INTR_MD0_MASK			BIT(25)
#define TH_INTR_MU0_MASK			BIT(24)
#define TH_INTR_GD0_MASK			BIT(17)
#define TH_INTR_GU0_MASK			BIT(16)
#define TH_INTR_CD0_MASK			BIT(9)
#define TH_INTR_CU0_MASK			BIT(8)
#define TH_INTR_PD0_MASK			BIT(1)
#define TH_INTR_PU0_MASK			BIT(0)
#define TH_INTR_IGNORE_MASK			0xFCFCFCFC

#define THERMCTL_STATS_CTL			0x94
#define STATS_CTL_CLR_DN			0x8
#define STATS_CTL_EN_DN				0x4
#define STATS_CTL_CLR_UP			0x2
#define STATS_CTL_EN_UP				0x1

/* over-current alarm 1 (OC1) configuration registers */
#define OC1_CFG					0x310
#define OC1_CFG_LONG_LATENCY_MASK		BIT(6)
#define OC1_CFG_HW_RESTORE_MASK			BIT(5)
#define OC1_CFG_PWR_GOOD_MASK_MASK		BIT(4)
#define OC1_CFG_THROTTLE_MODE_MASK		(0x3 << 2)
#define OC1_CFG_ALARM_POLARITY_MASK		BIT(1)
#define OC1_CFG_EN_THROTTLE_MASK		BIT(0)

#define OC1_CNT_THRESHOLD			0x314
#define OC1_THROTTLE_PERIOD			0x318
#define OC1_ALARM_COUNT				0x31c
#define OC1_FILTER				0x320
#define OC1_STATS				0x3a8

#define OC_INTR_STATUS				0x39c
#define OC_INTR_ENABLE				0x3a0
#define OC_INTR_DISABLE				0x3a4
#define OC_STATS_CTL				0x3c4
#define OC_STATS_CTL_CLR_ALL			0x2
#define OC_STATS_CTL_EN_ALL			0x1

#define OC_INTR_OC1_MASK			BIT(0)
#define OC_INTR_OC2_MASK			BIT(1)
#define OC_INTR_OC3_MASK			BIT(2)
#define OC_INTR_OC4_MASK			BIT(3)
#define OC_INTR_OC5_MASK			BIT(4)

#define THROT_GLOBAL_CFG			0x400
#define THROT_GLOBAL_ENB_MASK			BIT(0)

#define CPU_PSKIP_STATUS			0x418
#define XPU_PSKIP_STATUS_M_MASK			(0xff << 12)
#define XPU_PSKIP_STATUS_N_MASK			(0xff << 4)
#define XPU_PSKIP_STATUS_SW_OVERRIDE_MASK	BIT(1)
#define XPU_PSKIP_STATUS_ENABLED_MASK		BIT(0)

#define THROT_PRIORITY_LOCK			0x424
#define THROT_PRIORITY_LOCK_PRIORITY_MASK	0xff

#define THROT_STATUS				0x428
#define THROT_STATUS_BREACH_MASK		BIT(12)
#define THROT_STATUS_STATE_MASK			(0xff << 4)
#define THROT_STATUS_ENABLED_MASK		BIT(0)

#define THROT_PSKIP_CTRL_LITE_CPU		0x430
#define THROT_PSKIP_CTRL_ENABLE_MASK		BIT(31)
#define THROT_PSKIP_CTRL_DIVIDEND_MASK		(0xff << 8)
#define THROT_PSKIP_CTRL_DIVISOR_MASK		0xff
#define THROT_PSKIP_CTRL_VECT_GPU_MASK		(0x7 << 16)
#define THROT_PSKIP_CTRL_VECT_CPU_MASK		(0x7 << 8)
#define THROT_PSKIP_CTRL_VECT2_CPU_MASK		0x7

#define THROT_VECT_NONE				0x0 /* 3'b000 */
#define THROT_VECT_LOW				0x1 /* 3'b001 */
#define THROT_VECT_MED				0x3 /* 3'b011 */
#define THROT_VECT_HIGH				0x7 /* 3'b111 */

#define THROT_PSKIP_RAMP_LITE_CPU		0x434
#define THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK	BIT(31)
#define THROT_PSKIP_RAMP_DURATION_MASK		(0xffff << 8)
#define THROT_PSKIP_RAMP_STEP_MASK		0xff

#define THROT_PRIORITY_LITE			0x444
#define THROT_PRIORITY_LITE_PRIO_MASK		0xff

#define THROT_DELAY_LITE			0x448
#define THROT_DELAY_LITE_DELAY_MASK		0xff

/* car register offsets needed for enabling HW throttling */
#define CAR_SUPER_CCLKG_DIVIDER			0x36c
#define CDIVG_USE_THERM_CONTROLS_MASK		BIT(30)

/* ccroc register offsets needed for enabling HW throttling for Tegra132 */
#define CCROC_SUPER_CCLKG_DIVIDER		0x024

#define CCROC_GLOBAL_CFG			0x148

#define CCROC_THROT_PSKIP_RAMP_CPU		0x150
#define CCROC_THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK	BIT(31)
#define CCROC_THROT_PSKIP_RAMP_DURATION_MASK	(0xffff << 8)
#define CCROC_THROT_PSKIP_RAMP_STEP_MASK	0xff

#define CCROC_THROT_PSKIP_CTRL_CPU		0x154
#define CCROC_THROT_PSKIP_CTRL_ENB_MASK		BIT(31)
#define CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK	(0xff << 8)
#define CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK	0xff

/* get val from register(r) mask bits(m) */
#define REG_GET_MASK(r, m)	(((r) & (m)) >> (ffs(m) - 1))
/* set val(v) to mask bits(m) of register(r) */
#define REG_SET_MASK(r, m, v)	(((r) & ~(m)) | \
				 (((v) & (m >> (ffs(m) - 1))) << (ffs(m) - 1)))

/* get dividend from the depth */
#define THROT_DEPTH_DIVIDEND(depth)	((256 * (100 - (depth)) / 100) - 1)

/* gk20a nv_therm interface N:3 Mapping. Levels defined in tegra124-soctherm.h
 * level	vector
 * NONE		3'b000
 * LOW		3'b001
 * MED		3'b011
 * HIGH		3'b111
 */
#define THROT_LEVEL_TO_DEPTH(level)	((0x1 << (level)) - 1)

/* get THROT_PSKIP_xxx offset per LIGHT/HEAVY throt and CPU/GPU dev */
#define THROT_OFFSET			0x30
#define THROT_PSKIP_CTRL(throt, dev)	(THROT_PSKIP_CTRL_LITE_CPU + \
					 (THROT_OFFSET * throt) + (8 * dev))
#define THROT_PSKIP_RAMP(throt, dev)	(THROT_PSKIP_RAMP_LITE_CPU + \
					 (THROT_OFFSET * throt) + (8 * dev))

/* get THROT_xxx_CTRL offset per LIGHT/HEAVY throt */
#define THROT_PRIORITY_CTRL(throt)	(THROT_PRIORITY_LITE + \
					 (THROT_OFFSET * throt))
#define THROT_DELAY_CTRL(throt)		(THROT_DELAY_LITE + \
					 (THROT_OFFSET * throt))

/* OC alarm registers are laid out at ALARM_OFFSET strides from the OC1 bank */
#define ALARM_OFFSET			0x14
#define ALARM_CFG(throt)		(OC1_CFG + \
					 (ALARM_OFFSET * (throt - THROTTLE_OC1)))

#define ALARM_CNT_THRESHOLD(throt)	(OC1_CNT_THRESHOLD + \
					 (ALARM_OFFSET * (throt - THROTTLE_OC1)))

#define ALARM_THROTTLE_PERIOD(throt)	(OC1_THROTTLE_PERIOD + \
					 (ALARM_OFFSET * (throt - THROTTLE_OC1)))

#define ALARM_ALARM_COUNT(throt)	(OC1_ALARM_COUNT + \
					 (ALARM_OFFSET * (throt - THROTTLE_OC1)))

#define ALARM_FILTER(throt)		(OC1_FILTER + \
					 (ALARM_OFFSET * (throt - THROTTLE_OC1)))

#define ALARM_STATS(throt)		(OC1_STATS + \
					 (4 * (throt - THROTTLE_OC1)))

/* get CCROC_THROT_PSKIP_xxx offset per HIGH/MED/LOW vect*/
#define CCROC_THROT_OFFSET			0x0c
#define CCROC_THROT_PSKIP_CTRL_CPU_REG(vect)	(CCROC_THROT_PSKIP_CTRL_CPU + \
						 (CCROC_THROT_OFFSET * vect))
#define CCROC_THROT_PSKIP_RAMP_CPU_REG(vect)	(CCROC_THROT_PSKIP_RAMP_CPU + \
						 (CCROC_THROT_OFFSET * vect))

/* get THERMCTL_LEVELx offset per CPU/GPU/MEM/TSENSE rg and LEVEL0~3 lv */
#define THERMCTL_LVL_REGS_SIZE		0x20
#define THERMCTL_LVL_REG(rg, lv)	((rg) + ((lv) * THERMCTL_LVL_REGS_SIZE))

#define OC_THROTTLE_MODE_DISABLED	0
#define OC_THROTTLE_MODE_BRIEF		2

/* temperature limits the HW can represent, in millicelsius */
static const int min_low_temp = -127000;
static const int max_high_temp = 127000;

enum soctherm_throttle_id {
	THROTTLE_LIGHT = 0,
	THROTTLE_HEAVY,
	THROTTLE_OC1,
	THROTTLE_OC2,
	THROTTLE_OC3,
	THROTTLE_OC4,
	THROTTLE_OC5, /* OC5 is reserved */
	THROTTLE_SIZE,
};

enum soctherm_oc_irq_id {
	TEGRA_SOC_OC_IRQ_1,
	TEGRA_SOC_OC_IRQ_2,
	TEGRA_SOC_OC_IRQ_3,
	TEGRA_SOC_OC_IRQ_4,
	TEGRA_SOC_OC_IRQ_5,
	TEGRA_SOC_OC_IRQ_MAX,
};

enum soctherm_throttle_dev_id {
	THROTTLE_DEV_CPU = 0,
	THROTTLE_DEV_GPU,
	THROTTLE_DEV_SIZE,
};

/* names indexed by enum soctherm_throttle_id; also used as cdev type names */
static const char *const throt_names[] = {
	[THROTTLE_LIGHT] = "light",
	[THROTTLE_HEAVY] = "heavy",
	[THROTTLE_OC1]   = "oc1",
	[THROTTLE_OC2]   = "oc2",
	[THROTTLE_OC3]   = "oc3",
	[THROTTLE_OC4]   = "oc4",
	[THROTTLE_OC5]   = "oc5",
};

struct tegra_soctherm;

/* per-thermal-zone glue between the thermal core and one sensor group */
struct tegra_thermctl_zone {
	void __iomem *reg;			/* sensor temperature register */
	struct device *dev;
	struct tegra_soctherm *ts;
	struct thermal_zone_device *tz;
	const struct tegra_tsensor_group *sg;
};

/* DT-provided configuration for one over-current (OC) alarm */
struct soctherm_oc_cfg {
	u32 active_low;
	u32 throt_period;
	u32 alarm_cnt_thresh;
	u32 alarm_filter;
	u32 mode;
	bool intr_en;
};

/* one entry per throttle id; @init marks entries populated from DT */
struct soctherm_throt_cfg {
	const char *name;
	unsigned int id;
	u8 priority;
	u8 cpu_throt_level;
	u32 cpu_throt_depth;
	u32 gpu_throt_level;
	struct soctherm_oc_cfg oc_cfg;
	struct thermal_cooling_device *cdev;
	bool init;
};

/* driver instance state */
struct tegra_soctherm {
	struct reset_control *reset;
	struct clk *clock_tsensor;
	struct clk *clock_soctherm;
	void __iomem *regs;
	void __iomem *clk_regs;
	void __iomem *ccroc_regs;

	int thermal_irq;
	int edp_irq;

	u32 *calib;
	struct thermal_zone_device **thermctl_tzs;
	struct tegra_soctherm_soc *soc;

	struct soctherm_throt_cfg throt_cfgs[THROTTLE_SIZE];

	struct dentry
	*debugfs_dir;

	struct mutex thermctl_lock;	/* serializes THERMCTL_INTR_* rmw cycles */
};

/* chip data backing the nested over-current (OC) interrupt domain */
struct soctherm_oc_irq_chip_data {
	struct mutex		irq_lock; /* serialize OC IRQs */
	struct irq_chip		irq_chip;
	struct irq_domain	*domain;
	int			irq_enable; /* bitmask of enabled nested OC irqs */
};

static struct soctherm_oc_irq_chip_data soc_irq_cdata;

/**
 * ccroc_writel() - writes a value to a CCROC register
 * @ts: pointer to a struct tegra_soctherm
 * @value: the value to write
 * @reg: the register offset
 *
 * Writes @value to @reg. No return value.
 */
static inline void ccroc_writel(struct tegra_soctherm *ts, u32 value, u32 reg)
{
	writel(value, (ts->ccroc_regs + reg));
}

/**
 * ccroc_readl() - reads specified register from CCROC IP block
 * @ts: pointer to a struct tegra_soctherm
 * @reg: register address to be read
 *
 * Return: the value of the register
 */
static inline u32 ccroc_readl(struct tegra_soctherm *ts, u32 reg)
{
	return readl(ts->ccroc_regs + reg);
}

/* program CONFIG0/1/2 of tsensor @i and enable sampling on it */
static void enable_tsensor(struct tegra_soctherm *tegra, unsigned int i)
{
	const struct tegra_tsensor *sensor = &tegra->soc->tsensors[i];
	void __iomem *base = tegra->regs + sensor->base;
	unsigned int val;

	val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT;
	writel(val, base + SENSOR_CONFIG0);

	val = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT;
	val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT;
	val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT;
	val |= SENSOR_CONFIG1_TEMP_ENABLE;
	writel(val, base + SENSOR_CONFIG1);

	/* SENSOR_CONFIG2 holds the per-sensor fuse calibration value */
	writel(tegra->calib[i], base + SENSOR_CONFIG2);
}

/*
 * Translate from soctherm readback format to millicelsius.
 * The soctherm readback format in bits is as follows:
 *   TTTTTTTT H______N
 * where T's contain the temperature in Celsius,
 * H denotes an addition of 0.5 Celsius and N denotes negation
 * of the final value.
 */
static int translate_temp(u16 val)
{
	int t;

	t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000;
	if (val & READBACK_ADD_HALF)
		t += 500;
	if (val & READBACK_NEGATE)
		t *= -1;

	return t;
}

/* thermal_zone_of_device_ops .get_temp: read and translate one sensor group */
static int tegra_thermctl_get_temp(void *data, int *out_temp)
{
	struct tegra_thermctl_zone *zone = data;
	u32 val;

	val = readl(zone->reg);
	val = REG_GET_MASK(val, zone->sg->sensor_temp_mask);
	*out_temp = translate_temp(val);

	return 0;
}

/**
 * enforce_temp_range() - check and enforce temperature range [min, max]
 * @dev: struct device * of the SOC_THERM instance (for logging)
 * @trip_temp: the trip temperature to check
 *
 * Checks and enforces the permitted temperature range that SOC_THERM
 * HW can support. This is done while taking care of precision.
 *
 * Return: The precision adjusted capped temperature in millicelsius.
 */
static int enforce_temp_range(struct device *dev, int trip_temp)
{
	int temp;

	temp = clamp_val(trip_temp, min_low_temp, max_high_temp);
	if (temp != trip_temp)
		dev_info(dev, "soctherm: trip temperature %d forced to %d\n",
			 trip_temp, temp);
	return temp;
}

/**
 * thermtrip_program() - Configures the hardware to shut down the
 * system if a given sensor group reaches a given temperature
 * @dev: ptr to the struct device for the SOC_THERM IP block
 * @sg: pointer to the sensor group to set the thermtrip temperature for
 * @trip_temp: the temperature in millicelsius to trigger the thermal trip at
 *
 * Sets the thermal trip threshold of the given sensor group to be the
 * @trip_temp. If this threshold is crossed, the hardware will shut
 * down.
 *
 * Note that, although @trip_temp is specified in millicelsius, the
 * hardware is programmed in degrees Celsius.
 *
 * Return: 0 upon success, or %-EINVAL upon failure.
 */
static int thermtrip_program(struct device *dev,
			     const struct tegra_tsensor_group *sg,
			     int trip_temp)
{
	struct tegra_soctherm *ts = dev_get_drvdata(dev);
	int temp;
	u32 r;

	if (!sg || !sg->thermtrip_threshold_mask)
		return -EINVAL;

	/* HW threshold field is in units of thresh_grain millicelsius */
	temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;

	r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
	r = REG_SET_MASK(r, sg->thermtrip_threshold_mask, temp);
	r = REG_SET_MASK(r, sg->thermtrip_enable_mask, 1);
	r = REG_SET_MASK(r, sg->thermtrip_any_en_mask, 0);
	writel(r, ts->regs + THERMCTL_THERMTRIP_CTL);

	return 0;
}

/**
 * throttrip_program() - Configures the hardware to throttle the
 * pulse if a given sensor group reaches a given temperature
 * @dev: ptr to the struct device for the SOC_THERM IP block
 * @sg: pointer to the sensor group to set the thermtrip temperature for
 * @stc: pointer to the throttle need to be triggered
 * @trip_temp: the temperature in millicelsius to trigger the thermal trip at
 *
 * Sets the thermal trip threshold and throttle event of the given sensor
 * group. If this threshold is crossed, the hardware will trigger the
 * throttle.
 *
 * Note that, although @trip_temp is specified in millicelsius, the
 * hardware is programmed in degrees Celsius.
 *
 * Return: 0 upon success, or %-EINVAL upon failure.
 */
static int throttrip_program(struct device *dev,
			     const struct tegra_tsensor_group *sg,
			     struct soctherm_throt_cfg *stc,
			     int trip_temp)
{
	struct tegra_soctherm *ts = dev_get_drvdata(dev);
	int temp, cpu_throt, gpu_throt;
	unsigned int throt;
	u32 r, reg_off;

	if (!sg || !stc || !stc->init)
		return -EINVAL;

	temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;

	/* Hardcode LIGHT on LEVEL1 and HEAVY on LEVEL2 */
	throt = stc->id;
	reg_off = THERMCTL_LVL_REG(sg->thermctl_lvl0_offset, throt + 1);

	if (throt == THROTTLE_LIGHT) {
		cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT;
		gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT;
	} else {
		cpu_throt = THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY;
		gpu_throt = THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY;
		if (throt != THROTTLE_HEAVY)
			dev_warn(dev,
				 "invalid throt id %d - assuming HEAVY",
				 throt);
	}

	/* same threshold up and down; enable the level interrupt */
	r = readl(ts->regs + reg_off);
	r = REG_SET_MASK(r, sg->thermctl_lvl0_up_thresh_mask, temp);
	r = REG_SET_MASK(r, sg->thermctl_lvl0_dn_thresh_mask, temp);
	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_CPU_THROT_MASK, cpu_throt);
	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_GPU_THROT_MASK, gpu_throt);
	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1);
	writel(r, ts->regs + reg_off);

	return 0;
}

/* look up a throttle configuration by cdev/type name; NULL if not found */
static struct soctherm_throt_cfg *
find_throttle_cfg_by_name(struct tegra_soctherm *ts, const char *name)
{
	unsigned int i;

	for (i = 0; ts->throt_cfgs[i].name; i++)
		if (!strcmp(ts->throt_cfgs[i].name, name))
			return &ts->throt_cfgs[i];

	return NULL;
}

/*
 * Return the DT-provided thermtrip temperature for sensor group @id,
 * or min_low_temp when no thermtrips entry exists for it.
 */
static int tsensor_group_thermtrip_get(struct tegra_soctherm *ts, int id)
{
	int i, temp = min_low_temp;
	struct tsensor_group_thermtrips *tt = ts->soc->thermtrips;

	if (id >= TEGRA124_SOCTHERM_SENSOR_NUM)
		return temp;

	if (tt) {
		for (i = 0; i < ts->soc->num_ttgs; i++) {
			if
			    (tt[i].id == id)
				return tt[i].temp;
		}
	}

	return temp;
}

/* thermal_zone_of_device_ops .set_trip_temp: push a changed trip into HW */
static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
{
	struct tegra_thermctl_zone *zone = data;
	struct thermal_zone_device *tz = zone->tz;
	struct tegra_soctherm *ts = zone->ts;
	const struct tegra_tsensor_group *sg = zone->sg;
	struct device *dev = zone->dev;
	enum thermal_trip_type type;
	int ret;

	if (!tz)
		return -EINVAL;

	ret = tz->ops->get_trip_type(tz, trip, &type);
	if (ret)
		return ret;

	if (type == THERMAL_TRIP_CRITICAL) {
		/*
		 * If thermtrips property is set in DT,
		 * doesn't need to program critical type trip to HW,
		 * if not, program critical trip to HW.
		 */
		if (min_low_temp == tsensor_group_thermtrip_get(ts, sg->id))
			return thermtrip_program(dev, sg, temp);
		else
			return 0;

	} else if (type == THERMAL_TRIP_HOT) {
		int i;

		/* program the throttle config bound to this trip's cdev */
		for (i = 0; i < THROTTLE_SIZE; i++) {
			struct thermal_cooling_device *cdev;
			struct soctherm_throt_cfg *stc;

			if (!ts->throt_cfgs[i].init)
				continue;

			cdev = ts->throt_cfgs[i].cdev;
			if (get_thermal_instance(tz, cdev, trip))
				stc = find_throttle_cfg_by_name(ts, cdev->type);
			else
				continue;

			return throttrip_program(dev, sg, stc, temp);
		}
	}

	return 0;
}

/* thermal_zone_of_device_ops .get_trend: derive trend from trip threshold */
static int tegra_thermctl_get_trend(void *data, int trip,
				    enum thermal_trend *trend)
{
	struct tegra_thermctl_zone *zone = data;
	struct thermal_zone_device *tz = zone->tz;
	int trip_temp, temp, last_temp, ret;

	if (!tz)
		return -EINVAL;

	ret = tz->ops->get_trip_temp(zone->tz, trip, &trip_temp);
	if (ret)
		return ret;

	temp = READ_ONCE(tz->temperature);
	last_temp = READ_ONCE(tz->last_temperature);

	if (temp > trip_temp) {
		if (temp >= last_temp)
			*trend = THERMAL_TREND_RAISING;
		else
			*trend = THERMAL_TREND_STABLE;
	} else if (temp <
		   trip_temp) {
		*trend = THERMAL_TREND_DROPPING;
	} else {
		*trend = THERMAL_TREND_STABLE;
	}

	return 0;
}

/* unmask the up/down level-0 interrupts of this zone's sensor group */
static void thermal_irq_enable(struct tegra_thermctl_zone *zn)
{
	u32 r;

	/* multiple zones could be handling and setting trips at once */
	mutex_lock(&zn->ts->thermctl_lock);
	r = readl(zn->ts->regs + THERMCTL_INTR_ENABLE);
	r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, TH_INTR_UP_DN_EN);
	writel(r, zn->ts->regs + THERMCTL_INTR_ENABLE);
	mutex_unlock(&zn->ts->thermctl_lock);
}

/* mask the up/down level-0 interrupts of this zone's sensor group */
static void thermal_irq_disable(struct tegra_thermctl_zone *zn)
{
	u32 r;

	/* multiple zones could be handling and setting trips at once */
	mutex_lock(&zn->ts->thermctl_lock);
	r = readl(zn->ts->regs + THERMCTL_INTR_DISABLE);
	r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, 0);
	writel(r, zn->ts->regs + THERMCTL_INTR_DISABLE);
	mutex_unlock(&zn->ts->thermctl_lock);
}

/*
 * thermal_zone_of_device_ops .set_trips: with the zone's interrupts masked,
 * program the LEVEL0 up (@hi) and down (@lo) thresholds, then re-enable.
 */
static int tegra_thermctl_set_trips(void *data, int lo, int hi)
{
	struct tegra_thermctl_zone *zone = data;
	u32 r;

	thermal_irq_disable(zone);

	r = readl(zone->ts->regs + zone->sg->thermctl_lvl0_offset);
	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 0);
	writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);

	lo = enforce_temp_range(zone->dev, lo) / zone->ts->soc->thresh_grain;
	hi = enforce_temp_range(zone->dev, hi) / zone->ts->soc->thresh_grain;
	dev_dbg(zone->dev, "%s hi:%d, lo:%d\n", __func__, hi, lo);

	r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_up_thresh_mask, hi);
	r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_dn_thresh_mask, lo);
	r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1);
	writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);

	thermal_irq_enable(zone);

	return 0;
}

static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
	.get_temp = tegra_thermctl_get_temp,
	.set_trip_temp =
			 tegra_thermctl_set_trip_temp,
	.get_trend = tegra_thermctl_get_trend,
	.set_trips = tegra_thermctl_set_trips,
};

/*
 * Find the first trip of type HOT in @tz; store its index in @trip and its
 * temperature in @temp.  Return 0 on success, -EINVAL if there is none.
 */
static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
{
	int ntrips, i, ret;
	enum thermal_trip_type type;

	ntrips = of_thermal_get_ntrips(tz);
	if (ntrips <= 0)
		return -EINVAL;

	for (i = 0; i < ntrips; i++) {
		ret = tz->ops->get_trip_type(tz, i, &type);
		if (ret)
			return -EINVAL;
		if (type == THERMAL_TRIP_HOT) {
			ret = tz->ops->get_trip_temp(tz, i, temp);
			if (!ret)
				*trip = i;

			return ret;
		}
	}

	return -EINVAL;
}

/**
 * tegra_soctherm_set_hwtrips() - set HW trip point from DT data
 * @dev: struct device * of the SOC_THERM instance
 * @sg: pointer to the sensor group whose trips are being programmed
 * @tz: the thermal zone bound to that sensor group
 *
 * Configure the SOC_THERM HW trip points, setting "THERMTRIP"
 * "THROTTLE" trip points, using "thermtrips", "critical" or "hot"
 * type trip_temp from thermal zone.
 * After they have been configured, THERMTRIP or THROTTLE will take
 * action when the configured SoC thermal sensor group reaches a
 * certain temperature.
 *
 * Return: 0 upon success, or a negative error code on failure.
 * "Success" does not mean that trips was enabled; it could also
 * mean that no node was found in DT.
762 * THERMTRIP has been enabled successfully when a message similar to 763 * this one appears on the serial console: 764 * "thermtrip: will shut down when sensor group XXX reaches YYYYYY mC" 765 * THROTTLE has been enabled successfully when a message similar to 766 * this one appears on the serial console: 767 * ""throttrip: will throttle when sensor group XXX reaches YYYYYY mC" 768 */ 769 static int tegra_soctherm_set_hwtrips(struct device *dev, 770 const struct tegra_tsensor_group *sg, 771 struct thermal_zone_device *tz) 772 { 773 struct tegra_soctherm *ts = dev_get_drvdata(dev); 774 struct soctherm_throt_cfg *stc; 775 int i, trip, temperature, ret; 776 777 /* Get thermtrips. If missing, try to get critical trips. */ 778 temperature = tsensor_group_thermtrip_get(ts, sg->id); 779 if (min_low_temp == temperature) 780 if (tz->ops->get_crit_temp(tz, &temperature)) 781 temperature = max_high_temp; 782 783 ret = thermtrip_program(dev, sg, temperature); 784 if (ret) { 785 dev_err(dev, "thermtrip: %s: error during enable\n", sg->name); 786 return ret; 787 } 788 789 dev_info(dev, "thermtrip: will shut down when %s reaches %d mC\n", 790 sg->name, temperature); 791 792 ret = get_hot_temp(tz, &trip, &temperature); 793 if (ret) { 794 dev_info(dev, "throttrip: %s: missing hot temperature\n", 795 sg->name); 796 return 0; 797 } 798 799 for (i = 0; i < THROTTLE_OC1; i++) { 800 struct thermal_cooling_device *cdev; 801 802 if (!ts->throt_cfgs[i].init) 803 continue; 804 805 cdev = ts->throt_cfgs[i].cdev; 806 if (get_thermal_instance(tz, cdev, trip)) 807 stc = find_throttle_cfg_by_name(ts, cdev->type); 808 else 809 continue; 810 811 ret = throttrip_program(dev, sg, stc, temperature); 812 if (ret) { 813 dev_err(dev, "throttrip: %s: error during enable\n", 814 sg->name); 815 return ret; 816 } 817 818 dev_info(dev, 819 "throttrip: will throttle when %s reaches %d mC\n", 820 sg->name, temperature); 821 break; 822 } 823 824 if (i == THROTTLE_SIZE) 825 dev_info(dev, "throttrip: %s: 
missing throttle cdev\n", 826 sg->name); 827 828 return 0; 829 } 830 831 static irqreturn_t soctherm_thermal_isr(int irq, void *dev_id) 832 { 833 struct tegra_soctherm *ts = dev_id; 834 u32 r; 835 836 /* Case for no lock: 837 * Although interrupts are enabled in set_trips, there is still no need 838 * to lock here because the interrupts are disabled before programming 839 * new trip points. Hence there cant be a interrupt on the same sensor. 840 * An interrupt can however occur on a sensor while trips are being 841 * programmed on a different one. This beign a LEVEL interrupt won't 842 * cause a new interrupt but this is taken care of by the re-reading of 843 * the STATUS register in the thread function. 844 */ 845 r = readl(ts->regs + THERMCTL_INTR_STATUS); 846 writel(r, ts->regs + THERMCTL_INTR_DISABLE); 847 848 return IRQ_WAKE_THREAD; 849 } 850 851 /** 852 * soctherm_thermal_isr_thread() - Handles a thermal interrupt request 853 * @irq: The interrupt number being requested; not used 854 * @dev_id: Opaque pointer to tegra_soctherm; 855 * 856 * Clears the interrupt status register if there are expected 857 * interrupt bits set. 858 * The interrupt(s) are then handled by updating the corresponding 859 * thermal zones. 860 * 861 * An error is logged if any unexpected interrupt bits are set. 862 * 863 * Disabled interrupts are re-enabled. 864 * 865 * Return: %IRQ_HANDLED. Interrupt was handled and no further processing 866 * is needed. 
 */
static irqreturn_t soctherm_thermal_isr_thread(int irq, void *dev_id)
{
	struct tegra_soctherm *ts = dev_id;
	struct thermal_zone_device *tz;
	u32 st, ex = 0, cp = 0, gp = 0, pl = 0, me = 0;

	st = readl(ts->regs + THERMCTL_INTR_STATUS);

	/* deliberately clear expected interrupts handled in SW */
	cp |= st & TH_INTR_CD0_MASK;	/* CPU level-0 down/up */
	cp |= st & TH_INTR_CU0_MASK;

	gp |= st & TH_INTR_GD0_MASK;	/* GPU */
	gp |= st & TH_INTR_GU0_MASK;

	pl |= st & TH_INTR_PD0_MASK;	/* PLLX */
	pl |= st & TH_INTR_PU0_MASK;

	me |= st & TH_INTR_MD0_MASK;	/* MEM */
	me |= st & TH_INTR_MU0_MASK;

	ex |= cp | gp | pl | me;
	if (ex) {
		/* ack the expected bits, then notify the affected zones */
		writel(ex, ts->regs + THERMCTL_INTR_STATUS);
		st &= ~ex;

		if (cp) {
			tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_CPU];
			thermal_zone_device_update(tz,
						   THERMAL_EVENT_UNSPECIFIED);
		}

		if (gp) {
			tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_GPU];
			thermal_zone_device_update(tz,
						   THERMAL_EVENT_UNSPECIFIED);
		}

		if (pl) {
			tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_PLLX];
			thermal_zone_device_update(tz,
						   THERMAL_EVENT_UNSPECIFIED);
		}

		if (me) {
			tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_MEM];
			thermal_zone_device_update(tz,
						   THERMAL_EVENT_UNSPECIFIED);
		}
	}

	/* deliberately ignore expected interrupts NOT handled in SW */
	ex |= TH_INTR_IGNORE_MASK;
	st &= ~ex;

	if (st) {
		/* Whine about any other unexpected INTR bits still set */
		pr_err("soctherm: Ignored unexpected INTRs 0x%08x\n", st);
		writel(st, ts->regs + THERMCTL_INTR_STATUS);
	}

	return IRQ_HANDLED;
}

/**
 * soctherm_oc_intr_enable() - Enables the soctherm over-current interrupt
 * @ts: pointer to a struct tegra_soctherm
 * @alarm: The soctherm throttle id
 * @enable: Flag indicating enable the soctherm over-current
 *          interrupt or disable it
 *
 * Enables a specific over-current pins @alarm to raise an interrupt if the flag
 * is set
and the alarm corresponds to OC1, OC2, OC3, or OC4. 940 */ 941 static void soctherm_oc_intr_enable(struct tegra_soctherm *ts, 942 enum soctherm_throttle_id alarm, 943 bool enable) 944 { 945 u32 r; 946 947 if (!enable) 948 return; 949 950 r = readl(ts->regs + OC_INTR_ENABLE); 951 switch (alarm) { 952 case THROTTLE_OC1: 953 r = REG_SET_MASK(r, OC_INTR_OC1_MASK, 1); 954 break; 955 case THROTTLE_OC2: 956 r = REG_SET_MASK(r, OC_INTR_OC2_MASK, 1); 957 break; 958 case THROTTLE_OC3: 959 r = REG_SET_MASK(r, OC_INTR_OC3_MASK, 1); 960 break; 961 case THROTTLE_OC4: 962 r = REG_SET_MASK(r, OC_INTR_OC4_MASK, 1); 963 break; 964 default: 965 r = 0; 966 break; 967 } 968 writel(r, ts->regs + OC_INTR_ENABLE); 969 } 970 971 /** 972 * soctherm_handle_alarm() - Handles soctherm alarms 973 * @alarm: The soctherm throttle id 974 * 975 * "Handles" over-current alarms (OC1, OC2, OC3, and OC4) by printing 976 * a warning or informative message. 977 * 978 * Return: -EINVAL for @alarm = THROTTLE_OC3, otherwise 0 (success). 979 */ 980 static int soctherm_handle_alarm(enum soctherm_throttle_id alarm) 981 { 982 int rv = -EINVAL; 983 984 switch (alarm) { 985 case THROTTLE_OC1: 986 pr_debug("soctherm: Successfully handled OC1 alarm\n"); 987 rv = 0; 988 break; 989 990 case THROTTLE_OC2: 991 pr_debug("soctherm: Successfully handled OC2 alarm\n"); 992 rv = 0; 993 break; 994 995 case THROTTLE_OC3: 996 pr_debug("soctherm: Successfully handled OC3 alarm\n"); 997 rv = 0; 998 break; 999 1000 case THROTTLE_OC4: 1001 pr_debug("soctherm: Successfully handled OC4 alarm\n"); 1002 rv = 0; 1003 break; 1004 1005 default: 1006 break; 1007 } 1008 1009 if (rv) 1010 pr_err("soctherm: ERROR in handling %s alarm\n", 1011 throt_names[alarm]); 1012 1013 return rv; 1014 } 1015 1016 /** 1017 * soctherm_edp_isr_thread() - log an over-current interrupt request 1018 * @irq: OC irq number. Currently not being used. 
 * See description
 * @arg: a void pointer for callback, currently not being used
 *
 * Over-current events are handled in hardware. This function is called to log
 * and handle any OC events that happened. Additionally, it checks every
 * over-current interrupt register; if any bits are set that were not
 * expected (i.e. any discrepancy in interrupt status), the discrepancy
 * will be logged.
 *
 * Return: %IRQ_HANDLED
 */
static irqreturn_t soctherm_edp_isr_thread(int irq, void *arg)
{
	struct tegra_soctherm *ts = arg;
	u32 st, ex, oc1, oc2, oc3, oc4;

	st = readl(ts->regs + OC_INTR_STATUS);

	/* deliberately clear expected interrupts handled in SW */
	oc1 = st & OC_INTR_OC1_MASK;
	oc2 = st & OC_INTR_OC2_MASK;
	oc3 = st & OC_INTR_OC3_MASK;
	oc4 = st & OC_INTR_OC4_MASK;
	ex = oc1 | oc2 | oc3 | oc4;

	pr_err("soctherm: OC ALARM 0x%08x\n", ex);
	if (ex) {
		/* ack, then re-enable each alarm that was handled cleanly */
		writel(st, ts->regs + OC_INTR_STATUS);
		st &= ~ex;

		if (oc1 && !soctherm_handle_alarm(THROTTLE_OC1))
			soctherm_oc_intr_enable(ts, THROTTLE_OC1, true);

		if (oc2 && !soctherm_handle_alarm(THROTTLE_OC2))
			soctherm_oc_intr_enable(ts, THROTTLE_OC2, true);

		if (oc3 && !soctherm_handle_alarm(THROTTLE_OC3))
			soctherm_oc_intr_enable(ts, THROTTLE_OC3, true);

		if (oc4 && !soctherm_handle_alarm(THROTTLE_OC4))
			soctherm_oc_intr_enable(ts, THROTTLE_OC4, true);

		/* forward to nested consumers that enabled the OC irq */
		if (oc1 && soc_irq_cdata.irq_enable & BIT(0))
			handle_nested_irq(
				irq_find_mapping(soc_irq_cdata.domain, 0));

		if (oc2 && soc_irq_cdata.irq_enable & BIT(1))
			handle_nested_irq(
				irq_find_mapping(soc_irq_cdata.domain, 1));

		if (oc3 && soc_irq_cdata.irq_enable & BIT(2))
			handle_nested_irq(
				irq_find_mapping(soc_irq_cdata.domain, 2));

		if (oc4 && soc_irq_cdata.irq_enable & BIT(3))
			handle_nested_irq(
irq_find_mapping(soc_irq_cdata.domain, 3)); 1075 } 1076 1077 if (st) { 1078 pr_err("soctherm: Ignored unexpected OC ALARM 0x%08x\n", st); 1079 writel(st, ts->regs + OC_INTR_STATUS); 1080 } 1081 1082 return IRQ_HANDLED; 1083 } 1084 1085 /** 1086 * soctherm_edp_isr() - Disables any active interrupts 1087 * @irq: The interrupt request number 1088 * @arg: Opaque pointer to an argument 1089 * 1090 * Writes to the OC_INTR_DISABLE register the over current interrupt status, 1091 * masking any asserted interrupts. Doing this prevents the same interrupts 1092 * from triggering this isr repeatedly. The thread woken by this isr will 1093 * handle asserted interrupts and subsequently unmask/re-enable them. 1094 * 1095 * The OC_INTR_DISABLE register indicates which OC interrupts 1096 * have been disabled. 1097 * 1098 * Return: %IRQ_WAKE_THREAD, handler requests to wake the handler thread 1099 */ 1100 static irqreturn_t soctherm_edp_isr(int irq, void *arg) 1101 { 1102 struct tegra_soctherm *ts = arg; 1103 u32 r; 1104 1105 if (!ts) 1106 return IRQ_NONE; 1107 1108 r = readl(ts->regs + OC_INTR_STATUS); 1109 writel(r, ts->regs + OC_INTR_DISABLE); 1110 1111 return IRQ_WAKE_THREAD; 1112 } 1113 1114 /** 1115 * soctherm_oc_irq_lock() - locks the over-current interrupt request 1116 * @data: Interrupt request data 1117 * 1118 * Looks up the chip data from @data and locks the mutex associated with 1119 * a particular over-current interrupt request. 1120 */ 1121 static void soctherm_oc_irq_lock(struct irq_data *data) 1122 { 1123 struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data); 1124 1125 mutex_lock(&d->irq_lock); 1126 } 1127 1128 /** 1129 * soctherm_oc_irq_sync_unlock() - Unlocks the OC interrupt request 1130 * @data: Interrupt request data 1131 * 1132 * Looks up the interrupt request data @data and unlocks the mutex associated 1133 * with a particular over-current interrupt request. 
 */
static void soctherm_oc_irq_sync_unlock(struct irq_data *data)
{
	struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_unlock(&d->irq_lock);
}

/**
 * soctherm_oc_irq_enable() - Enables the SOC_THERM over-current interrupt queue
 * @data: irq_data structure of the chip
 *
 * Sets the irq_enable bit of SOC_THERM allowing SOC_THERM
 * to respond to over-current interrupts.
 */
static void soctherm_oc_irq_enable(struct irq_data *data)
{
	struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	d->irq_enable |= BIT(data->hwirq);
}

/**
 * soctherm_oc_irq_disable() - Disables overcurrent interrupt requests
 * @data: The interrupt request information
 *
 * Clears the interrupt request enable bit of the overcurrent
 * interrupt request chip data.
 */
static void soctherm_oc_irq_disable(struct irq_data *data)
{
	struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	d->irq_enable &= ~BIT(data->hwirq);
}

/* Trigger type is fixed by the hardware; accept any request as a no-op. */
static int soctherm_oc_irq_set_type(struct irq_data *data, unsigned int type)
{
	return 0;
}

/**
 * soctherm_oc_irq_map() - SOC_THERM interrupt request domain mapper
 * @h: Interrupt request domain
 * @virq: Virtual interrupt request number
 * @hw: Hardware interrupt request number
 *
 * Mapping callback function for SOC_THERM's irq_domain. When a SOC_THERM
 * interrupt request is called, the irq_domain takes the request's virtual
 * request number (much like a virtual memory address) and maps it to a
 * physical hardware request number.
 *
 * When a mapping doesn't already exist for a virtual request number, the
 * irq_domain calls this function to associate the virtual request number with
 * a hardware request number.
 *
 * Return: 0
 */
static int soctherm_oc_irq_map(struct irq_domain *h, unsigned int virq,
			       irq_hw_number_t hw)
{
	struct soctherm_oc_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	/* handlers run from the threaded edp ISR, hence nested */
	irq_set_nested_thread(virq, 1);
	return 0;
}

/**
 * soctherm_irq_domain_xlate_twocell() - xlate for soctherm interrupts
 * @d: Interrupt request domain
 * @ctrlr: Device tree node of the interrupt controller
 * @intspec: Array of u32s from DTs "interrupt" property
 * @intsize: Number of values inside the intspec array
 * @out_hwirq: HW IRQ value associated with this interrupt
 * @out_type: The IRQ SENSE type for this interrupt.
 *
 * This Device Tree IRQ specifier translation function will translate a
 * specific "interrupt" as defined by 2 DT values where the cell values map
 * the hwirq number + 1 and linux irq flags. Since the output is the hwirq
 * number, this function will subtract 1 from the value listed in DT.
 *
 * Return: 0 on success, -EINVAL if fewer than two cells were supplied.
 */
static int soctherm_irq_domain_xlate_twocell(struct irq_domain *d,
	struct device_node *ctrlr, const u32 *intspec, unsigned int intsize,
	irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 2))
		return -EINVAL;

	/*
	 * The HW value is 1 index less than the DT IRQ values.
	 * i.e. OC4 goes to HW index 3.
	 */
	*out_hwirq = intspec[0] - 1;
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}

static const struct irq_domain_ops soctherm_oc_domain_ops = {
	.map = soctherm_oc_irq_map,
	.xlate = soctherm_irq_domain_xlate_twocell,
};

/**
 * soctherm_oc_int_init() - Initial enabling of the over
 * current interrupts
 * @np: The devicetree node for soctherm
 * @num_irqs: The number of new interrupt requests
 *
 * Sets the over current interrupt request chip data and creates the
 * linear IRQ domain used to deliver OC alarms to nested handlers.
 *
 * Return: 0 on success or if overcurrent interrupts are not enabled
 * (@num_irqs == 0); -ENOMEM if the IRQ domain could not be created.
 */
static int soctherm_oc_int_init(struct device_node *np, int num_irqs)
{
	if (!num_irqs) {
		pr_info("%s(): OC interrupts are not enabled\n", __func__);
		return 0;
	}

	mutex_init(&soc_irq_cdata.irq_lock);
	soc_irq_cdata.irq_enable = 0;

	soc_irq_cdata.irq_chip.name = "soc_therm_oc";
	soc_irq_cdata.irq_chip.irq_bus_lock = soctherm_oc_irq_lock;
	soc_irq_cdata.irq_chip.irq_bus_sync_unlock =
		soctherm_oc_irq_sync_unlock;
	soc_irq_cdata.irq_chip.irq_disable = soctherm_oc_irq_disable;
	soc_irq_cdata.irq_chip.irq_enable = soctherm_oc_irq_enable;
	soc_irq_cdata.irq_chip.irq_set_type = soctherm_oc_irq_set_type;
	soc_irq_cdata.irq_chip.irq_set_wake = NULL;

	soc_irq_cdata.domain = irq_domain_add_linear(np, num_irqs,
						     &soctherm_oc_domain_ops,
						     &soc_irq_cdata);

	if (!soc_irq_cdata.domain) {
		pr_err("%s: Failed to create IRQ domain\n", __func__);
		return -ENOMEM;
	}

	pr_debug("%s(): OC interrupts enabled successful\n", __func__);
	return 0;
}

#ifdef CONFIG_DEBUG_FS
/* debugfs dump of the sensor, thermctl and throttle register state */
static int regs_show(struct seq_file *s, void *data)
{
	struct platform_device *pdev = s->private;
	struct tegra_soctherm *ts = platform_get_drvdata(pdev);
	const struct tegra_tsensor *tsensors = ts->soc->tsensors;
	const struct tegra_tsensor_group **ttgs = ts->soc->ttgs;
	u32 r, state;
	int i, level;

	seq_puts(s, "-----TSENSE (convert HW)-----\n");

	for (i = 0; i < ts->soc->num_tsensors; i++) {
		r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG1);
		state = REG_GET_MASK(r, SENSOR_CONFIG1_TEMP_ENABLE);

		seq_printf(s, "%s: ", tsensors[i].name);
		seq_printf(s, "En(%d) ", state);

		/* skip the detail dump for disabled sensors */
		if (!state) {
			seq_puts(s, "\n");
			continue;
		}

		state = REG_GET_MASK(r, SENSOR_CONFIG1_TIDDQ_EN_MASK);
		seq_printf(s, "tiddq(%d) ", state);
		state = REG_GET_MASK(r, SENSOR_CONFIG1_TEN_COUNT_MASK);
		seq_printf(s, "ten_count(%d) ", state);
		state = REG_GET_MASK(r, SENSOR_CONFIG1_TSAMPLE_MASK);
		seq_printf(s, "tsample(%d) ", state + 1);

		r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS1);
		state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_VALID_MASK);
		seq_printf(s, "Temp(%d/", state);
		state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_MASK);
		seq_printf(s, "%d) ", translate_temp(state));

		r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS0);
		state = REG_GET_MASK(r, SENSOR_STATUS0_VALID_MASK);
		seq_printf(s, "Capture(%d/", state);
		state = REG_GET_MASK(r, SENSOR_STATUS0_CAPTURE_MASK);
		seq_printf(s, "%d) ", state);

		r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG0);
		state = REG_GET_MASK(r, SENSOR_CONFIG0_STOP);
		seq_printf(s, "Stop(%d) ", state);
		state = REG_GET_MASK(r, SENSOR_CONFIG0_TALL_MASK);
		seq_printf(s, "Tall(%d) ", state);
		state = REG_GET_MASK(r, SENSOR_CONFIG0_TCALC_OVER);
		seq_printf(s, "Over(%d/", state);
		state = REG_GET_MASK(r, SENSOR_CONFIG0_OVER);
		seq_printf(s, "%d/", state);
		state = REG_GET_MASK(r, SENSOR_CONFIG0_CPTR_OVER);
		seq_printf(s, "%d) ", state);

		r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG2);
		state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMA_MASK);
		seq_printf(s, "Therm_A/B(%d/", state);
		state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMB_MASK);
		seq_printf(s, "%d)\n", (s16)state);
	}

	r = readl(ts->regs + SENSOR_PDIV);
	seq_printf(s, "PDIV: 0x%x\n", r);

	r = readl(ts->regs + SENSOR_HOTSPOT_OFF);
	seq_printf(s, "HOTSPOT: 0x%x\n", r);

	seq_puts(s, "\n");
	seq_puts(s, "-----SOC_THERM-----\n");

	r = readl(ts->regs + SENSOR_TEMP1);
	state = REG_GET_MASK(r, SENSOR_TEMP1_CPU_TEMP_MASK);
	seq_printf(s, "Temperatures: CPU(%d) ", translate_temp(state));
	state = REG_GET_MASK(r, SENSOR_TEMP1_GPU_TEMP_MASK);
	seq_printf(s, " GPU(%d) ", translate_temp(state));
	r = readl(ts->regs + SENSOR_TEMP2);
	state = REG_GET_MASK(r, SENSOR_TEMP2_PLLX_TEMP_MASK);
	seq_printf(s, " PLLX(%d) ", translate_temp(state));
	state = REG_GET_MASK(r, SENSOR_TEMP2_MEM_TEMP_MASK);
	seq_printf(s, " MEM(%d)\n", translate_temp(state));

	/* dump the four throttle levels of each thermctl group */
	for (i = 0; i < ts->soc->num_ttgs; i++) {
		seq_printf(s, "%s:\n", ttgs[i]->name);
		for (level = 0; level < 4; level++) {
			s32 v;
			u32 mask;
			u16 off = ttgs[i]->thermctl_lvl0_offset;

			r = readl(ts->regs + THERMCTL_LVL_REG(off, level));

			mask = ttgs[i]->thermctl_lvl0_up_thresh_mask;
			state = REG_GET_MASK(r, mask);
			/* thresholds are signed, bptt bits wide */
			v = sign_extend32(state, ts->soc->bptt - 1);
			v *= ts->soc->thresh_grain;
			seq_printf(s, " %d: Up/Dn(%d /", level, v);

			mask = ttgs[i]->thermctl_lvl0_dn_thresh_mask;
			state = REG_GET_MASK(r, mask);
			v = sign_extend32(state, ts->soc->bptt - 1);
			v *= ts->soc->thresh_grain;
			seq_printf(s, "%d ) ", v);

			mask = THERMCTL_LVL0_CPU0_EN_MASK;
			state = REG_GET_MASK(r, mask);
			seq_printf(s, "En(%d) ", state);

			mask = THERMCTL_LVL0_CPU0_CPU_THROT_MASK;
			state = REG_GET_MASK(r, mask);
			seq_puts(s, "CPU Throt");
			if (!state)
				seq_printf(s, "(%s) ", "none");
			else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_LIGHT)
				seq_printf(s, "(%s) ", "L");
			else if (state == THERMCTL_LVL0_CPU0_CPU_THROT_HEAVY)
				seq_printf(s, "(%s) ", "H");
			else
				seq_printf(s, "(%s) ", "H+L");

			mask = THERMCTL_LVL0_CPU0_GPU_THROT_MASK;
			state = REG_GET_MASK(r, mask);
			seq_puts(s, "GPU Throt");
			if (!state)
				seq_printf(s, "(%s) ", "none");
			else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_LIGHT)
				seq_printf(s, "(%s) ", "L");
			else if (state == THERMCTL_LVL0_CPU0_GPU_THROT_HEAVY)
				seq_printf(s, "(%s) ", "H");
			else
				seq_printf(s, "(%s) ", "H+L");

			mask = THERMCTL_LVL0_CPU0_STATUS_MASK;
			state = REG_GET_MASK(r, mask);
			seq_printf(s, "Status(%s)\n",
				   state == 0 ? "LO" :
				   state == 1 ? "In" :
				   state == 2 ? "Res" : "HI");
		}
	}

	r = readl(ts->regs + THERMCTL_STATS_CTL);
	seq_printf(s, "STATS: Up(%s) Dn(%s)\n",
		   r & STATS_CTL_EN_UP ? "En" : "--",
		   r & STATS_CTL_EN_DN ? "En" : "--");

	for (level = 0; level < 4; level++) {
		u16 off;

		off = THERMCTL_LVL0_UP_STATS;
		r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
		seq_printf(s, " Level_%d Up(%d) ", level, r);

		off = THERMCTL_LVL0_DN_STATS;
		r = readl(ts->regs + THERMCTL_LVL_REG(off, level));
		seq_printf(s, "Dn(%d)\n", r);
	}

	r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
	state = REG_GET_MASK(r, ttgs[0]->thermtrip_any_en_mask);
	seq_printf(s, "Thermtrip Any En(%d)\n", state);
	for (i = 0; i < ts->soc->num_ttgs; i++) {
		state = REG_GET_MASK(r, ttgs[i]->thermtrip_enable_mask);
		seq_printf(s, " %s En(%d) ", ttgs[i]->name, state);
		state = REG_GET_MASK(r, ttgs[i]->thermtrip_threshold_mask);
		state *= ts->soc->thresh_grain;
		seq_printf(s, "Thresh(%d)\n", state);
	}

	r = readl(ts->regs + THROT_GLOBAL_CFG);
	seq_puts(s, "\n");
	seq_printf(s, "GLOBAL THROTTLE CONFIG: 0x%08x\n", r);

	seq_puts(s, "---------------------------------------------------\n");
	r = readl(ts->regs + THROT_STATUS);
	state = REG_GET_MASK(r, THROT_STATUS_BREACH_MASK);
	seq_printf(s, "THROT STATUS: breach(%d) ", state);
	state = REG_GET_MASK(r, THROT_STATUS_STATE_MASK);
	seq_printf(s, "state(%d) ", state);
	state = REG_GET_MASK(r, THROT_STATUS_ENABLED_MASK);
	seq_printf(s, "enabled(%d)\n", state);

	r = readl(ts->regs + CPU_PSKIP_STATUS);
	if (ts->soc->use_ccroc) {
		state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK);
		seq_printf(s, "CPU PSKIP STATUS: enabled(%d)\n", state);
	} else {
		state = REG_GET_MASK(r, XPU_PSKIP_STATUS_M_MASK);
		seq_printf(s, "CPU PSKIP STATUS: M(%d) ", state);
		state = REG_GET_MASK(r, XPU_PSKIP_STATUS_N_MASK);
		seq_printf(s, "N(%d) ", state);
		state = REG_GET_MASK(r, XPU_PSKIP_STATUS_ENABLED_MASK);
		seq_printf(s, "enabled(%d)\n", state);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(regs); 1484 1485 static void soctherm_debug_init(struct platform_device *pdev) 1486 { 1487 struct tegra_soctherm *tegra = platform_get_drvdata(pdev); 1488 struct dentry *root, *file; 1489 1490 root = debugfs_create_dir("soctherm", NULL); 1491 if (!root) { 1492 dev_err(&pdev->dev, "failed to create debugfs directory\n"); 1493 return; 1494 } 1495 1496 tegra->debugfs_dir = root; 1497 1498 file = debugfs_create_file("reg_contents", 0644, root, 1499 pdev, ®s_fops); 1500 if (!file) { 1501 dev_err(&pdev->dev, "failed to create debugfs file\n"); 1502 debugfs_remove_recursive(tegra->debugfs_dir); 1503 tegra->debugfs_dir = NULL; 1504 } 1505 } 1506 #else 1507 static inline void soctherm_debug_init(struct platform_device *pdev) {} 1508 #endif 1509 1510 static int soctherm_clk_enable(struct platform_device *pdev, bool enable) 1511 { 1512 struct tegra_soctherm *tegra = platform_get_drvdata(pdev); 1513 int err; 1514 1515 if (!tegra->clock_soctherm || !tegra->clock_tsensor) 1516 return -EINVAL; 1517 1518 reset_control_assert(tegra->reset); 1519 1520 if (enable) { 1521 err = clk_prepare_enable(tegra->clock_soctherm); 1522 if (err) { 1523 reset_control_deassert(tegra->reset); 1524 return err; 1525 } 1526 1527 err = clk_prepare_enable(tegra->clock_tsensor); 1528 if (err) { 1529 clk_disable_unprepare(tegra->clock_soctherm); 1530 reset_control_deassert(tegra->reset); 1531 return err; 1532 } 1533 } else { 1534 clk_disable_unprepare(tegra->clock_tsensor); 1535 clk_disable_unprepare(tegra->clock_soctherm); 1536 } 1537 1538 reset_control_deassert(tegra->reset); 1539 1540 return 0; 1541 } 1542 1543 static int throt_get_cdev_max_state(struct thermal_cooling_device *cdev, 1544 unsigned long *max_state) 1545 { 1546 *max_state = 1; 1547 return 0; 1548 } 1549 1550 static int throt_get_cdev_cur_state(struct thermal_cooling_device *cdev, 1551 unsigned long *cur_state) 1552 { 1553 struct tegra_soctherm *ts = cdev->devdata; 1554 u32 r; 1555 1556 r = readl(ts->regs + 
THROT_STATUS); 1557 if (REG_GET_MASK(r, THROT_STATUS_STATE_MASK)) 1558 *cur_state = 1; 1559 else 1560 *cur_state = 0; 1561 1562 return 0; 1563 } 1564 1565 static int throt_set_cdev_state(struct thermal_cooling_device *cdev, 1566 unsigned long cur_state) 1567 { 1568 return 0; 1569 } 1570 1571 static const struct thermal_cooling_device_ops throt_cooling_ops = { 1572 .get_max_state = throt_get_cdev_max_state, 1573 .get_cur_state = throt_get_cdev_cur_state, 1574 .set_cur_state = throt_set_cdev_state, 1575 }; 1576 1577 static int soctherm_thermtrips_parse(struct platform_device *pdev) 1578 { 1579 struct device *dev = &pdev->dev; 1580 struct tegra_soctherm *ts = dev_get_drvdata(dev); 1581 struct tsensor_group_thermtrips *tt = ts->soc->thermtrips; 1582 const int max_num_prop = ts->soc->num_ttgs * 2; 1583 u32 *tlb; 1584 int i, j, n, ret; 1585 1586 if (!tt) 1587 return -ENOMEM; 1588 1589 n = of_property_count_u32_elems(dev->of_node, "nvidia,thermtrips"); 1590 if (n <= 0) { 1591 dev_info(dev, 1592 "missing thermtrips, will use critical trips as shut down temp\n"); 1593 return n; 1594 } 1595 1596 n = min(max_num_prop, n); 1597 1598 tlb = devm_kcalloc(&pdev->dev, max_num_prop, sizeof(u32), GFP_KERNEL); 1599 if (!tlb) 1600 return -ENOMEM; 1601 ret = of_property_read_u32_array(dev->of_node, "nvidia,thermtrips", 1602 tlb, n); 1603 if (ret) { 1604 dev_err(dev, "invalid num ele: thermtrips:%d\n", ret); 1605 return ret; 1606 } 1607 1608 i = 0; 1609 for (j = 0; j < n; j = j + 2) { 1610 if (tlb[j] >= TEGRA124_SOCTHERM_SENSOR_NUM) 1611 continue; 1612 1613 tt[i].id = tlb[j]; 1614 tt[i].temp = tlb[j + 1]; 1615 i++; 1616 } 1617 1618 return 0; 1619 } 1620 1621 static void soctherm_oc_cfg_parse(struct device *dev, 1622 struct device_node *np_oc, 1623 struct soctherm_throt_cfg *stc) 1624 { 1625 u32 val; 1626 1627 if (of_property_read_bool(np_oc, "nvidia,polarity-active-low")) 1628 stc->oc_cfg.active_low = 1; 1629 else 1630 stc->oc_cfg.active_low = 0; 1631 1632 if 
(!of_property_read_u32(np_oc, "nvidia,count-threshold", &val)) { 1633 stc->oc_cfg.intr_en = 1; 1634 stc->oc_cfg.alarm_cnt_thresh = val; 1635 } 1636 1637 if (!of_property_read_u32(np_oc, "nvidia,throttle-period-us", &val)) 1638 stc->oc_cfg.throt_period = val; 1639 1640 if (!of_property_read_u32(np_oc, "nvidia,alarm-filter", &val)) 1641 stc->oc_cfg.alarm_filter = val; 1642 1643 /* BRIEF throttling by default, do not support STICKY */ 1644 stc->oc_cfg.mode = OC_THROTTLE_MODE_BRIEF; 1645 } 1646 1647 static int soctherm_throt_cfg_parse(struct device *dev, 1648 struct device_node *np, 1649 struct soctherm_throt_cfg *stc) 1650 { 1651 struct tegra_soctherm *ts = dev_get_drvdata(dev); 1652 int ret; 1653 u32 val; 1654 1655 ret = of_property_read_u32(np, "nvidia,priority", &val); 1656 if (ret) { 1657 dev_err(dev, "throttle-cfg: %s: invalid priority\n", stc->name); 1658 return -EINVAL; 1659 } 1660 stc->priority = val; 1661 1662 ret = of_property_read_u32(np, ts->soc->use_ccroc ? 1663 "nvidia,cpu-throt-level" : 1664 "nvidia,cpu-throt-percent", &val); 1665 if (!ret) { 1666 if (ts->soc->use_ccroc && 1667 val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH) 1668 stc->cpu_throt_level = val; 1669 else if (!ts->soc->use_ccroc && val <= 100) 1670 stc->cpu_throt_depth = val; 1671 else 1672 goto err; 1673 } else { 1674 goto err; 1675 } 1676 1677 ret = of_property_read_u32(np, "nvidia,gpu-throt-level", &val); 1678 if (!ret && val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH) 1679 stc->gpu_throt_level = val; 1680 else 1681 goto err; 1682 1683 return 0; 1684 1685 err: 1686 dev_err(dev, "throttle-cfg: %s: no throt prop or invalid prop\n", 1687 stc->name); 1688 return -EINVAL; 1689 } 1690 1691 /** 1692 * soctherm_init_hw_throt_cdev() - Parse the HW throttle configurations 1693 * and register them as cooling devices. 
 */
static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_soctherm *ts = dev_get_drvdata(dev);
	struct device_node *np_stc, *np_stcc;
	const char *name;
	int i;

	/* start from a clean, named, uninitialized table */
	for (i = 0; i < THROTTLE_SIZE; i++) {
		ts->throt_cfgs[i].name = throt_names[i];
		ts->throt_cfgs[i].id = i;
		ts->throt_cfgs[i].init = false;
	}

	np_stc = of_get_child_by_name(dev->of_node, "throttle-cfgs");
	if (!np_stc) {
		dev_info(dev,
			 "throttle-cfg: no throttle-cfgs - not enabling\n");
		return;
	}

	for_each_child_of_node(np_stc, np_stcc) {
		struct soctherm_throt_cfg *stc;
		struct thermal_cooling_device *tcd;
		int err;

		/* child node names must match the known throttle ids */
		name = np_stcc->name;
		stc = find_throttle_cfg_by_name(ts, name);
		if (!stc) {
			dev_err(dev,
				"throttle-cfg: could not find %s\n", name);
			continue;
		}

		if (stc->init) {
			dev_err(dev, "throttle-cfg: %s: redefined!\n", name);
			/* drop the iterator's reference before breaking out */
			of_node_put(np_stcc);
			break;
		}

		err = soctherm_throt_cfg_parse(dev, np_stcc, stc);
		if (err)
			continue;

		if (stc->id >= THROTTLE_OC1) {
			/* OC alarms are HW-driven: no cooling device needed */
			soctherm_oc_cfg_parse(dev, np_stcc, stc);
			stc->init = true;
		} else {

			tcd = thermal_of_cooling_device_register(np_stcc,
							 (char *)name, ts,
							 &throt_cooling_ops);
			if (IS_ERR_OR_NULL(tcd)) {
				dev_err(dev,
					"throttle-cfg: %s: failed to register cooling device\n",
					name);
				continue;
			}
			stc->cdev = tcd;
			stc->init = true;
		}

	}

	of_node_put(np_stc);
}

/**
 * throttlectl_cpu_level_cfg() - programs CCROC NV_THERM level config
 * @ts: pointer to a struct tegra_soctherm
 * @level: describing the level LOW/MED/HIGH of throttling
 *
 * It's necessary to set up the CPU-local CCROC NV_THERM instance with
 * the M/N values desired for each level. This function does this.
 *
 * This function pre-programs the CCROC NV_THERM levels in terms of
 * pre-configured "Low", "Medium" or "Heavy" throttle levels which are
 * mapped to THROT_LEVEL_LOW, THROT_LEVEL_MED and THROT_LEVEL_HVY.
 */
static void throttlectl_cpu_level_cfg(struct tegra_soctherm *ts, int level)
{
	u8 depth, dividend;
	u32 r;

	/* map the symbolic level to a pulse-skip depth in percent */
	switch (level) {
	case TEGRA_SOCTHERM_THROT_LEVEL_LOW:
		depth = 50;
		break;
	case TEGRA_SOCTHERM_THROT_LEVEL_MED:
		depth = 75;
		break;
	case TEGRA_SOCTHERM_THROT_LEVEL_HIGH:
		depth = 80;
		break;
	case TEGRA_SOCTHERM_THROT_LEVEL_NONE:
		return;
	default:
		return;
	}

	dividend = THROT_DEPTH_DIVIDEND(depth);

	/* setup PSKIP in ccroc nv_therm registers */
	r = ccroc_readl(ts, CCROC_THROT_PSKIP_RAMP_CPU_REG(level));
	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_DURATION_MASK, 0xff);
	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_RAMP_STEP_MASK, 0xf);
	ccroc_writel(ts, r, CCROC_THROT_PSKIP_RAMP_CPU_REG(level));

	r = ccroc_readl(ts, CCROC_THROT_PSKIP_CTRL_CPU_REG(level));
	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_ENB_MASK, 1);
	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend);
	r = REG_SET_MASK(r, CCROC_THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff);
	ccroc_writel(ts, r, CCROC_THROT_PSKIP_CTRL_CPU_REG(level));
}

/**
 * throttlectl_cpu_level_select() - program CPU pulse skipper config
 * @ts: pointer to a struct tegra_soctherm
 * @throt: the LIGHT/HEAVY of throttle event id
 *
 * Pulse skippers are used to throttle clock frequencies. This
 * function programs the pulse skippers based on @throt and platform
 * data. This function is used on SoCs which have CPU-local pulse
 * skipper control, such as T13x. It programs soctherm's interface to
 * Denver:CCROC NV_THERM in terms of Low, Medium and HIGH throttling
 * vectors. PSKIP_BYPASS mode is set as required per HW spec.
 */
static void throttlectl_cpu_level_select(struct tegra_soctherm *ts,
					 enum soctherm_throttle_id throt)
{
	u32 r, throt_vect;

	/* Denver:CCROC NV_THERM interface N:3 Mapping */
	switch (ts->throt_cfgs[throt].cpu_throt_level) {
	case TEGRA_SOCTHERM_THROT_LEVEL_LOW:
		throt_vect = THROT_VECT_LOW;
		break;
	case TEGRA_SOCTHERM_THROT_LEVEL_MED:
		throt_vect = THROT_VECT_MED;
		break;
	case TEGRA_SOCTHERM_THROT_LEVEL_HIGH:
		throt_vect = THROT_VECT_HIGH;
		break;
	default:
		throt_vect = THROT_VECT_NONE;
		break;
	}

	r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_CPU_MASK, throt_vect);
	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT2_CPU_MASK, throt_vect);
	writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));

	/* bypass sequencer in soc_therm as it is programmed in ccroc */
	r = REG_SET_MASK(0, THROT_PSKIP_RAMP_SEQ_BYPASS_MODE_MASK, 1);
	writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
}

/**
 * throttlectl_cpu_mn() - program CPU pulse skipper configuration
 * @ts: pointer to a struct tegra_soctherm
 * @throt: the LIGHT/HEAVY of throttle event id
 *
 * Pulse skippers are used to throttle clock frequencies. This
 * function programs the pulse skippers based on @throt and platform
 * data. This function is used for CPUs that have "remote" pulse
 * skipper control, e.g., the CPU pulse skipper is controlled by the
 * SOC_THERM IP block. (SOC_THERM is located outside the CPU
 * complex.)
 */
static void throttlectl_cpu_mn(struct tegra_soctherm *ts,
			       enum soctherm_throttle_id throt)
{
	u32 r;
	int depth;
	u8 dividend;

	depth = ts->throt_cfgs[throt].cpu_throt_depth;
	dividend = THROT_DEPTH_DIVIDEND(depth);

	r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));
	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVIDEND_MASK, dividend);
	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_DIVISOR_MASK, 0xff);
	writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_CPU));

	r = readl(ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
	r = REG_SET_MASK(r, THROT_PSKIP_RAMP_DURATION_MASK, 0xff);
	r = REG_SET_MASK(r, THROT_PSKIP_RAMP_STEP_MASK, 0xf);
	writel(r, ts->regs + THROT_PSKIP_RAMP(throt, THROTTLE_DEV_CPU));
}

/**
 * throttlectl_gpu_level_select() - selects throttling level for GPU
 * @ts: pointer to a struct tegra_soctherm
 * @throt: the LIGHT/HEAVY of throttle event id
 *
 * This function programs soctherm's interface to GK20a NV_THERM to select
 * pre-configured "Low", "Medium" or "Heavy" throttle levels.
 *
 * (No return value; the register write below is unconditional.)
 */
static void throttlectl_gpu_level_select(struct tegra_soctherm *ts,
					 enum soctherm_throttle_id throt)
{
	u32 r, level, throt_vect;

	level = ts->throt_cfgs[throt].gpu_throt_level;
	throt_vect = THROT_LEVEL_TO_DEPTH(level);
	r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU));
	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
	r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_GPU_MASK, throt_vect);
	writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU));
}

/**
 * soctherm_oc_cfg_program() - program one over-current alarm configuration
 * @ts: pointer to a struct tegra_soctherm
 * @throt: the THROTTLE_OCx throttle event id
 *
 * Writes mode, polarity, period, count threshold and filter for the
 * alarm and enables its interrupt when requested by the configuration.
 *
 * Return: 0 on success, -EINVAL when the configuration mode is DISABLED.
 */
static int soctherm_oc_cfg_program(struct tegra_soctherm *ts,
				   enum soctherm_throttle_id throt)
{
	u32 r;
	struct soctherm_oc_cfg *oc = &ts->throt_cfgs[throt].oc_cfg;

	if (oc->mode == OC_THROTTLE_MODE_DISABLED)
		return -EINVAL;

	r = REG_SET_MASK(0, OC1_CFG_HW_RESTORE_MASK, 1);
	r = REG_SET_MASK(r, OC1_CFG_THROTTLE_MODE_MASK, oc->mode);
	r = REG_SET_MASK(r, OC1_CFG_ALARM_POLARITY_MASK, oc->active_low);
	r = REG_SET_MASK(r, OC1_CFG_EN_THROTTLE_MASK, 1);
	writel(r, ts->regs + ALARM_CFG(throt));
	writel(oc->throt_period, ts->regs + ALARM_THROTTLE_PERIOD(throt));
	writel(oc->alarm_cnt_thresh, ts->regs + ALARM_CNT_THRESHOLD(throt));
	writel(oc->alarm_filter, ts->regs + ALARM_FILTER(throt));
	soctherm_oc_intr_enable(ts, throt, oc->intr_en);

	return 0;
}

/**
 * soctherm_throttle_program() - programs pulse skippers' configuration
 * @ts: pointer to a struct tegra_soctherm
 * @throt: the LIGHT/HEAVY of the throttle event id.
 *
 * Pulse skippers are used to throttle clock frequencies.
 * This function programs the pulse skippers.
 */
static void soctherm_throttle_program(struct tegra_soctherm *ts,
				      enum soctherm_throttle_id throt)
{
	u32 r;
	struct soctherm_throt_cfg stc = ts->throt_cfgs[throt];

	/* skip configurations not initialized from DT */
	if (!stc.init)
		return;

	/* OC alarms additionally need their alarm registers programmed */
	if ((throt >= THROTTLE_OC1) && (soctherm_oc_cfg_program(ts, throt)))
		return;

	/* Setup PSKIP parameters */
	if (ts->soc->use_ccroc)
		throttlectl_cpu_level_select(ts, throt);
	else
		throttlectl_cpu_mn(ts, throt);

	throttlectl_gpu_level_select(ts, throt);

	r = REG_SET_MASK(0, THROT_PRIORITY_LITE_PRIO_MASK, stc.priority);
	writel(r, ts->regs + THROT_PRIORITY_CTRL(throt));

	r = REG_SET_MASK(0, THROT_DELAY_LITE_DELAY_MASK, 0);
	writel(r, ts->regs + THROT_DELAY_CTRL(throt));

	/* raise the priority lock only if this config outranks it */
	r = readl(ts->regs + THROT_PRIORITY_LOCK);
	r = REG_GET_MASK(r, THROT_PRIORITY_LOCK_PRIORITY_MASK);
	if (r >= stc.priority)
		return;
	r = REG_SET_MASK(0, THROT_PRIORITY_LOCK_PRIORITY_MASK,
			 stc.priority);
	writel(r, ts->regs + THROT_PRIORITY_LOCK);
}

/**
 * tegra_soctherm_throttle() - program all HW throttle configurations
 * @dev: soctherm device
 *
 * Configures the CCROC levels where applicable, programs every throttle
 * entry, enables global throttling and starts up/down stats collection.
 */
static void tegra_soctherm_throttle(struct device *dev)
{
	struct tegra_soctherm *ts = dev_get_drvdata(dev);
	u32 v;
	int i;

	/* configure LOW, MED and HIGH levels for CCROC NV_THERM */
	if (ts->soc->use_ccroc) {
		throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_LOW);
		throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_MED);
		throttlectl_cpu_level_cfg(ts, TEGRA_SOCTHERM_THROT_LEVEL_HIGH);
	}

	/* Thermal HW throttle programming */
	for (i = 0; i < THROTTLE_SIZE; i++)
		soctherm_throttle_program(ts, i);

	v = REG_SET_MASK(0, THROT_GLOBAL_ENB_MASK, 1);
	if (ts->soc->use_ccroc) {
		ccroc_writel(ts, v, CCROC_GLOBAL_CFG);

		v = ccroc_readl(ts, CCROC_SUPER_CCLKG_DIVIDER);
		v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1);
		ccroc_writel(ts, v, CCROC_SUPER_CCLKG_DIVIDER);
	} else {
		writel(v, ts->regs + THROT_GLOBAL_CFG);

		v = readl(ts->clk_regs + CAR_SUPER_CCLKG_DIVIDER);
		v = REG_SET_MASK(v, CDIVG_USE_THERM_CONTROLS_MASK, 1);
		writel(v, ts->clk_regs + CAR_SUPER_CCLKG_DIVIDER);
	}

	/* initialize stats collection */
	v = STATS_CTL_CLR_DN | STATS_CTL_EN_DN |
	    STATS_CTL_CLR_UP | STATS_CTL_EN_UP;
	writel(v, ts->regs + THERMCTL_STATS_CTL);
}

/**
 * soctherm_interrupts_init() - set up OC domain and request the IRQs
 * @pdev: ptr to the soctherm platform device
 * @tegra: soctherm driver context
 *
 * Missing platform IRQs are treated as non-fatal (returns 0); failures
 * to request a granted IRQ are returned to the caller.
 *
 * Return: 0 on success or when an IRQ is simply absent, otherwise a
 * negative error code.
 */
static int soctherm_interrupts_init(struct platform_device *pdev,
				    struct tegra_soctherm *tegra)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	ret = soctherm_oc_int_init(np, TEGRA_SOC_OC_IRQ_MAX);
	if (ret < 0) {
		dev_err(&pdev->dev, "soctherm_oc_int_init failed\n");
		return ret;
	}

	tegra->thermal_irq = platform_get_irq(pdev, 0);
	if (tegra->thermal_irq < 0) {
		dev_dbg(&pdev->dev, "get 'thermal_irq' failed.\n");
		return 0;
	}

	tegra->edp_irq = platform_get_irq(pdev, 1);
	if (tegra->edp_irq < 0) {
		dev_dbg(&pdev->dev, "get 'edp_irq' failed.\n");
		return 0;
	}

	ret = devm_request_threaded_irq(&pdev->dev,
					tegra->thermal_irq,
					soctherm_thermal_isr,
					soctherm_thermal_isr_thread,
					IRQF_ONESHOT,
					dev_name(&pdev->dev),
					tegra);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq 'thermal_irq' failed.\n");
		return ret;
	}

	ret = devm_request_threaded_irq(&pdev->dev,
					tegra->edp_irq,
					soctherm_edp_isr,
					soctherm_edp_isr_thread,
					IRQF_ONESHOT,
					"soctherm_edp",
					tegra);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq 'edp_irq' failed.\n");
		return ret;
	}

	return 0;
}

/**
 * soctherm_init() - initialize the sensors and HW throttling
 * @pdev: ptr to the soctherm platform device
 *
 * Enables each raw tsensor, programs the per-THERM pdiv and hotspot
 * offsets, then configures HW throttling.
 */
static void soctherm_init(struct platform_device *pdev)
{
	struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
	const struct tegra_tsensor_group **ttgs = tegra->soc->ttgs;
	int i;
	u32 pdiv, hotspot;

	/* Initialize raw sensors */
	for (i = 0; i < tegra->soc->num_tsensors; ++i)
		enable_tsensor(tegra, i);

	/* program pdiv and hotspot offsets per THERM */
	pdiv = readl(tegra->regs + SENSOR_PDIV);
	hotspot = readl(tegra->regs + SENSOR_HOTSPOT_OFF);
	for (i = 0; i < tegra->soc->num_ttgs; ++i) {
		pdiv = REG_SET_MASK(pdiv, ttgs[i]->pdiv_mask,
				    ttgs[i]->pdiv);
		/* hotspot offset from PLLX, doesn't need to configure PLLX */
		if (ttgs[i]->id == TEGRA124_SOCTHERM_SENSOR_PLLX)
			continue;
		hotspot = REG_SET_MASK(hotspot,
				       ttgs[i]->pllx_hotspot_mask,
				       ttgs[i]->pllx_hotspot_diff);
	}
	writel(pdiv, tegra->regs + SENSOR_PDIV);
	writel(hotspot, tegra->regs + SENSOR_HOTSPOT_OFF);

	/* Configure hw throttle */
	tegra_soctherm_throttle(&pdev->dev);
}

static const struct of_device_id tegra_soctherm_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_124_SOC
	{
		.compatible = "nvidia,tegra124-soctherm",
		.data = &tegra124_soctherm,
	},
#endif
#ifdef CONFIG_ARCH_TEGRA_132_SOC
	{
		.compatible = "nvidia,tegra132-soctherm",
		.data = &tegra132_soctherm,
	},
#endif
#ifdef CONFIG_ARCH_TEGRA_210_SOC
	{
		.compatible = "nvidia,tegra210-soctherm",
		.data = &tegra210_soctherm,
	},
#endif
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match);

static int tegra_soctherm_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct tegra_soctherm *tegra;
	struct thermal_zone_device *z;
	struct tsensor_shared_calib shared_calib;
	struct resource *res;
	struct tegra_soctherm_soc *soc;
	unsigned int i;
	int err;

	match = of_match_node(tegra_soctherm_of_match, pdev->dev.of_node);
	if (!match)
		return -ENODEV;

	soc = (struct tegra_soctherm_soc *)match->data;
	/* sanity-check the soc descriptor before trusting its tables */
	if (soc->num_ttgs > TEGRA124_SOCTHERM_SENSOR_NUM)
		return -EINVAL;

	tegra = devm_kzalloc(&pdev->dev,
sizeof(*tegra), GFP_KERNEL); 2134 if (!tegra) 2135 return -ENOMEM; 2136 2137 mutex_init(&tegra->thermctl_lock); 2138 dev_set_drvdata(&pdev->dev, tegra); 2139 2140 tegra->soc = soc; 2141 2142 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2143 "soctherm-reg"); 2144 tegra->regs = devm_ioremap_resource(&pdev->dev, res); 2145 if (IS_ERR(tegra->regs)) { 2146 dev_err(&pdev->dev, "can't get soctherm registers"); 2147 return PTR_ERR(tegra->regs); 2148 } 2149 2150 if (!tegra->soc->use_ccroc) { 2151 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2152 "car-reg"); 2153 tegra->clk_regs = devm_ioremap_resource(&pdev->dev, res); 2154 if (IS_ERR(tegra->clk_regs)) { 2155 dev_err(&pdev->dev, "can't get car clk registers"); 2156 return PTR_ERR(tegra->clk_regs); 2157 } 2158 } else { 2159 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2160 "ccroc-reg"); 2161 tegra->ccroc_regs = devm_ioremap_resource(&pdev->dev, res); 2162 if (IS_ERR(tegra->ccroc_regs)) { 2163 dev_err(&pdev->dev, "can't get ccroc registers"); 2164 return PTR_ERR(tegra->ccroc_regs); 2165 } 2166 } 2167 2168 tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm"); 2169 if (IS_ERR(tegra->reset)) { 2170 dev_err(&pdev->dev, "can't get soctherm reset\n"); 2171 return PTR_ERR(tegra->reset); 2172 } 2173 2174 tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor"); 2175 if (IS_ERR(tegra->clock_tsensor)) { 2176 dev_err(&pdev->dev, "can't get tsensor clock\n"); 2177 return PTR_ERR(tegra->clock_tsensor); 2178 } 2179 2180 tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm"); 2181 if (IS_ERR(tegra->clock_soctherm)) { 2182 dev_err(&pdev->dev, "can't get soctherm clock\n"); 2183 return PTR_ERR(tegra->clock_soctherm); 2184 } 2185 2186 tegra->calib = devm_kcalloc(&pdev->dev, 2187 soc->num_tsensors, sizeof(u32), 2188 GFP_KERNEL); 2189 if (!tegra->calib) 2190 return -ENOMEM; 2191 2192 /* calculate shared calibration data */ 2193 err = tegra_calc_shared_calib(soc->tfuse, &shared_calib); 2194 if 
(err) 2195 return err; 2196 2197 /* calculate tsensor calibaration data */ 2198 for (i = 0; i < soc->num_tsensors; ++i) { 2199 err = tegra_calc_tsensor_calib(&soc->tsensors[i], 2200 &shared_calib, 2201 &tegra->calib[i]); 2202 if (err) 2203 return err; 2204 } 2205 2206 tegra->thermctl_tzs = devm_kcalloc(&pdev->dev, 2207 soc->num_ttgs, sizeof(z), 2208 GFP_KERNEL); 2209 if (!tegra->thermctl_tzs) 2210 return -ENOMEM; 2211 2212 err = soctherm_clk_enable(pdev, true); 2213 if (err) 2214 return err; 2215 2216 soctherm_thermtrips_parse(pdev); 2217 2218 soctherm_init_hw_throt_cdev(pdev); 2219 2220 soctherm_init(pdev); 2221 2222 for (i = 0; i < soc->num_ttgs; ++i) { 2223 struct tegra_thermctl_zone *zone = 2224 devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL); 2225 if (!zone) { 2226 err = -ENOMEM; 2227 goto disable_clocks; 2228 } 2229 2230 zone->reg = tegra->regs + soc->ttgs[i]->sensor_temp_offset; 2231 zone->dev = &pdev->dev; 2232 zone->sg = soc->ttgs[i]; 2233 zone->ts = tegra; 2234 2235 z = devm_thermal_zone_of_sensor_register(&pdev->dev, 2236 soc->ttgs[i]->id, zone, 2237 &tegra_of_thermal_ops); 2238 if (IS_ERR(z)) { 2239 err = PTR_ERR(z); 2240 dev_err(&pdev->dev, "failed to register sensor: %d\n", 2241 err); 2242 goto disable_clocks; 2243 } 2244 2245 zone->tz = z; 2246 tegra->thermctl_tzs[soc->ttgs[i]->id] = z; 2247 2248 /* Configure hw trip points */ 2249 err = tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z); 2250 if (err) 2251 goto disable_clocks; 2252 } 2253 2254 err = soctherm_interrupts_init(pdev, tegra); 2255 2256 soctherm_debug_init(pdev); 2257 2258 return 0; 2259 2260 disable_clocks: 2261 soctherm_clk_enable(pdev, false); 2262 2263 return err; 2264 } 2265 2266 static int tegra_soctherm_remove(struct platform_device *pdev) 2267 { 2268 struct tegra_soctherm *tegra = platform_get_drvdata(pdev); 2269 2270 debugfs_remove_recursive(tegra->debugfs_dir); 2271 2272 soctherm_clk_enable(pdev, false); 2273 2274 return 0; 2275 } 2276 2277 static int __maybe_unused 
soctherm_suspend(struct device *dev) 2278 { 2279 struct platform_device *pdev = to_platform_device(dev); 2280 2281 soctherm_clk_enable(pdev, false); 2282 2283 return 0; 2284 } 2285 2286 static int __maybe_unused soctherm_resume(struct device *dev) 2287 { 2288 struct platform_device *pdev = to_platform_device(dev); 2289 struct tegra_soctherm *tegra = platform_get_drvdata(pdev); 2290 struct tegra_soctherm_soc *soc = tegra->soc; 2291 int err, i; 2292 2293 err = soctherm_clk_enable(pdev, true); 2294 if (err) { 2295 dev_err(&pdev->dev, 2296 "Resume failed: enable clocks failed\n"); 2297 return err; 2298 } 2299 2300 soctherm_init(pdev); 2301 2302 for (i = 0; i < soc->num_ttgs; ++i) { 2303 struct thermal_zone_device *tz; 2304 2305 tz = tegra->thermctl_tzs[soc->ttgs[i]->id]; 2306 err = tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz); 2307 if (err) { 2308 dev_err(&pdev->dev, 2309 "Resume failed: set hwtrips failed\n"); 2310 return err; 2311 } 2312 } 2313 2314 return 0; 2315 } 2316 2317 static SIMPLE_DEV_PM_OPS(tegra_soctherm_pm, soctherm_suspend, soctherm_resume); 2318 2319 static struct platform_driver tegra_soctherm_driver = { 2320 .probe = tegra_soctherm_probe, 2321 .remove = tegra_soctherm_remove, 2322 .driver = { 2323 .name = "tegra_soctherm", 2324 .pm = &tegra_soctherm_pm, 2325 .of_match_table = tegra_soctherm_of_match, 2326 }, 2327 }; 2328 module_platform_driver(tegra_soctherm_driver); 2329 2330 MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>"); 2331 MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver"); 2332 MODULE_LICENSE("GPL v2"); 2333