1 /* 2 * Copyright (C) STMicroelectronics 2009 3 * Copyright (C) ST-Ericsson SA 2010 4 * 5 * License Terms: GNU General Public License v2 6 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> 7 * Author: Sundar Iyer <sundar.iyer@stericsson.com> 8 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> 9 * 10 * U8500 PRCM Unit interface driver 11 * 12 */ 13 #include <linux/module.h> 14 #include <linux/kernel.h> 15 #include <linux/delay.h> 16 #include <linux/errno.h> 17 #include <linux/err.h> 18 #include <linux/spinlock.h> 19 #include <linux/io.h> 20 #include <linux/slab.h> 21 #include <linux/mutex.h> 22 #include <linux/completion.h> 23 #include <linux/irq.h> 24 #include <linux/jiffies.h> 25 #include <linux/bitops.h> 26 #include <linux/fs.h> 27 #include <linux/of.h> 28 #include <linux/platform_device.h> 29 #include <linux/uaccess.h> 30 #include <linux/mfd/core.h> 31 #include <linux/mfd/dbx500-prcmu.h> 32 #include <linux/mfd/abx500/ab8500.h> 33 #include <linux/regulator/db8500-prcmu.h> 34 #include <linux/regulator/machine.h> 35 #include <linux/cpufreq.h> 36 #include <linux/platform_data/ux500_wdt.h> 37 #include <linux/platform_data/db8500_thermal.h> 38 #include "dbx500-prcmu-regs.h" 39 40 /* Index of different voltages to be used when accessing AVSData */ 41 #define PRCM_AVS_BASE 0x2FC 42 #define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0) 43 #define PRCM_AVS_VBB_MAX_OPP (PRCM_AVS_BASE + 0x1) 44 #define PRCM_AVS_VBB_100_OPP (PRCM_AVS_BASE + 0x2) 45 #define PRCM_AVS_VBB_50_OPP (PRCM_AVS_BASE + 0x3) 46 #define PRCM_AVS_VARM_MAX_OPP (PRCM_AVS_BASE + 0x4) 47 #define PRCM_AVS_VARM_100_OPP (PRCM_AVS_BASE + 0x5) 48 #define PRCM_AVS_VARM_50_OPP (PRCM_AVS_BASE + 0x6) 49 #define PRCM_AVS_VARM_RET (PRCM_AVS_BASE + 0x7) 50 #define PRCM_AVS_VAPE_100_OPP (PRCM_AVS_BASE + 0x8) 51 #define PRCM_AVS_VAPE_50_OPP (PRCM_AVS_BASE + 0x9) 52 #define PRCM_AVS_VMOD_100_OPP (PRCM_AVS_BASE + 0xA) 53 #define PRCM_AVS_VMOD_50_OPP (PRCM_AVS_BASE + 0xB) 54 #define PRCM_AVS_VSAFE (PRCM_AVS_BASE + 
0xC) 55 56 #define PRCM_AVS_VOLTAGE 0 57 #define PRCM_AVS_VOLTAGE_MASK 0x3f 58 #define PRCM_AVS_ISSLOWSTARTUP 6 59 #define PRCM_AVS_ISSLOWSTARTUP_MASK (1 << PRCM_AVS_ISSLOWSTARTUP) 60 #define PRCM_AVS_ISMODEENABLE 7 61 #define PRCM_AVS_ISMODEENABLE_MASK (1 << PRCM_AVS_ISMODEENABLE) 62 63 #define PRCM_BOOT_STATUS 0xFFF 64 #define PRCM_ROMCODE_A2P 0xFFE 65 #define PRCM_ROMCODE_P2A 0xFFD 66 #define PRCM_XP70_CUR_PWR_STATE 0xFFC /* 4 BYTES */ 67 68 #define PRCM_SW_RST_REASON 0xFF8 /* 2 bytes */ 69 70 #define _PRCM_MBOX_HEADER 0xFE8 /* 16 bytes */ 71 #define PRCM_MBOX_HEADER_REQ_MB0 (_PRCM_MBOX_HEADER + 0x0) 72 #define PRCM_MBOX_HEADER_REQ_MB1 (_PRCM_MBOX_HEADER + 0x1) 73 #define PRCM_MBOX_HEADER_REQ_MB2 (_PRCM_MBOX_HEADER + 0x2) 74 #define PRCM_MBOX_HEADER_REQ_MB3 (_PRCM_MBOX_HEADER + 0x3) 75 #define PRCM_MBOX_HEADER_REQ_MB4 (_PRCM_MBOX_HEADER + 0x4) 76 #define PRCM_MBOX_HEADER_REQ_MB5 (_PRCM_MBOX_HEADER + 0x5) 77 #define PRCM_MBOX_HEADER_ACK_MB0 (_PRCM_MBOX_HEADER + 0x8) 78 79 /* Req Mailboxes */ 80 #define PRCM_REQ_MB0 0xFDC /* 12 bytes */ 81 #define PRCM_REQ_MB1 0xFD0 /* 12 bytes */ 82 #define PRCM_REQ_MB2 0xFC0 /* 16 bytes */ 83 #define PRCM_REQ_MB3 0xE4C /* 372 bytes */ 84 #define PRCM_REQ_MB4 0xE48 /* 4 bytes */ 85 #define PRCM_REQ_MB5 0xE44 /* 4 bytes */ 86 87 /* Ack Mailboxes */ 88 #define PRCM_ACK_MB0 0xE08 /* 52 bytes */ 89 #define PRCM_ACK_MB1 0xE04 /* 4 bytes */ 90 #define PRCM_ACK_MB2 0xE00 /* 4 bytes */ 91 #define PRCM_ACK_MB3 0xDFC /* 4 bytes */ 92 #define PRCM_ACK_MB4 0xDF8 /* 4 bytes */ 93 #define PRCM_ACK_MB5 0xDF4 /* 4 bytes */ 94 95 /* Mailbox 0 headers */ 96 #define MB0H_POWER_STATE_TRANS 0 97 #define MB0H_CONFIG_WAKEUPS_EXE 1 98 #define MB0H_READ_WAKEUP_ACK 3 99 #define MB0H_CONFIG_WAKEUPS_SLEEP 4 100 101 #define MB0H_WAKEUP_EXE 2 102 #define MB0H_WAKEUP_SLEEP 5 103 104 /* Mailbox 0 REQs */ 105 #define PRCM_REQ_MB0_AP_POWER_STATE (PRCM_REQ_MB0 + 0x0) 106 #define PRCM_REQ_MB0_AP_PLL_STATE (PRCM_REQ_MB0 + 0x1) 107 #define 
PRCM_REQ_MB0_ULP_CLOCK_STATE (PRCM_REQ_MB0 + 0x2) 108 #define PRCM_REQ_MB0_DO_NOT_WFI (PRCM_REQ_MB0 + 0x3) 109 #define PRCM_REQ_MB0_WAKEUP_8500 (PRCM_REQ_MB0 + 0x4) 110 #define PRCM_REQ_MB0_WAKEUP_4500 (PRCM_REQ_MB0 + 0x8) 111 112 /* Mailbox 0 ACKs */ 113 #define PRCM_ACK_MB0_AP_PWRSTTR_STATUS (PRCM_ACK_MB0 + 0x0) 114 #define PRCM_ACK_MB0_READ_POINTER (PRCM_ACK_MB0 + 0x1) 115 #define PRCM_ACK_MB0_WAKEUP_0_8500 (PRCM_ACK_MB0 + 0x4) 116 #define PRCM_ACK_MB0_WAKEUP_0_4500 (PRCM_ACK_MB0 + 0x8) 117 #define PRCM_ACK_MB0_WAKEUP_1_8500 (PRCM_ACK_MB0 + 0x1C) 118 #define PRCM_ACK_MB0_WAKEUP_1_4500 (PRCM_ACK_MB0 + 0x20) 119 #define PRCM_ACK_MB0_EVENT_4500_NUMBERS 20 120 121 /* Mailbox 1 headers */ 122 #define MB1H_ARM_APE_OPP 0x0 123 #define MB1H_RESET_MODEM 0x2 124 #define MB1H_REQUEST_APE_OPP_100_VOLT 0x3 125 #define MB1H_RELEASE_APE_OPP_100_VOLT 0x4 126 #define MB1H_RELEASE_USB_WAKEUP 0x5 127 #define MB1H_PLL_ON_OFF 0x6 128 129 /* Mailbox 1 Requests */ 130 #define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0) 131 #define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1) 132 #define PRCM_REQ_MB1_PLL_ON_OFF (PRCM_REQ_MB1 + 0x4) 133 #define PLL_SOC0_OFF 0x1 134 #define PLL_SOC0_ON 0x2 135 #define PLL_SOC1_OFF 0x4 136 #define PLL_SOC1_ON 0x8 137 138 /* Mailbox 1 ACKs */ 139 #define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0) 140 #define PRCM_ACK_MB1_CURRENT_APE_OPP (PRCM_ACK_MB1 + 0x1) 141 #define PRCM_ACK_MB1_APE_VOLTAGE_STATUS (PRCM_ACK_MB1 + 0x2) 142 #define PRCM_ACK_MB1_DVFS_STATUS (PRCM_ACK_MB1 + 0x3) 143 144 /* Mailbox 2 headers */ 145 #define MB2H_DPS 0x0 146 #define MB2H_AUTO_PWR 0x1 147 148 /* Mailbox 2 REQs */ 149 #define PRCM_REQ_MB2_SVA_MMDSP (PRCM_REQ_MB2 + 0x0) 150 #define PRCM_REQ_MB2_SVA_PIPE (PRCM_REQ_MB2 + 0x1) 151 #define PRCM_REQ_MB2_SIA_MMDSP (PRCM_REQ_MB2 + 0x2) 152 #define PRCM_REQ_MB2_SIA_PIPE (PRCM_REQ_MB2 + 0x3) 153 #define PRCM_REQ_MB2_SGA (PRCM_REQ_MB2 + 0x4) 154 #define PRCM_REQ_MB2_B2R2_MCDE (PRCM_REQ_MB2 + 0x5) 155 #define PRCM_REQ_MB2_ESRAM12 
(PRCM_REQ_MB2 + 0x6) 156 #define PRCM_REQ_MB2_ESRAM34 (PRCM_REQ_MB2 + 0x7) 157 #define PRCM_REQ_MB2_AUTO_PM_SLEEP (PRCM_REQ_MB2 + 0x8) 158 #define PRCM_REQ_MB2_AUTO_PM_IDLE (PRCM_REQ_MB2 + 0xC) 159 160 /* Mailbox 2 ACKs */ 161 #define PRCM_ACK_MB2_DPS_STATUS (PRCM_ACK_MB2 + 0x0) 162 #define HWACC_PWR_ST_OK 0xFE 163 164 /* Mailbox 3 headers */ 165 #define MB3H_ANC 0x0 166 #define MB3H_SIDETONE 0x1 167 #define MB3H_SYSCLK 0xE 168 169 /* Mailbox 3 Requests */ 170 #define PRCM_REQ_MB3_ANC_FIR_COEFF (PRCM_REQ_MB3 + 0x0) 171 #define PRCM_REQ_MB3_ANC_IIR_COEFF (PRCM_REQ_MB3 + 0x20) 172 #define PRCM_REQ_MB3_ANC_SHIFTER (PRCM_REQ_MB3 + 0x60) 173 #define PRCM_REQ_MB3_ANC_WARP (PRCM_REQ_MB3 + 0x64) 174 #define PRCM_REQ_MB3_SIDETONE_FIR_GAIN (PRCM_REQ_MB3 + 0x68) 175 #define PRCM_REQ_MB3_SIDETONE_FIR_COEFF (PRCM_REQ_MB3 + 0x6C) 176 #define PRCM_REQ_MB3_SYSCLK_MGT (PRCM_REQ_MB3 + 0x16C) 177 178 /* Mailbox 4 headers */ 179 #define MB4H_DDR_INIT 0x0 180 #define MB4H_MEM_ST 0x1 181 #define MB4H_HOTDOG 0x12 182 #define MB4H_HOTMON 0x13 183 #define MB4H_HOT_PERIOD 0x14 184 #define MB4H_A9WDOG_CONF 0x16 185 #define MB4H_A9WDOG_EN 0x17 186 #define MB4H_A9WDOG_DIS 0x18 187 #define MB4H_A9WDOG_LOAD 0x19 188 #define MB4H_A9WDOG_KICK 0x20 189 190 /* Mailbox 4 Requests */ 191 #define PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE (PRCM_REQ_MB4 + 0x0) 192 #define PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE (PRCM_REQ_MB4 + 0x1) 193 #define PRCM_REQ_MB4_ESRAM0_ST (PRCM_REQ_MB4 + 0x3) 194 #define PRCM_REQ_MB4_HOTDOG_THRESHOLD (PRCM_REQ_MB4 + 0x0) 195 #define PRCM_REQ_MB4_HOTMON_LOW (PRCM_REQ_MB4 + 0x0) 196 #define PRCM_REQ_MB4_HOTMON_HIGH (PRCM_REQ_MB4 + 0x1) 197 #define PRCM_REQ_MB4_HOTMON_CONFIG (PRCM_REQ_MB4 + 0x2) 198 #define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 0x0) 199 #define HOTMON_CONFIG_LOW BIT(0) 200 #define HOTMON_CONFIG_HIGH BIT(1) 201 #define PRCM_REQ_MB4_A9WDOG_0 (PRCM_REQ_MB4 + 0x0) 202 #define PRCM_REQ_MB4_A9WDOG_1 (PRCM_REQ_MB4 + 0x1) 203 #define PRCM_REQ_MB4_A9WDOG_2 (PRCM_REQ_MB4 + 0x2) 204 
#define PRCM_REQ_MB4_A9WDOG_3 (PRCM_REQ_MB4 + 0x3) 205 #define A9WDOG_AUTO_OFF_EN BIT(7) 206 #define A9WDOG_AUTO_OFF_DIS 0 207 #define A9WDOG_ID_MASK 0xf 208 209 /* Mailbox 5 Requests */ 210 #define PRCM_REQ_MB5_I2C_SLAVE_OP (PRCM_REQ_MB5 + 0x0) 211 #define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1) 212 #define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2) 213 #define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3) 214 #define PRCMU_I2C_WRITE(slave) (((slave) << 1) | BIT(6)) 215 #define PRCMU_I2C_READ(slave) (((slave) << 1) | BIT(0) | BIT(6)) 216 #define PRCMU_I2C_STOP_EN BIT(3) 217 218 /* Mailbox 5 ACKs */ 219 #define PRCM_ACK_MB5_I2C_STATUS (PRCM_ACK_MB5 + 0x1) 220 #define PRCM_ACK_MB5_I2C_VAL (PRCM_ACK_MB5 + 0x3) 221 #define I2C_WR_OK 0x1 222 #define I2C_RD_OK 0x2 223 224 #define NUM_MB 8 225 #define MBOX_BIT BIT 226 #define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1) 227 228 /* 229 * Wakeups/IRQs 230 */ 231 232 #define WAKEUP_BIT_RTC BIT(0) 233 #define WAKEUP_BIT_RTT0 BIT(1) 234 #define WAKEUP_BIT_RTT1 BIT(2) 235 #define WAKEUP_BIT_HSI0 BIT(3) 236 #define WAKEUP_BIT_HSI1 BIT(4) 237 #define WAKEUP_BIT_CA_WAKE BIT(5) 238 #define WAKEUP_BIT_USB BIT(6) 239 #define WAKEUP_BIT_ABB BIT(7) 240 #define WAKEUP_BIT_ABB_FIFO BIT(8) 241 #define WAKEUP_BIT_SYSCLK_OK BIT(9) 242 #define WAKEUP_BIT_CA_SLEEP BIT(10) 243 #define WAKEUP_BIT_AC_WAKE_ACK BIT(11) 244 #define WAKEUP_BIT_SIDE_TONE_OK BIT(12) 245 #define WAKEUP_BIT_ANC_OK BIT(13) 246 #define WAKEUP_BIT_SW_ERROR BIT(14) 247 #define WAKEUP_BIT_AC_SLEEP_ACK BIT(15) 248 #define WAKEUP_BIT_ARM BIT(17) 249 #define WAKEUP_BIT_HOTMON_LOW BIT(18) 250 #define WAKEUP_BIT_HOTMON_HIGH BIT(19) 251 #define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20) 252 #define WAKEUP_BIT_GPIO0 BIT(23) 253 #define WAKEUP_BIT_GPIO1 BIT(24) 254 #define WAKEUP_BIT_GPIO2 BIT(25) 255 #define WAKEUP_BIT_GPIO3 BIT(26) 256 #define WAKEUP_BIT_GPIO4 BIT(27) 257 #define WAKEUP_BIT_GPIO5 BIT(28) 258 #define WAKEUP_BIT_GPIO6 BIT(29) 259 #define WAKEUP_BIT_GPIO7 BIT(30) 260 #define 
WAKEUP_BIT_GPIO8 BIT(31) 261 262 static struct { 263 bool valid; 264 struct prcmu_fw_version version; 265 } fw_info; 266 267 static struct irq_domain *db8500_irq_domain; 268 269 /* 270 * This vector maps irq numbers to the bits in the bit field used in 271 * communication with the PRCMU firmware. 272 * 273 * The reason for having this is to keep the irq numbers contiguous even though 274 * the bits in the bit field are not. (The bits also have a tendency to move 275 * around, to further complicate matters.) 276 */ 277 #define IRQ_INDEX(_name) ((IRQ_PRCMU_##_name)) 278 #define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name) 279 280 #define IRQ_PRCMU_RTC 0 281 #define IRQ_PRCMU_RTT0 1 282 #define IRQ_PRCMU_RTT1 2 283 #define IRQ_PRCMU_HSI0 3 284 #define IRQ_PRCMU_HSI1 4 285 #define IRQ_PRCMU_CA_WAKE 5 286 #define IRQ_PRCMU_USB 6 287 #define IRQ_PRCMU_ABB 7 288 #define IRQ_PRCMU_ABB_FIFO 8 289 #define IRQ_PRCMU_ARM 9 290 #define IRQ_PRCMU_MODEM_SW_RESET_REQ 10 291 #define IRQ_PRCMU_GPIO0 11 292 #define IRQ_PRCMU_GPIO1 12 293 #define IRQ_PRCMU_GPIO2 13 294 #define IRQ_PRCMU_GPIO3 14 295 #define IRQ_PRCMU_GPIO4 15 296 #define IRQ_PRCMU_GPIO5 16 297 #define IRQ_PRCMU_GPIO6 17 298 #define IRQ_PRCMU_GPIO7 18 299 #define IRQ_PRCMU_GPIO8 19 300 #define IRQ_PRCMU_CA_SLEEP 20 301 #define IRQ_PRCMU_HOTMON_LOW 21 302 #define IRQ_PRCMU_HOTMON_HIGH 22 303 #define NUM_PRCMU_WAKEUPS 23 304 305 static u32 prcmu_irq_bit[NUM_PRCMU_WAKEUPS] = { 306 IRQ_ENTRY(RTC), 307 IRQ_ENTRY(RTT0), 308 IRQ_ENTRY(RTT1), 309 IRQ_ENTRY(HSI0), 310 IRQ_ENTRY(HSI1), 311 IRQ_ENTRY(CA_WAKE), 312 IRQ_ENTRY(USB), 313 IRQ_ENTRY(ABB), 314 IRQ_ENTRY(ABB_FIFO), 315 IRQ_ENTRY(CA_SLEEP), 316 IRQ_ENTRY(ARM), 317 IRQ_ENTRY(HOTMON_LOW), 318 IRQ_ENTRY(HOTMON_HIGH), 319 IRQ_ENTRY(MODEM_SW_RESET_REQ), 320 IRQ_ENTRY(GPIO0), 321 IRQ_ENTRY(GPIO1), 322 IRQ_ENTRY(GPIO2), 323 IRQ_ENTRY(GPIO3), 324 IRQ_ENTRY(GPIO4), 325 IRQ_ENTRY(GPIO5), 326 IRQ_ENTRY(GPIO6), 327 IRQ_ENTRY(GPIO7), 328 IRQ_ENTRY(GPIO8) 329 }; 330 331 
#define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1)
#define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name)
/* Maps the generic PRCMU wakeup indices to the db8500 wakeup bit field. */
static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = {
	WAKEUP_ENTRY(RTC),
	WAKEUP_ENTRY(RTT0),
	WAKEUP_ENTRY(RTT1),
	WAKEUP_ENTRY(HSI0),
	WAKEUP_ENTRY(HSI1),
	WAKEUP_ENTRY(USB),
	WAKEUP_ENTRY(ABB),
	WAKEUP_ENTRY(ABB_FIFO),
	WAKEUP_ENTRY(ARM)
};

/*
 * mb0_transfer - state needed for mailbox 0 communication.
 * @lock:		The transaction lock.
 * @dbb_irqs_lock:	A lock used to handle concurrent access to (parts of)
 *			the request data.
 * @mask_work:		Work structure used for (un)masking wakeup interrupts.
 * @ac_wake_lock:	Mutex serializing modem wake (AC_WAKE) requests.
 *			NOTE(review): presumably used with @ac_wake_work by the
 *			modem wake code later in this file — not visible here.
 * @ac_wake_work:	Completion signalled when an AC_WAKE/AC_SLEEP ack
 *			arrives.
 * @req:		Request data that need to persist between requests.
 */
static struct {
	spinlock_t lock;
	spinlock_t dbb_irqs_lock;
	struct work_struct mask_work;
	struct mutex ac_wake_lock;
	struct completion ac_wake_work;
	struct {
		u32 dbb_irqs;
		u32 dbb_wakeups;
		u32 abb_events;
	} req;
} mb0_transfer;

/*
 * mb1_transfer - state needed for mailbox 1 communication.
 * @lock:	The transaction lock.
 * @work:	The transaction completion structure.
 * @ape_opp:	The current APE OPP.
 * @ack:	Reply ("acknowledge") data.
 */
static struct {
	struct mutex lock;
	struct completion work;
	u8 ape_opp;
	struct {
		u8 header;
		u8 arm_opp;
		u8 ape_opp;
		u8 ape_voltage_status;
	} ack;
} mb1_transfer;

/*
 * mb2_transfer - state needed for mailbox 2 communication.
 * @lock:            The transaction lock.
 * @work:            The transaction completion structure.
 * @auto_pm_lock:    The autonomous power management configuration lock.
 * @auto_pm_enabled: A flag indicating whether autonomous PM is enabled.
 * @ack:             Reply ("acknowledge") data.
 */
static struct {
	struct mutex lock;
	struct completion work;
	spinlock_t auto_pm_lock;
	bool auto_pm_enabled;
	struct {
		u8 status;
	} ack;
} mb2_transfer;

/*
 * mb3_transfer - state needed for mailbox 3 communication.
 * @lock:		The request lock.
 * @sysclk_lock:	A lock used to handle concurrent sysclk requests.
 * @sysclk_work:	Work structure used for sysclk requests.
 */
static struct {
	spinlock_t lock;
	struct mutex sysclk_lock;
	struct completion sysclk_work;
} mb3_transfer;

/*
 * mb4_transfer - state needed for mailbox 4 communication.
 * @lock:	The transaction lock.
 * @work:	The transaction completion structure.
 */
static struct {
	struct mutex lock;
	struct completion work;
} mb4_transfer;

/*
 * mb5_transfer - state needed for mailbox 5 communication.
 * @lock:	The transaction lock.
 * @work:	The transaction completion structure.
 * @ack:	Reply ("acknowledge") data.
 */
static struct {
	struct mutex lock;
	struct completion work;
	struct {
		u8 status;
		u8 value;
	} ack;
} mb5_transfer;

/* Non-zero while an AC_WAKE (modem wake) request is outstanding. */
static atomic_t ac_wake_req_state = ATOMIC_INIT(0);

/* Spinlocks */
static DEFINE_SPINLOCK(prcmu_lock);
static DEFINE_SPINLOCK(clkout_lock);

/* Global var to runtime determine TCDM base for v2 or v1 */
static __iomem void *tcdm_base;
static __iomem void *prcmu_base;

/*
 * clk_mgt - description of a PRCMU-managed clock.
 * @offset:	Offset of the clock's *_MGT register from prcmu_base.
 * @pllsw:	Saved PLL-selection bits (initialized to 0; presumably cached
 *		by the clock gate/ungate code elsewhere in this file).
 * @branch:	Which PLL branch feeds the clock (PLL_RAW/PLL_FIX/PLL_DIV).
 * @clk38div:	True if the clock supports the 38.4 MHz divided source.
 */
struct clk_mgt {
	u32 offset;
	u32 pllsw;
	int branch;
	bool clk38div;
};

enum {
	PLL_RAW,
	PLL_FIX,
	PLL_DIV
};

static DEFINE_SPINLOCK(clk_mgt_lock);

#define CLK_MGT_ENTRY(_name, _branch, _clk38div)[PRCMU_##_name] = \
	{ (PRCM_##_name##_MGT), 0 , _branch, _clk38div}
struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
	CLK_MGT_ENTRY(SGACLK, PLL_DIV, false),
	CLK_MGT_ENTRY(UARTCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(MSP02CLK, PLL_FIX, true),
	CLK_MGT_ENTRY(MSP1CLK, PLL_FIX, true),
	CLK_MGT_ENTRY(I2CCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(SDMMCCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(SLIMCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(PER1CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER2CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER3CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER5CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER6CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER7CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(LCDCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(BMLCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(HSITXCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(HSIRXCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(HDMICLK, PLL_FIX, false),
	CLK_MGT_ENTRY(APEATCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(APETRACECLK, PLL_DIV, true),
	CLK_MGT_ENTRY(MCDECLK, PLL_DIV, true),
	CLK_MGT_ENTRY(IPI2CCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(DSIALTCLK, PLL_FIX, false),
	CLK_MGT_ENTRY(DMACLK, PLL_DIV, true),
	CLK_MGT_ENTRY(B2R2CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(TVCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(SSPCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(RNGCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(UICCCLK, PLL_FIX, false),
};

/* Per-link DSI PLL-output divider selection fields. */
struct dsiclk {
	u32 divsel_mask;
	u32 divsel_shift;
	u32 divsel;
};

static struct dsiclk dsiclk[2] = {
	{
		.divsel_mask = PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_MASK,
		.divsel_shift = PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_SHIFT,
		.divsel = PRCM_DSI_PLLOUT_SEL_PHI,
	},
	{
		.divsel_mask = PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_MASK,
		.divsel_shift = PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_SHIFT,
		.divsel = PRCM_DSI_PLLOUT_SEL_PHI,
	}
};

/* Per-link DSI escape-clock enable and divider fields. */
struct dsiescclk {
	u32 en;
	u32 div_mask;
	u32 div_shift;
};

static struct dsiescclk dsiescclk[3] = {
	{
		.en = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_EN,
		.div_mask = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_MASK,
		.div_shift = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_SHIFT,
	},
	{
		.en = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_EN,
		.div_mask = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_MASK,
		.div_shift = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_SHIFT,
	},
	{
		.en = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_EN,
		.div_mask = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_MASK,
		.div_shift = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_SHIFT,
	}
};


/*
 * Used by MCDE to setup all necessary PRCMU registers
 */
#define PRCMU_RESET_DSIPLL		0x00004000
#define PRCMU_UNCLAMP_DSIPLL		0x00400800

#define PRCMU_CLK_PLL_DIV_SHIFT		0
#define PRCMU_CLK_PLL_SW_SHIFT		5
#define PRCMU_CLK_38			(1 << 9)
#define PRCMU_CLK_38_SRC		(1 << 10)
#define PRCMU_CLK_38_DIV		(1 << 11)

/* PLLDIV=12, PLLSW=4 (PLLDDR) */
#define PRCMU_DSI_CLOCK_SETTING		0x0000008C

/* DPI 50000000 Hz */
#define PRCMU_DPI_CLOCK_SETTING		((1 << PRCMU_CLK_PLL_SW_SHIFT) | \
					  (16 << PRCMU_CLK_PLL_DIV_SHIFT))
#define PRCMU_DSI_LP_CLOCK_SETTING	0x00000E00

/* D=101, N=1, R=4, SELDIV2=0 */
#define PRCMU_PLLDSI_FREQ_SETTING	0x00040165
#define PRCMU_ENABLE_PLLDSI		0x00000001
#define PRCMU_DISABLE_PLLDSI		0x00000000
#define PRCMU_RELEASE_RESET_DSS		0x0000400C
#define PRCMU_DSI_PLLOUT_SEL_SETTING	0x00000202
/* ESC clk, div0=1, div1=1, div2=3 */
#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV	0x07030101
#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV	0x00030101
#define PRCMU_DSI_RESET_SW		0x00000007

#define PRCMU_PLLDSI_LOCKP_LOCKED	0x3

/**
 * db8500_prcmu_enable_dsipll - Power up and lock the DSI PLL
 *
 * Runs the hardware-mandated bring-up sequence: release reset, unclamp,
 * program frequency and output selection, enable escape clocks, start the
 * PLL, soft-reset the DSI links, then poll for PLL lock.
 *
 * Returns: always 0.
 * NOTE(review): the lock poll is bounded (10 x 100us) and the function still
 * returns 0 if lock was never observed — callers cannot detect a lock
 * failure. Confirm this is intentional before relying on the return value.
 */
int db8500_prcmu_enable_dsipll(void)
{
	int i;

	/* Clear DSIPLL_RESETN */
	writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
	/* Unclamp DSIPLL in/out */
	writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);

	/* Set DSI PLL FREQ */
	writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ);
	writel(PRCMU_DSI_PLLOUT_SEL_SETTING, PRCM_DSI_PLLOUT_SEL);
	/* Enable Escape clocks */
	writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);

	/* Start DSI PLL */
	writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
	/* Reset DSI PLL */
	writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
	/* Wait (bounded) for the PLL to report lock. */
	for (i = 0; i < 10; i++) {
		if ((readl(PRCM_PLLDSI_LOCKP) & PRCMU_PLLDSI_LOCKP_LOCKED)
					== PRCMU_PLLDSI_LOCKP_LOCKED)
			break;
		udelay(100);
	}
	/* Set DSIPLL_RESETN */
	writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET);
	return 0;
}

/**
 * db8500_prcmu_disable_dsipll - Power down the DSI PLL
 *
 * Returns: always 0.
 */
int db8500_prcmu_disable_dsipll(void)
{
	/* Disable dsi pll */
	writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
	/* Disable escapeclock */
	writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
	return 0;
}

/**
 * db8500_prcmu_set_display_clocks - Program HDMI/TV/LCD clock settings
 *
 * Writes the fixed MCDE display clock configuration under the PRCM hardware
 * semaphore (shared with the PRCMU firmware), with clk_mgt_lock held to
 * exclude other clock-register updates.
 *
 * Returns: always 0.
 */
int db8500_prcmu_set_display_clocks(void)
{
	unsigned long flags;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	writel(PRCMU_DSI_CLOCK_SETTING, prcmu_base + PRCM_HDMICLK_MGT);
	writel(PRCMU_DSI_LP_CLOCK_SETTING, prcmu_base + PRCM_TVCLK_MGT);
	writel(PRCMU_DPI_CLOCK_SETTING, prcmu_base + PRCM_LCDCLK_MGT);

	/* Release the HW semaphore. */
	writel(0, PRCM_SEM);

	spin_unlock_irqrestore(&clk_mgt_lock, flags);

	return 0;
}

/**
 * db8500_prcmu_read - Read a PRCMU register
 * @reg: Register offset from the PRCMU base.
 *
 * Lockless read; callers needing read-modify-write atomicity must use
 * db8500_prcmu_write_masked() instead.
 */
u32 db8500_prcmu_read(unsigned int reg)
{
	return readl(prcmu_base + reg);
}

/**
 * db8500_prcmu_write - Write a PRCMU register
 * @reg: Register offset from the PRCMU base.
 * @value: Value to write.
 *
 * Serialized against other writers by prcmu_lock.
 */
void db8500_prcmu_write(unsigned int reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(&prcmu_lock, flags);
	writel(value, (prcmu_base + reg));
	spin_unlock_irqrestore(&prcmu_lock, flags);
}

/**
 * db8500_prcmu_write_masked - Atomically update bits of a PRCMU register
 * @reg: Register offset from the PRCMU base.
 * @mask: Bits to modify.
 * @value: New values for the bits selected by @mask.
 *
 * Read-modify-write under prcmu_lock; bits outside @mask are preserved.
 */
void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&prcmu_lock, flags);
	val = readl(prcmu_base + reg);
	val = ((val & ~mask) | (value & mask));
	writel(val, (prcmu_base + reg));
	spin_unlock_irqrestore(&prcmu_lock, flags);
}

/* Returns the firmware version info, or NULL if not yet determined. */
struct prcmu_fw_version *prcmu_get_fw_version(void)
{
	return fw_info.valid ? &fw_info.version : NULL;
}

/* True if the AVS data says the ARM MAX OPP mode is enabled. */
bool prcmu_has_arm_maxopp(void)
{
	return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
		PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
}

/**
 * prcmu_get_boot_status - PRCMU boot status checking
 * Returns: the current PRCMU boot status
 */
int prcmu_get_boot_status(void)
{
	return readb(tcdm_base + PRCM_BOOT_STATUS);
}

/**
 * prcmu_set_rc_a2p - This function is used to run few power state sequences
 * @val: Value to be set, i.e. transition requested
 * Returns: 0 on success, -EINVAL on invalid argument
 *
 * This function is used to run the following power state sequences -
 * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
 */
int prcmu_set_rc_a2p(enum romcode_write val)
{
	if (val < RDY_2_DS || val > RDY_2_XP70_RST)
		return -EINVAL;
	writeb(val, (tcdm_base + PRCM_ROMCODE_A2P));
	return 0;
}

/**
 * prcmu_get_rc_p2a - This function is used to get power state sequences
 * Returns: the power transition that has last happened
 *
 * This function can return the following transitions-
 * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
 */
enum romcode_read prcmu_get_rc_p2a(void)
{
	return readb(tcdm_base + PRCM_ROMCODE_P2A);
}

/**
 * prcmu_get_xp70_current_state - Return the current XP70 power mode
 * Returns: Returns the current AP(ARM) power mode: init,
 * apBoot, apExecute, apDeepSleep, apSleep, apIdle, apReset
 */
enum ap_pwrst prcmu_get_xp70_current_state(void)
{
	return readb(tcdm_base + PRCM_XP70_CUR_PWR_STATE);
}

/**
 * prcmu_config_clkout - Configure one of the programmable clock outputs.
 * @clkout: The CLKOUT number (0 or 1).
 * @source: The clock to be used (one of the PRCMU_CLKSRC_*).
 * @div: The divider to be applied.
 *
 * Configures one of the programmable clock outputs (CLKOUTs).
 * @div should be in the range [1,63] to request a configuration, or 0 to
 * inform that the configuration is no longer requested.
732 */ 733 int prcmu_config_clkout(u8 clkout, u8 source, u8 div) 734 { 735 static int requests[2]; 736 int r = 0; 737 unsigned long flags; 738 u32 val; 739 u32 bits; 740 u32 mask; 741 u32 div_mask; 742 743 BUG_ON(clkout > 1); 744 BUG_ON(div > 63); 745 BUG_ON((clkout == 0) && (source > PRCMU_CLKSRC_CLK009)); 746 747 if (!div && !requests[clkout]) 748 return -EINVAL; 749 750 switch (clkout) { 751 case 0: 752 div_mask = PRCM_CLKOCR_CLKODIV0_MASK; 753 mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK); 754 bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) | 755 (div << PRCM_CLKOCR_CLKODIV0_SHIFT)); 756 break; 757 case 1: 758 div_mask = PRCM_CLKOCR_CLKODIV1_MASK; 759 mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK | 760 PRCM_CLKOCR_CLK1TYPE); 761 bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) | 762 (div << PRCM_CLKOCR_CLKODIV1_SHIFT)); 763 break; 764 } 765 bits &= mask; 766 767 spin_lock_irqsave(&clkout_lock, flags); 768 769 val = readl(PRCM_CLKOCR); 770 if (val & div_mask) { 771 if (div) { 772 if ((val & mask) != bits) { 773 r = -EBUSY; 774 goto unlock_and_return; 775 } 776 } else { 777 if ((val & mask & ~div_mask) != bits) { 778 r = -EINVAL; 779 goto unlock_and_return; 780 } 781 } 782 } 783 writel((bits | (val & ~mask)), PRCM_CLKOCR); 784 requests[clkout] += (div ? 1 : -1); 785 786 unlock_and_return: 787 spin_unlock_irqrestore(&clkout_lock, flags); 788 789 return r; 790 } 791 792 int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll) 793 { 794 unsigned long flags; 795 796 BUG_ON((state < PRCMU_AP_SLEEP) || (PRCMU_AP_DEEP_IDLE < state)); 797 798 spin_lock_irqsave(&mb0_transfer.lock, flags); 799 800 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0)) 801 cpu_relax(); 802 803 writeb(MB0H_POWER_STATE_TRANS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0)); 804 writeb(state, (tcdm_base + PRCM_REQ_MB0_AP_POWER_STATE)); 805 writeb((keep_ap_pll ? 1 : 0), (tcdm_base + PRCM_REQ_MB0_AP_PLL_STATE)); 806 writeb((keep_ulp_clk ? 
1 : 0), 807 (tcdm_base + PRCM_REQ_MB0_ULP_CLOCK_STATE)); 808 writeb(0, (tcdm_base + PRCM_REQ_MB0_DO_NOT_WFI)); 809 writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET); 810 811 spin_unlock_irqrestore(&mb0_transfer.lock, flags); 812 813 return 0; 814 } 815 816 u8 db8500_prcmu_get_power_state_result(void) 817 { 818 return readb(tcdm_base + PRCM_ACK_MB0_AP_PWRSTTR_STATUS); 819 } 820 821 /* This function should only be called while mb0_transfer.lock is held. */ 822 static void config_wakeups(void) 823 { 824 const u8 header[2] = { 825 MB0H_CONFIG_WAKEUPS_EXE, 826 MB0H_CONFIG_WAKEUPS_SLEEP 827 }; 828 static u32 last_dbb_events; 829 static u32 last_abb_events; 830 u32 dbb_events; 831 u32 abb_events; 832 unsigned int i; 833 834 dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups; 835 dbb_events |= (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK); 836 837 abb_events = mb0_transfer.req.abb_events; 838 839 if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events)) 840 return; 841 842 for (i = 0; i < 2; i++) { 843 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0)) 844 cpu_relax(); 845 writel(dbb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_8500)); 846 writel(abb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_4500)); 847 writeb(header[i], (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0)); 848 writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET); 849 } 850 last_dbb_events = dbb_events; 851 last_abb_events = abb_events; 852 } 853 854 void db8500_prcmu_enable_wakeups(u32 wakeups) 855 { 856 unsigned long flags; 857 u32 bits; 858 int i; 859 860 BUG_ON(wakeups != (wakeups & VALID_WAKEUPS)); 861 862 for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) { 863 if (wakeups & BIT(i)) 864 bits |= prcmu_wakeup_bit[i]; 865 } 866 867 spin_lock_irqsave(&mb0_transfer.lock, flags); 868 869 mb0_transfer.req.dbb_wakeups = bits; 870 config_wakeups(); 871 872 spin_unlock_irqrestore(&mb0_transfer.lock, flags); 873 } 874 875 void db8500_prcmu_config_abb_event_readout(u32 abb_events) 876 { 877 unsigned long flags; 878 
879 spin_lock_irqsave(&mb0_transfer.lock, flags); 880 881 mb0_transfer.req.abb_events = abb_events; 882 config_wakeups(); 883 884 spin_unlock_irqrestore(&mb0_transfer.lock, flags); 885 } 886 887 void db8500_prcmu_get_abb_event_buffer(void __iomem **buf) 888 { 889 if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1) 890 *buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_1_4500); 891 else 892 *buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_0_4500); 893 } 894 895 /** 896 * db8500_prcmu_set_arm_opp - set the appropriate ARM OPP 897 * @opp: The new ARM operating point to which transition is to be made 898 * Returns: 0 on success, non-zero on failure 899 * 900 * This function sets the the operating point of the ARM. 901 */ 902 int db8500_prcmu_set_arm_opp(u8 opp) 903 { 904 int r; 905 906 if (opp < ARM_NO_CHANGE || opp > ARM_EXTCLK) 907 return -EINVAL; 908 909 r = 0; 910 911 mutex_lock(&mb1_transfer.lock); 912 913 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1)) 914 cpu_relax(); 915 916 writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1)); 917 writeb(opp, (tcdm_base + PRCM_REQ_MB1_ARM_OPP)); 918 writeb(APE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_APE_OPP)); 919 920 writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET); 921 wait_for_completion(&mb1_transfer.work); 922 923 if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) || 924 (mb1_transfer.ack.arm_opp != opp)) 925 r = -EIO; 926 927 mutex_unlock(&mb1_transfer.lock); 928 929 return r; 930 } 931 932 /** 933 * db8500_prcmu_get_arm_opp - get the current ARM OPP 934 * 935 * Returns: the current ARM OPP 936 */ 937 int db8500_prcmu_get_arm_opp(void) 938 { 939 return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_ARM_OPP); 940 } 941 942 /** 943 * db8500_prcmu_get_ddr_opp - get the current DDR OPP 944 * 945 * Returns: the current DDR OPP 946 */ 947 int db8500_prcmu_get_ddr_opp(void) 948 { 949 return readb(PRCM_DDR_SUBSYS_APE_MINBW); 950 } 951 952 /** 953 * db8500_set_ddr_opp - set the appropriate DDR OPP 954 * @opp: The new DDR operating point to which transition 
is to be made 955 * Returns: 0 on success, non-zero on failure 956 * 957 * This function sets the operating point of the DDR. 958 */ 959 static bool enable_set_ddr_opp; 960 int db8500_prcmu_set_ddr_opp(u8 opp) 961 { 962 if (opp < DDR_100_OPP || opp > DDR_25_OPP) 963 return -EINVAL; 964 /* Changing the DDR OPP can hang the hardware pre-v21 */ 965 if (enable_set_ddr_opp) 966 writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW); 967 968 return 0; 969 } 970 971 /* Divide the frequency of certain clocks by 2 for APE_50_PARTLY_25_OPP. */ 972 static void request_even_slower_clocks(bool enable) 973 { 974 u32 clock_reg[] = { 975 PRCM_ACLK_MGT, 976 PRCM_DMACLK_MGT 977 }; 978 unsigned long flags; 979 unsigned int i; 980 981 spin_lock_irqsave(&clk_mgt_lock, flags); 982 983 /* Grab the HW semaphore. */ 984 while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0) 985 cpu_relax(); 986 987 for (i = 0; i < ARRAY_SIZE(clock_reg); i++) { 988 u32 val; 989 u32 div; 990 991 val = readl(prcmu_base + clock_reg[i]); 992 div = (val & PRCM_CLK_MGT_CLKPLLDIV_MASK); 993 if (enable) { 994 if ((div <= 1) || (div > 15)) { 995 pr_err("prcmu: Bad clock divider %d in %s\n", 996 div, __func__); 997 goto unlock_and_return; 998 } 999 div <<= 1; 1000 } else { 1001 if (div <= 2) 1002 goto unlock_and_return; 1003 div >>= 1; 1004 } 1005 val = ((val & ~PRCM_CLK_MGT_CLKPLLDIV_MASK) | 1006 (div & PRCM_CLK_MGT_CLKPLLDIV_MASK)); 1007 writel(val, prcmu_base + clock_reg[i]); 1008 } 1009 1010 unlock_and_return: 1011 /* Release the HW semaphore. */ 1012 writel(0, PRCM_SEM); 1013 1014 spin_unlock_irqrestore(&clk_mgt_lock, flags); 1015 } 1016 1017 /** 1018 * db8500_set_ape_opp - set the appropriate APE OPP 1019 * @opp: The new APE operating point to which transition is to be made 1020 * Returns: 0 on success, non-zero on failure 1021 * 1022 * This function sets the operating point of the APE. 
1023 */ 1024 int db8500_prcmu_set_ape_opp(u8 opp) 1025 { 1026 int r = 0; 1027 1028 if (opp == mb1_transfer.ape_opp) 1029 return 0; 1030 1031 mutex_lock(&mb1_transfer.lock); 1032 1033 if (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP) 1034 request_even_slower_clocks(false); 1035 1036 if ((opp != APE_100_OPP) && (mb1_transfer.ape_opp != APE_100_OPP)) 1037 goto skip_message; 1038 1039 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1)) 1040 cpu_relax(); 1041 1042 writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1)); 1043 writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP)); 1044 writeb(((opp == APE_50_PARTLY_25_OPP) ? APE_50_OPP : opp), 1045 (tcdm_base + PRCM_REQ_MB1_APE_OPP)); 1046 1047 writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET); 1048 wait_for_completion(&mb1_transfer.work); 1049 1050 if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) || 1051 (mb1_transfer.ack.ape_opp != opp)) 1052 r = -EIO; 1053 1054 skip_message: 1055 if ((!r && (opp == APE_50_PARTLY_25_OPP)) || 1056 (r && (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP))) 1057 request_even_slower_clocks(true); 1058 if (!r) 1059 mb1_transfer.ape_opp = opp; 1060 1061 mutex_unlock(&mb1_transfer.lock); 1062 1063 return r; 1064 } 1065 1066 /** 1067 * db8500_prcmu_get_ape_opp - get the current APE OPP 1068 * 1069 * Returns: the current APE OPP 1070 */ 1071 int db8500_prcmu_get_ape_opp(void) 1072 { 1073 return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_APE_OPP); 1074 } 1075 1076 /** 1077 * db8500_prcmu_request_ape_opp_100_voltage - Request APE OPP 100% voltage 1078 * @enable: true to request the higher voltage, false to drop a request. 1079 * 1080 * Calls to this function to enable and disable requests must be balanced. 
 */
int db8500_prcmu_request_ape_opp_100_voltage(bool enable)
{
	int r = 0;
	u8 header;
	/* Reference count of outstanding 100% voltage requests; only the
	 * 0 -> 1 and 1 -> 0 transitions are reported to the firmware. */
	static unsigned int requests;

	mutex_lock(&mb1_transfer.lock);

	if (enable) {
		if (0 != requests++)
			goto unlock_and_return;
		header = MB1H_REQUEST_APE_OPP_100_VOLT;
	} else {
		/* Unbalanced release. */
		if (requests == 0) {
			r = -EIO;
			goto unlock_and_return;
		} else if (1 != requests--) {
			goto unlock_and_return;
		}
		header = MB1H_RELEASE_APE_OPP_100_VOLT;
	}

	/* Busy-wait until mailbox 1 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(header, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	/* BIT(0) of the voltage status signals failure. */
	if ((mb1_transfer.ack.header != header) ||
		((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
		r = -EIO;

unlock_and_return:
	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/**
 * prcmu_release_usb_wakeup_state - release the state required by a USB wakeup
 *
 * This function releases the power state requirements of a USB wakeup.
 */
int prcmu_release_usb_wakeup_state(void)
{
	int r = 0;

	mutex_lock(&mb1_transfer.lock);

	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_RELEASE_USB_WAKEUP,
		(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	if ((mb1_transfer.ack.header != MB1H_RELEASE_USB_WAKEUP) ||
		((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/*
 * Ask the firmware (via mailbox 1) to switch one of the SoC PLLs on or off.
 * Only PRCMU_PLLSOC0 and PRCMU_PLLSOC1 are accepted.
 */
static int request_pll(u8 clock, bool enable)
{
	int r = 0;

	/* Translate the clock id into the firmware's on/off command value. */
	if (clock == PRCMU_PLLSOC0)
		clock = (enable ? PLL_SOC0_ON : PLL_SOC0_OFF);
	else if (clock == PRCMU_PLLSOC1)
		clock = (enable ? PLL_SOC1_ON : PLL_SOC1_OFF);
	else
		return -EINVAL;

	mutex_lock(&mb1_transfer.lock);

	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_PLL_ON_OFF, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writeb(clock, (tcdm_base + PRCM_REQ_MB1_PLL_ON_OFF));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	if (mb1_transfer.ack.header != MB1H_PLL_ON_OFF)
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/**
 * db8500_prcmu_set_epod - set the state of a EPOD (power domain)
 * @epod_id: The EPOD to set
 * @epod_state: The new EPOD state
 *
 * This function sets the state of a EPOD (power domain). It may not be called
 * from interrupt context.
 */
int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state)
{
	int r = 0;
	bool ram_retention = false;
	int i;

	/* check argument */
	BUG_ON(epod_id >= NUM_EPOD_ID);

	/* set flag if retention is possible */
	switch (epod_id) {
	case EPOD_ID_SVAMMDSP:
	case EPOD_ID_SIAMMDSP:
	case EPOD_ID_ESRAM12:
	case EPOD_ID_ESRAM34:
		ram_retention = true;
		break;
	}

	/* check argument */
	BUG_ON(epod_state > EPOD_STATE_ON);
	BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention);

	/* get lock */
	mutex_lock(&mb2_transfer.lock);

	/* wait for mailbox */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
		cpu_relax();

	/* fill in mailbox: leave every other EPOD unchanged. */
	for (i = 0; i < NUM_EPOD_ID; i++)
		writeb(EPOD_STATE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB2 + i));
	writeb(epod_state, (tcdm_base + PRCM_REQ_MB2 + epod_id));

	writeb(MB2H_DPS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB2));

	writel(MBOX_BIT(2), PRCM_MBOX_CPU_SET);

	/*
	 * The current firmware version does not handle errors correctly,
	 * and we cannot recover if
there is an error.
	 * This is expected to change when the firmware is updated.
	 */
	if (!wait_for_completion_timeout(&mb2_transfer.work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
		goto unlock_and_return;
	}

	if (mb2_transfer.ack.status != HWACC_PWR_ST_OK)
		r = -EIO;

unlock_and_return:
	mutex_unlock(&mb2_transfer.lock);
	return r;
}

/**
 * prcmu_configure_auto_pm - Configure autonomous power management.
 * @sleep: Configuration for ApSleep.
 * @idle: Configuration for ApIdle.
 */
void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
	struct prcmu_auto_pm_config *idle)
{
	u32 sleep_cfg;
	u32 idle_cfg;
	unsigned long flags;

	BUG_ON((sleep == NULL) || (idle == NULL));

	/* Pack the configuration into one 32-bit word:
	 * [31:28] sva enable, [27:24] sia enable, [23:16] sva power on,
	 * [15:8] sia power on, [7:4] sva policy, [3:0] sia policy. */
	sleep_cfg = (sleep->sva_auto_pm_enable & 0xF);
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_auto_pm_enable & 0xF));
	sleep_cfg = ((sleep_cfg << 8) | (sleep->sva_power_on & 0xFF));
	sleep_cfg = ((sleep_cfg << 8) | (sleep->sia_power_on & 0xFF));
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sva_policy & 0xF));
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_policy & 0xF));

	idle_cfg = (idle->sva_auto_pm_enable & 0xF);
	idle_cfg = ((idle_cfg << 4) | (idle->sia_auto_pm_enable & 0xF));
	idle_cfg = ((idle_cfg << 8) | (idle->sva_power_on & 0xFF));
	idle_cfg = ((idle_cfg << 8) | (idle->sia_power_on & 0xFF));
	idle_cfg = ((idle_cfg << 4) | (idle->sva_policy & 0xF));
	idle_cfg = ((idle_cfg << 4) | (idle->sia_policy & 0xF));

	spin_lock_irqsave(&mb2_transfer.auto_pm_lock, flags);

	/*
	 * The autonomous power management configuration is done through
	 * fields in mailbox 2, but these fields are only used as shared
	 * variables - i.e. there is no need to send a message.
	 */
	writel(sleep_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_SLEEP));
	writel(idle_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_IDLE));

	mb2_transfer.auto_pm_enabled =
		((sleep->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (sleep->sia_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (idle->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (idle->sia_auto_pm_enable == PRCMU_AUTO_PM_ON));

	spin_unlock_irqrestore(&mb2_transfer.auto_pm_lock, flags);
}
EXPORT_SYMBOL(prcmu_configure_auto_pm);

/* Report whether any auto-PM enable bit was set by the last configuration. */
bool prcmu_is_auto_pm_enabled(void)
{
	return mb2_transfer.auto_pm_enabled;
}

/* Enable or disable SysClk through mailbox 3. */
static int request_sysclk(bool enable)
{
	int r;
	unsigned long flags;

	r = 0;

	mutex_lock(&mb3_transfer.sysclk_lock);

	spin_lock_irqsave(&mb3_transfer.lock, flags);

	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
		cpu_relax();

	writeb((enable ? ON : OFF), (tcdm_base + PRCM_REQ_MB3_SYSCLK_MGT));

	writeb(MB3H_SYSCLK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB3));
	writel(MBOX_BIT(3), PRCM_MBOX_CPU_SET);

	spin_unlock_irqrestore(&mb3_transfer.lock, flags);

	/*
	 * The firmware only sends an ACK if we want to enable the
	 * SysClk, and it succeeds.
	 */
	if (enable && !wait_for_completion_timeout(&mb3_transfer.sysclk_work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
	}

	mutex_unlock(&mb3_transfer.sysclk_lock);

	return r;
}

/* Enable or disable TimClk by direct register write (no mailbox needed). */
static int request_timclk(bool enable)
{
	u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);

	if (!enable)
		val |= PRCM_TCR_STOP_TIMERS;
	writel(val, PRCM_TCR);

	return 0;
}

/*
 * Gate a regular register-managed clock on or off. On disable the current
 * PLL-switch selection is saved in clk_mgt[] so enable can restore it.
 */
static int request_clock(u8 clock, bool enable)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore.
 */
	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	val = readl(prcmu_base + clk_mgt[clock].offset);
	if (enable) {
		val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
	} else {
		/* Remember the PLL selection so it can be restored later. */
		clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
		val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
	}
	writel(val, prcmu_base + clk_mgt[clock].offset);

	/* Release the HW semaphore. */
	writel(0, PRCM_SEM);

	spin_unlock_irqrestore(&clk_mgt_lock, flags);

	return 0;
}

/*
 * The SGA clock additionally needs the ICN2 clock-gating bypass: set it
 * before enabling, and clear it again after a successful disable.
 */
static int request_sga_clock(u8 clock, bool enable)
{
	u32 val;
	int ret;

	if (enable) {
		val = readl(PRCM_CGATING_BYPASS);
		writel(val | PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
	}

	ret = request_clock(clock, enable);

	if (!ret && !enable) {
		val = readl(PRCM_CGATING_BYPASS);
		writel(val & ~PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
	}

	return ret;
}

/* True when both DSI PLL lock indications are asserted. */
static inline bool plldsi_locked(void)
{
	return (readl(PRCM_PLLDSI_LOCKP) &
		(PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP10 |
		 PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP3)) ==
		(PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP10 |
		 PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP3);
}

/*
 * Power the DSI PLL up or down. On enable, poll for lock (up to ~1 ms);
 * if the PLL fails to lock the clamps are restored, the PLL is switched
 * back off and -EAGAIN is returned.
 */
static int request_plldsi(bool enable)
{
	int r = 0;
	u32 val;

	/* Release (enable) or apply (disable) the DSI PLL clamps. */
	writel((PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP |
		PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI), (enable ?
		PRCM_MMIP_LS_CLAMP_CLR : PRCM_MMIP_LS_CLAMP_SET));

	val = readl(PRCM_PLLDSI_ENABLE);
	if (enable)
		val |= PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
	else
		val &= ~PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
	writel(val, PRCM_PLLDSI_ENABLE);

	if (enable) {
		unsigned int i;
		bool locked = plldsi_locked();

		for (i = 10; !locked && (i > 0); --i) {
			udelay(100);
			locked = plldsi_locked();
		}
		if (locked) {
			writel(PRCM_APE_RESETN_DSIPLL_RESETN,
				PRCM_APE_RESETN_SET);
		} else {
			/* Lock failed: undo everything done above. */
			writel((PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP |
				PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI),
				PRCM_MMIP_LS_CLAMP_SET);
			val &= ~PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
			writel(val, PRCM_PLLDSI_ENABLE);
			r = -EAGAIN;
		}
	} else {
		writel(PRCM_APE_RESETN_DSIPLL_RESETN, PRCM_APE_RESETN_CLR);
	}
	return r;
}

/* Select the configured divider for DSI clock @n, or OFF when disabling. */
static int request_dsiclk(u8 n, bool enable)
{
	u32 val;

	val = readl(PRCM_DSI_PLLOUT_SEL);
	val &= ~dsiclk[n].divsel_mask;
	val |= ((enable ? dsiclk[n].divsel : PRCM_DSI_PLLOUT_SEL_OFF) <<
		dsiclk[n].divsel_shift);
	writel(val, PRCM_DSI_PLLOUT_SEL);
	return 0;
}

/* Gate the DSI escape clock @n on or off via its enable bit. */
static int request_dsiescclk(u8 n, bool enable)
{
	u32 val;

	val = readl(PRCM_DSITVCLK_DIV);
	enable ? (val |= dsiescclk[n].en) : (val &= ~dsiescclk[n].en);
	writel(val, PRCM_DSITVCLK_DIV);
	return 0;
}

/**
 * db8500_prcmu_request_clock() - Request for a clock to be enabled or disabled.
 * @clock: The clock for which the request is made.
 * @enable: Whether the clock should be enabled (true) or disabled (false).
 *
 * This function should only be used by the clock implementation.
 * Do not use it from any other place!
 */
int db8500_prcmu_request_clock(u8 clock, bool enable)
{
	/* Dispatch to the handler matching the clock id. Note that the
	 * SGACLK check must come before the generic register-clock range
	 * check, since PRCMU_SGACLK is itself a register clock. */
	if (clock == PRCMU_SGACLK)
		return request_sga_clock(clock, enable);
	else if (clock < PRCMU_NUM_REG_CLOCKS)
		return request_clock(clock, enable);
	else if (clock == PRCMU_TIMCLK)
		return request_timclk(enable);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		return request_dsiclk((clock - PRCMU_DSI0CLK), enable);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		return request_dsiescclk((clock - PRCMU_DSI0ESCCLK), enable);
	else if (clock == PRCMU_PLLDSI)
		return request_plldsi(enable);
	else if (clock == PRCMU_SYSCLK)
		return request_sysclk(enable);
	else if ((clock == PRCMU_PLLSOC0) || (clock == PRCMU_PLLSOC1))
		return request_pll(clock, enable);
	else
		return -EINVAL;
}

/*
 * Compute the output rate of the PLL whose frequency register is @reg,
 * given the source rate and which output branch (PLL_RAW/PLL_FIX/PLL_DIV)
 * is being asked for: rate = src * D / (N * R [* 2 ...]).
 */
static unsigned long pll_rate(void __iomem *reg, unsigned long src_rate,
	int branch)
{
	u64 rate;
	u32 val;
	u32 d;
	u32 div = 1;

	val = readl(reg);

	rate = src_rate;
	rate *= ((val & PRCM_PLL_FREQ_D_MASK) >> PRCM_PLL_FREQ_D_SHIFT);

	d = ((val & PRCM_PLL_FREQ_N_MASK) >> PRCM_PLL_FREQ_N_SHIFT);
	if (d > 1)
		div *= d;

	d = ((val & PRCM_PLL_FREQ_R_MASK) >> PRCM_PLL_FREQ_R_SHIFT);
	if (d > 1)
		div *= d;

	if (val & PRCM_PLL_FREQ_SELDIV2)
		div *= 2;

	/* The FIX branch is always /2; the DIV branch only on certain PLLs
	 * and only when DIV2EN is set. */
	if ((branch == PLL_FIX) || ((branch == PLL_DIV) &&
		(val & PRCM_PLL_FREQ_DIV2EN) &&
		((reg == PRCM_PLLSOC0_FREQ) ||
		 (reg == PRCM_PLLARM_FREQ) ||
		 (reg == PRCM_PLLDDR_FREQ))))
		div *= 2;

	(void)do_div(rate, div);

	return (unsigned long)rate;
}

#define ROOT_CLOCK_RATE 38400000

/* Current rate in Hz of a register-managed clock, from its MGT register. */
static unsigned long clock_rate(u8 clock)
{
	u32 val;
	u32 pllsw;
	unsigned long rate = ROOT_CLOCK_RATE;

	val = readl(prcmu_base + clk_mgt[clock].offset);

	/* Clock running straight off the 38.4 MHz root, optionally /2. */
	if (val & PRCM_CLK_MGT_CLK38) {
		if (clk_mgt[clock].clk38div && (val & PRCM_CLK_MGT_CLK38DIV))
			rate /= 2;
		return rate;
	}

	/* Use the saved PLL selection if the clock is currently gated. */
	val |= clk_mgt[clock].pllsw;
	pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);

	if (pllsw == PRCM_CLK_MGT_CLKPLLSW_SOC0)
		rate = pll_rate(PRCM_PLLSOC0_FREQ, rate, clk_mgt[clock].branch);
	else if (pllsw == PRCM_CLK_MGT_CLKPLLSW_SOC1)
		rate = pll_rate(PRCM_PLLSOC1_FREQ, rate, clk_mgt[clock].branch);
	else if (pllsw == PRCM_CLK_MGT_CLKPLLSW_DDR)
		rate = pll_rate(PRCM_PLLDDR_FREQ, rate, clk_mgt[clock].branch);
	else
		return 0;

	/* SGA clock has a special divide-by-2.5 mode. */
	if ((clock == PRCMU_SGACLK) &&
		(val & PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN)) {
		u64 r = (rate * 10);

		(void)do_div(r, 25);
		return (unsigned long)r;
	}
	val &= PRCM_CLK_MGT_CLKPLLDIV_MASK;
	if (val)
		return rate / val;
	else
		return 0;
}

/* Current ARM subsystem clock rate, from either ARMCLKFIX or the ARM PLL. */
static unsigned long armss_rate(void)
{
	u32 r;
	unsigned long rate;

	r = readl(PRCM_ARM_CHGCLKREQ);

	if (r & PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ) {
		/* External ARMCLKFIX clock */

		rate = pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, PLL_FIX);

		/* Check PRCM_ARM_CHGCLKREQ divider */
		if (!(r & PRCM_ARM_CHGCLKREQ_PRCM_ARM_DIVSEL))
			rate /= 2;

		/* Check PRCM_ARMCLKFIX_MGT divider */
		r = readl(PRCM_ARMCLKFIX_MGT);
		r &= PRCM_CLK_MGT_CLKPLLDIV_MASK;
		rate /= r;

	} else {/* ARM PLL */
		rate = pll_rate(PRCM_PLLARM_FREQ, ROOT_CLOCK_RATE, PLL_DIV);
	}

	return rate;
}

/* Current rate of DSI clock @n: DSI PLL output divided per the PHI select. */
static unsigned long dsiclk_rate(u8 n)
{
	u32 divsel;
	u32 div = 1;

	divsel = readl(PRCM_DSI_PLLOUT_SEL);
	divsel = ((divsel & dsiclk[n].divsel_mask) >> dsiclk[n].divsel_shift);

	/* If the clock is off, report the rate it would have when enabled. */
	if (divsel == PRCM_DSI_PLLOUT_SEL_OFF)
		divsel = dsiclk[n].divsel;

	switch (divsel) {
	case PRCM_DSI_PLLOUT_SEL_PHI_4:
		div *= 2;
		/* fall through - PHI/4 is PHI/2 divided once more */
	case PRCM_DSI_PLLOUT_SEL_PHI_2:
		div *= 2;
		/* fall through */
	case PRCM_DSI_PLLOUT_SEL_PHI:
		return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
			PLL_RAW) / div;
	default:
		return 0;
	}
}

/* Current rate of DSI escape clock @n: TVCLK divided by its divider field. */
static unsigned long dsiescclk_rate(u8 n)
{
	u32 div;

	div = readl(PRCM_DSITVCLK_DIV);
	div = ((div & dsiescclk[n].div_mask) >> (dsiescclk[n].div_shift));
	/* A zero divider field is treated as divide-by-one. */
	return clock_rate(PRCMU_TVCLK) / max((u32)1, div);
}

/* Public entry point: current rate in Hz of any PRCMU-managed clock. */
unsigned long prcmu_clock_rate(u8 clock)
{
	if (clock < PRCMU_NUM_REG_CLOCKS)
		return clock_rate(clock);
	else if (clock == PRCMU_TIMCLK)
		return ROOT_CLOCK_RATE / 16;
	else if (clock == PRCMU_SYSCLK)
		return ROOT_CLOCK_RATE;
	else if (clock == PRCMU_PLLSOC0)
		return pll_rate(PRCM_PLLSOC0_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
	else if (clock == PRCMU_PLLSOC1)
		return pll_rate(PRCM_PLLSOC1_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
	else if (clock == PRCMU_ARMSS)
		return armss_rate();
	else if (clock == PRCMU_PLLDDR)
		return pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
	else if (clock == PRCMU_PLLDSI)
		return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
			PLL_RAW);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		return dsiclk_rate(clock - PRCMU_DSI0CLK);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		return dsiescclk_rate(clock - PRCMU_DSI0ESCCLK);
	else
		return 0;
}

/* Rate of the source feeding a clock with the given MGT register value. */
static unsigned long clock_source_rate(u32 clk_mgt_val, int branch)
{
	if (clk_mgt_val & PRCM_CLK_MGT_CLK38)
		return ROOT_CLOCK_RATE;
	clk_mgt_val &= PRCM_CLK_MGT_CLKPLLSW_MASK;
	if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_SOC0)
		return pll_rate(PRCM_PLLSOC0_FREQ, ROOT_CLOCK_RATE, branch);
	else if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_SOC1)
		return pll_rate(PRCM_PLLSOC1_FREQ, ROOT_CLOCK_RATE, branch);
	else if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_DDR)
		return pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, branch);
	else
		return 0;
}

/*
 * Smallest divider giving a rate no greater than @rate (at least 1);
 * i.e. round the division up, not to nearest.
 */
static u32 clock_divider(unsigned long src_rate, unsigned long rate)
{
	u32 div;

	div = (src_rate / rate);
	if (div == 0)
		return 1;
	if (rate < (src_rate / div))
		div++;
	return div;
}

/* Closest achievable rate <= @rate for a register-managed clock. */
static long round_clock_rate(u8 clock, unsigned long rate)
{
	u32 val;
	u32 div;
	unsigned long src_rate;
	long rounded_rate;

	val = readl(prcmu_base + clk_mgt[clock].offset);
	src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
		clk_mgt[clock].branch);
	div = clock_divider(src_rate, rate);
	if (val & PRCM_CLK_MGT_CLK38) {
		/* Root-clock mode only supports /1 and (optionally) /2. */
		if (clk_mgt[clock].clk38div) {
			if (div > 2)
				div = 2;
		} else {
			div = 1;
		}
	} else if ((clock == PRCMU_SGACLK) && (div == 3)) {
		/* SGA clock can do 2.5 instead of 3 if that still fits. */
		u64 r = (src_rate * 10);

		(void)do_div(r, 25);
		if (r <= rate)
			return (unsigned long)r;
	}
	/* The divider field is 5 bits wide, so it is capped at 31. */
	rounded_rate = (src_rate / min(div, (u32)31));

	return rounded_rate;
}

/* CPU FREQ table, may be changed due to if MAX_OPP is supported. */
static struct cpufreq_frequency_table db8500_cpufreq_table[] = {
	{ .frequency = 200000, .index = ARM_EXTCLK,},
	{ .frequency = 400000, .index = ARM_50_OPP,},
	{ .frequency = 800000, .index = ARM_100_OPP,},
	{ .frequency = CPUFREQ_TABLE_END,}, /* To be used for MAX_OPP. */
	{ .frequency = CPUFREQ_TABLE_END,},
};

/* Round an ARMSS rate to an entry in the cpufreq table above. */
static long round_armss_rate(unsigned long rate)
{
	long freq = 0;
	int i = 0;

	/* cpufreq table frequencies is in KHz. */
	rate = rate / 1000;

	/* Find the corresponding arm opp from the cpufreq table. */
	while (db8500_cpufreq_table[i].frequency != CPUFREQ_TABLE_END) {
		freq = db8500_cpufreq_table[i].frequency;
		if (freq == rate)
			break;
		i++;
	}

	/* Return the last valid value, even if a match was not found.
 */
	return freq * 1000;
}

#define MIN_PLL_VCO_RATE 600000000ULL
#define MAX_PLL_VCO_RATE 1680640000ULL

/*
 * Best achievable DSI PLL rate for @rate: try all R dividers (7..1) and
 * the matching D multiplier (clamped to 6..255), keeping the candidate
 * whose VCO frequency is in range and whose rate is closest from below.
 */
static long round_plldsi_rate(unsigned long rate)
{
	long rounded_rate = 0;
	unsigned long src_rate;
	unsigned long rem;
	u32 r;

	src_rate = clock_rate(PRCMU_HDMICLK);
	rem = rate;

	for (r = 7; (rem > 0) && (r > 0); r--) {
		u64 d;

		d = (r * rate);
		(void)do_div(d, src_rate);
		if (d < 6)
			d = 6;
		else if (d > 255)
			d = 255;
		d *= src_rate;
		/* Reject candidates whose VCO (2*d/r) is out of range. */
		if (((2 * d) < (r * MIN_PLL_VCO_RATE)) ||
			((r * MAX_PLL_VCO_RATE) < (2 * d)))
			continue;
		(void)do_div(d, r);
		if (rate < d) {
			/* Overshoot: keep it only if nothing fit so far. */
			if (rounded_rate == 0)
				rounded_rate = (long)d;
			break;
		}
		if ((rate - d) < rem) {
			rem = (rate - d);
			rounded_rate = (long)d;
		}
	}
	return rounded_rate;
}

/* Closest DSI clock rate: PLL output divided by 1, 2 or 4. */
static long round_dsiclk_rate(unsigned long rate)
{
	u32 div;
	unsigned long src_rate;
	long rounded_rate;

	src_rate = pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
		PLL_RAW);
	div = clock_divider(src_rate, rate);
	/* Only /1, /2 and /4 exist in hardware; anything above 2 becomes 4. */
	rounded_rate = (src_rate / ((div > 2) ? 4 : div));

	return rounded_rate;
}

/* Closest DSI escape clock rate; the divider field is 8 bits (max 255). */
static long round_dsiescclk_rate(unsigned long rate)
{
	u32 div;
	unsigned long src_rate;
	long rounded_rate;

	src_rate = clock_rate(PRCMU_TVCLK);
	div = clock_divider(src_rate, rate);
	rounded_rate = (src_rate / min(div, (u32)255));

	return rounded_rate;
}

/* Public entry point: round @rate to what the given clock can actually do. */
long prcmu_round_clock_rate(u8 clock, unsigned long rate)
{
	if (clock < PRCMU_NUM_REG_CLOCKS)
		return round_clock_rate(clock, rate);
	else if (clock == PRCMU_ARMSS)
		return round_armss_rate(rate);
	else if (clock == PRCMU_PLLDSI)
		return round_plldsi_rate(rate);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		return round_dsiclk_rate(rate);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		return round_dsiescclk_rate(rate);
	else
		/* Fixed-rate clocks: the current rate is the only choice. */
		return (long)prcmu_clock_rate(clock);
}

/* Program the divider of a register-managed clock to approximate @rate. */
static void set_clock_rate(u8 clock, unsigned long rate)
{
	u32 val;
	u32 div;
	unsigned long src_rate;
	unsigned long flags;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore.
 */
	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	val = readl(prcmu_base + clk_mgt[clock].offset);
	src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
		clk_mgt[clock].branch);
	div = clock_divider(src_rate, rate);
	if (val & PRCM_CLK_MGT_CLK38) {
		/* Root-clock mode: only the /2 bit can be changed. */
		if (clk_mgt[clock].clk38div) {
			if (div > 1)
				val |= PRCM_CLK_MGT_CLK38DIV;
			else
				val &= ~PRCM_CLK_MGT_CLK38DIV;
		}
	} else if (clock == PRCMU_SGACLK) {
		val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK |
			PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN);
		/* Use the special divide-by-2.5 mode instead of /3 when the
		 * resulting rate still does not exceed the request. */
		if (div == 3) {
			u64 r = (src_rate * 10);

			(void)do_div(r, 25);
			if (r <= rate) {
				val |= PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN;
				div = 0;
			}
		}
		val |= min(div, (u32)31);
	} else {
		/* Plain 5-bit divider field. */
		val &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK;
		val |= min(div, (u32)31);
	}
	writel(val, prcmu_base + clk_mgt[clock].offset);

	/* Release the HW semaphore. */
	writel(0, PRCM_SEM);

	spin_unlock_irqrestore(&clk_mgt_lock, flags);
}

/* Set the ARMSS rate by switching to the matching ARM OPP (exact match only). */
static int set_armss_rate(unsigned long rate)
{
	int i = 0;

	/* cpufreq table frequencies is in KHz. */
	rate = rate / 1000;

	/* Find the corresponding arm opp from the cpufreq table. */
	while (db8500_cpufreq_table[i].frequency != CPUFREQ_TABLE_END) {
		if (db8500_cpufreq_table[i].frequency == rate)
			break;
		i++;
	}

	if (db8500_cpufreq_table[i].frequency != rate)
		return -EINVAL;

	/* Set the new arm opp.
 */
	return db8500_prcmu_set_arm_opp(db8500_cpufreq_table[i].index);
}

/*
 * Program the DSI PLL D and R fields for the best approximation of @rate,
 * using the same search as round_plldsi_rate(). N is always set to 1.
 */
static int set_plldsi_rate(unsigned long rate)
{
	unsigned long src_rate;
	unsigned long rem;
	u32 pll_freq = 0;
	u32 r;

	src_rate = clock_rate(PRCMU_HDMICLK);
	rem = rate;

	for (r = 7; (rem > 0) && (r > 0); r--) {
		u64 d;
		u64 hwrate;

		d = (r * rate);
		(void)do_div(d, src_rate);
		if (d < 6)
			d = 6;
		else if (d > 255)
			d = 255;
		hwrate = (d * src_rate);
		/* Reject candidates whose VCO (2*hwrate/r) is out of range. */
		if (((2 * hwrate) < (r * MIN_PLL_VCO_RATE)) ||
			((r * MAX_PLL_VCO_RATE) < (2 * hwrate)))
			continue;
		(void)do_div(hwrate, r);
		if (rate < hwrate) {
			if (pll_freq == 0)
				pll_freq = (((u32)d << PRCM_PLL_FREQ_D_SHIFT) |
					(r << PRCM_PLL_FREQ_R_SHIFT));
			break;
		}
		if ((rate - hwrate) < rem) {
			rem = (rate - hwrate);
			pll_freq = (((u32)d << PRCM_PLL_FREQ_D_SHIFT) |
				(r << PRCM_PLL_FREQ_R_SHIFT));
		}
	}
	if (pll_freq == 0)
		return -EINVAL;

	pll_freq |= (1 << PRCM_PLL_FREQ_N_SHIFT);
	writel(pll_freq, PRCM_PLLDSI_FREQ);

	return 0;
}

/* Pick the PHI/1, PHI/2 or PHI/4 tap that best matches @rate for DSI clk @n. */
static void set_dsiclk_rate(u8 n, unsigned long rate)
{
	u32 val;
	u32 div;

	div = clock_divider(pll_rate(PRCM_PLLDSI_FREQ,
			clock_rate(PRCMU_HDMICLK), PLL_RAW), rate);

	dsiclk[n].divsel = (div == 1) ? PRCM_DSI_PLLOUT_SEL_PHI :
			   (div == 2) ? PRCM_DSI_PLLOUT_SEL_PHI_2 :
			   /* else */	PRCM_DSI_PLLOUT_SEL_PHI_4;

	val = readl(PRCM_DSI_PLLOUT_SEL);
	val &= ~dsiclk[n].divsel_mask;
	val |= (dsiclk[n].divsel << dsiclk[n].divsel_shift);
	writel(val, PRCM_DSI_PLLOUT_SEL);
}

/* Program the 8-bit divider of DSI escape clock @n from TVCLK. */
static void set_dsiescclk_rate(u8 n, unsigned long rate)
{
	u32 val;
	u32 div;

	div = clock_divider(clock_rate(PRCMU_TVCLK), rate);
	val = readl(PRCM_DSITVCLK_DIV);
	val &= ~dsiescclk[n].div_mask;
	val |= (min(div, (u32)255) << dsiescclk[n].div_shift);
	writel(val, PRCM_DSITVCLK_DIV);
}

/* Public entry point: set the rate of any settable PRCMU-managed clock. */
int prcmu_set_clock_rate(u8 clock, unsigned long rate)
{
	if (clock < PRCMU_NUM_REG_CLOCKS)
		set_clock_rate(clock, rate);
	else if (clock == PRCMU_ARMSS)
		return set_armss_rate(rate);
	else if (clock == PRCMU_PLLDSI)
		return set_plldsi_rate(rate);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		set_dsiclk_rate((clock - PRCMU_DSI0CLK), rate);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		set_dsiescclk_rate((clock - PRCMU_DSI0ESCCLK), rate);
	return 0;
}

/* Configure the ESRAM0 state used in deep sleep, via mailbox 4. */
int db8500_prcmu_config_esram0_deep_sleep(u8 state)
{
	if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
		(state < ESRAM0_DEEP_SLEEP_STATE_OFF))
		return -EINVAL;

	mutex_lock(&mb4_transfer.lock);

	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(MB4H_MEM_ST, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
	writeb(((DDR_PWR_STATE_OFFHIGHLAT << 4) | DDR_PWR_STATE_ON),
	       (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE));
	writeb(DDR_PWR_STATE_ON,
	       (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE));
	writeb(state, (tcdm_base + PRCM_REQ_MB4_ESRAM0_ST));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

/* Set the "hotdog" thermal shutdown threshold, via mailbox 4. */
int db8500_prcmu_config_hotdog(u8 threshold)
{
	mutex_lock(&mb4_transfer.lock);

	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(threshold, (tcdm_base + PRCM_REQ_MB4_HOTDOG_THRESHOLD));
	writeb(MB4H_HOTDOG, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

/* Set the low/high thermal monitoring thresholds, via mailbox 4. */
int db8500_prcmu_config_hotmon(u8 low, u8 high)
{
	mutex_lock(&mb4_transfer.lock);

	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(low, (tcdm_base + PRCM_REQ_MB4_HOTMON_LOW));
	writeb(high, (tcdm_base + PRCM_REQ_MB4_HOTMON_HIGH));
	writeb((HOTMON_CONFIG_LOW | HOTMON_CONFIG_HIGH),
		(tcdm_base + PRCM_REQ_MB4_HOTMON_CONFIG));
	writeb(MB4H_HOTMON, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

/* Set the temperature measurement period (in 32 kHz cycles), via mailbox 4. */
static int config_hot_period(u16 val)
{
	mutex_lock(&mb4_transfer.lock);

	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writew(val, (tcdm_base + PRCM_REQ_MB4_HOT_PERIOD));
	writeb(MB4H_HOT_PERIOD, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

/* Start periodic temperature sensing; 0xFFFF is reserved as "stop". */
int db8500_prcmu_start_temp_sense(u16 cycles32k)
{
	if (cycles32k == 0xFFFF)
		return -EINVAL;

	return config_hot_period(cycles32k);
}

int db8500_prcmu_stop_temp_sense(void)
{
	return config_hot_period(0xFFFF);
}

/* Send one A9 watchdog command with its four data bytes, via mailbox 4. */
static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
{

	mutex_lock(&mb4_transfer.lock);

	while (readl(PRCM_MBOX_CPU_VAL)
& MBOX_BIT(4))
		cpu_relax();

	writeb(d0, (tcdm_base + PRCM_REQ_MB4_A9WDOG_0));
	writeb(d1, (tcdm_base + PRCM_REQ_MB4_A9WDOG_1));
	writeb(d2, (tcdm_base + PRCM_REQ_MB4_A9WDOG_2));
	writeb(d3, (tcdm_base + PRCM_REQ_MB4_A9WDOG_3));

	writeb(cmd, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;

}

/* Configure watchdog @num; optionally auto-disable it during ApSleep. */
int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
{
	BUG_ON(num == 0 || num > 0xf);
	return prcmu_a9wdog(MB4H_A9WDOG_CONF, num, 0, 0,
			    sleep_auto_off ? A9WDOG_AUTO_OFF_EN :
			    A9WDOG_AUTO_OFF_DIS);
}
EXPORT_SYMBOL(db8500_prcmu_config_a9wdog);

int db8500_prcmu_enable_a9wdog(u8 id)
{
	return prcmu_a9wdog(MB4H_A9WDOG_EN, id, 0, 0, 0);
}
EXPORT_SYMBOL(db8500_prcmu_enable_a9wdog);

int db8500_prcmu_disable_a9wdog(u8 id)
{
	return prcmu_a9wdog(MB4H_A9WDOG_DIS, id, 0, 0, 0);
}
EXPORT_SYMBOL(db8500_prcmu_disable_a9wdog);

int db8500_prcmu_kick_a9wdog(u8 id)
{
	return prcmu_a9wdog(MB4H_A9WDOG_KICK, id, 0, 0, 0);
}
EXPORT_SYMBOL(db8500_prcmu_kick_a9wdog);

/*
 * timeout is 28 bit, in ms.
 */
int db8500_prcmu_load_a9wdog(u8 id, u32 timeout)
{
	return prcmu_a9wdog(MB4H_A9WDOG_LOAD,
			    (id & A9WDOG_ID_MASK) |
			    /*
			     * Put the lowest 28 bits of timeout at
			     * offset 4. Four first bits are used for id.
			     */
			    (u8)((timeout << 4) & 0xf0),
			    (u8)((timeout >> 4) & 0xff),
			    (u8)((timeout >> 12) & 0xff),
			    (u8)((timeout >> 20) & 0xff));
}
EXPORT_SYMBOL(db8500_prcmu_load_a9wdog);

/**
 * prcmu_abb_read() - Read register value(s) from the ABB.
 * @slave: The I2C slave address.
 * @reg: The (start) register address.
 * @value: The read out value(s).
 * @size: The number of registers to read.
 *
 * Reads register value(s) from the ABB.
 * @size has to be 1 for the current firmware version.
 */
int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
{
	int r;

	if (size != 1)
		return -EINVAL;

	mutex_lock(&mb5_transfer.lock);

	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
		cpu_relax();

	writeb(0, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB5));
	writeb(PRCMU_I2C_READ(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
	writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
	writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
	writeb(0, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));

	writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);

	if (!wait_for_completion_timeout(&mb5_transfer.work,
				msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
	} else {
		r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
	}

	/* Only report a value back on success. */
	if (!r)
		*value = mb5_transfer.ack.value;

	mutex_unlock(&mb5_transfer.lock);

	return r;
}

/**
 * prcmu_abb_write_masked() - Write masked register value(s) to the ABB.
 * @slave: The I2C slave address.
 * @reg: The (start) register address.
 * @value: The value(s) to write.
 * @mask: The mask(s) to use.
 * @size: The number of registers to write.
 *
 * Writes masked register value(s) to the ABB.
 * For each @value, only the bits set to 1 in the corresponding @mask
 * will be written. The other bits are not changed.
 * @size has to be 1 for the current firmware version.
2220 */ 2221 int prcmu_abb_write_masked(u8 slave, u8 reg, u8 *value, u8 *mask, u8 size) 2222 { 2223 int r; 2224 2225 if (size != 1) 2226 return -EINVAL; 2227 2228 mutex_lock(&mb5_transfer.lock); 2229 2230 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5)) 2231 cpu_relax(); 2232 2233 writeb(~*mask, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB5)); 2234 writeb(PRCMU_I2C_WRITE(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP)); 2235 writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS)); 2236 writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG)); 2237 writeb(*value, (tcdm_base + PRCM_REQ_MB5_I2C_VAL)); 2238 2239 writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET); 2240 2241 if (!wait_for_completion_timeout(&mb5_transfer.work, 2242 msecs_to_jiffies(20000))) { 2243 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n", 2244 __func__); 2245 r = -EIO; 2246 } else { 2247 r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO); 2248 } 2249 2250 mutex_unlock(&mb5_transfer.lock); 2251 2252 return r; 2253 } 2254 2255 /** 2256 * prcmu_abb_write() - Write register value(s) to the ABB. 2257 * @slave: The I2C slave address. 2258 * @reg: The (start) register address. 2259 * @value: The value(s) to write. 2260 * @size: The number of registers to write. 2261 * 2262 * Writes register value(s) to the ABB. 2263 * @size has to be 1 for the current firmware version. 2264 */ 2265 int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) 2266 { 2267 u8 mask = ~0; 2268 2269 return prcmu_abb_write_masked(slave, reg, value, &mask, size); 2270 } 2271 2272 /** 2273 * prcmu_ac_wake_req - should be called whenever ARM wants to wakeup Modem 2274 */ 2275 int prcmu_ac_wake_req(void) 2276 { 2277 u32 val; 2278 int ret = 0; 2279 2280 mutex_lock(&mb0_transfer.ac_wake_lock); 2281 2282 val = readl(PRCM_HOSTACCESS_REQ); 2283 if (val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ) 2284 goto unlock_and_return; 2285 2286 atomic_set(&ac_wake_req_state, 1); 2287 2288 /* 2289 * Force Modem Wake-up before hostaccess_req ping-pong. 
2290 * It prevents Modem to enter in Sleep while acking the hostaccess 2291 * request. The 31us delay has been calculated by HWI. 2292 */ 2293 val |= PRCM_HOSTACCESS_REQ_WAKE_REQ; 2294 writel(val, PRCM_HOSTACCESS_REQ); 2295 2296 udelay(31); 2297 2298 val |= PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ; 2299 writel(val, PRCM_HOSTACCESS_REQ); 2300 2301 if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work, 2302 msecs_to_jiffies(5000))) { 2303 #if defined(CONFIG_DBX500_PRCMU_DEBUG) 2304 db8500_prcmu_debug_dump(__func__, true, true); 2305 #endif 2306 pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n", 2307 __func__); 2308 ret = -EFAULT; 2309 } 2310 2311 unlock_and_return: 2312 mutex_unlock(&mb0_transfer.ac_wake_lock); 2313 return ret; 2314 } 2315 2316 /** 2317 * prcmu_ac_sleep_req - called when ARM no longer needs to talk to modem 2318 */ 2319 void prcmu_ac_sleep_req() 2320 { 2321 u32 val; 2322 2323 mutex_lock(&mb0_transfer.ac_wake_lock); 2324 2325 val = readl(PRCM_HOSTACCESS_REQ); 2326 if (!(val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ)) 2327 goto unlock_and_return; 2328 2329 writel((val & ~PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ), 2330 PRCM_HOSTACCESS_REQ); 2331 2332 if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work, 2333 msecs_to_jiffies(5000))) { 2334 pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n", 2335 __func__); 2336 } 2337 2338 atomic_set(&ac_wake_req_state, 0); 2339 2340 unlock_and_return: 2341 mutex_unlock(&mb0_transfer.ac_wake_lock); 2342 } 2343 2344 bool db8500_prcmu_is_ac_wake_requested(void) 2345 { 2346 return (atomic_read(&ac_wake_req_state) != 0); 2347 } 2348 2349 /** 2350 * db8500_prcmu_system_reset - System reset 2351 * 2352 * Saves the reset reason code and then sets the APE_SOFTRST register which 2353 * fires interrupt to fw 2354 */ 2355 void db8500_prcmu_system_reset(u16 reset_code) 2356 { 2357 writew(reset_code, (tcdm_base + PRCM_SW_RST_REASON)); 2358 writel(1, PRCM_APE_SOFTRST); 2359 } 2360 2361 /** 2362 * 
db8500_prcmu_get_reset_code - Retrieve SW reset reason code 2363 * 2364 * Retrieves the reset reason code stored by prcmu_system_reset() before 2365 * last restart. 2366 */ 2367 u16 db8500_prcmu_get_reset_code(void) 2368 { 2369 return readw(tcdm_base + PRCM_SW_RST_REASON); 2370 } 2371 2372 /** 2373 * db8500_prcmu_reset_modem - ask the PRCMU to reset modem 2374 */ 2375 void db8500_prcmu_modem_reset(void) 2376 { 2377 mutex_lock(&mb1_transfer.lock); 2378 2379 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1)) 2380 cpu_relax(); 2381 2382 writeb(MB1H_RESET_MODEM, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1)); 2383 writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET); 2384 wait_for_completion(&mb1_transfer.work); 2385 2386 /* 2387 * No need to check return from PRCMU as modem should go in reset state 2388 * This state is already managed by upper layer 2389 */ 2390 2391 mutex_unlock(&mb1_transfer.lock); 2392 } 2393 2394 static void ack_dbb_wakeup(void) 2395 { 2396 unsigned long flags; 2397 2398 spin_lock_irqsave(&mb0_transfer.lock, flags); 2399 2400 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0)) 2401 cpu_relax(); 2402 2403 writeb(MB0H_READ_WAKEUP_ACK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0)); 2404 writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET); 2405 2406 spin_unlock_irqrestore(&mb0_transfer.lock, flags); 2407 } 2408 2409 static inline void print_unknown_header_warning(u8 n, u8 header) 2410 { 2411 pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n", 2412 header, n); 2413 } 2414 2415 static bool read_mailbox_0(void) 2416 { 2417 bool r; 2418 u32 ev; 2419 unsigned int n; 2420 u8 header; 2421 2422 header = readb(tcdm_base + PRCM_MBOX_HEADER_ACK_MB0); 2423 switch (header) { 2424 case MB0H_WAKEUP_EXE: 2425 case MB0H_WAKEUP_SLEEP: 2426 if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1) 2427 ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_1_8500); 2428 else 2429 ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_0_8500); 2430 2431 if (ev & (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK)) 2432 
complete(&mb0_transfer.ac_wake_work); 2433 if (ev & WAKEUP_BIT_SYSCLK_OK) 2434 complete(&mb3_transfer.sysclk_work); 2435 2436 ev &= mb0_transfer.req.dbb_irqs; 2437 2438 for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) { 2439 if (ev & prcmu_irq_bit[n]) 2440 generic_handle_irq(irq_find_mapping(db8500_irq_domain, n)); 2441 } 2442 r = true; 2443 break; 2444 default: 2445 print_unknown_header_warning(0, header); 2446 r = false; 2447 break; 2448 } 2449 writel(MBOX_BIT(0), PRCM_ARM_IT1_CLR); 2450 return r; 2451 } 2452 2453 static bool read_mailbox_1(void) 2454 { 2455 mb1_transfer.ack.header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1); 2456 mb1_transfer.ack.arm_opp = readb(tcdm_base + 2457 PRCM_ACK_MB1_CURRENT_ARM_OPP); 2458 mb1_transfer.ack.ape_opp = readb(tcdm_base + 2459 PRCM_ACK_MB1_CURRENT_APE_OPP); 2460 mb1_transfer.ack.ape_voltage_status = readb(tcdm_base + 2461 PRCM_ACK_MB1_APE_VOLTAGE_STATUS); 2462 writel(MBOX_BIT(1), PRCM_ARM_IT1_CLR); 2463 complete(&mb1_transfer.work); 2464 return false; 2465 } 2466 2467 static bool read_mailbox_2(void) 2468 { 2469 mb2_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB2_DPS_STATUS); 2470 writel(MBOX_BIT(2), PRCM_ARM_IT1_CLR); 2471 complete(&mb2_transfer.work); 2472 return false; 2473 } 2474 2475 static bool read_mailbox_3(void) 2476 { 2477 writel(MBOX_BIT(3), PRCM_ARM_IT1_CLR); 2478 return false; 2479 } 2480 2481 static bool read_mailbox_4(void) 2482 { 2483 u8 header; 2484 bool do_complete = true; 2485 2486 header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB4); 2487 switch (header) { 2488 case MB4H_MEM_ST: 2489 case MB4H_HOTDOG: 2490 case MB4H_HOTMON: 2491 case MB4H_HOT_PERIOD: 2492 case MB4H_A9WDOG_CONF: 2493 case MB4H_A9WDOG_EN: 2494 case MB4H_A9WDOG_DIS: 2495 case MB4H_A9WDOG_LOAD: 2496 case MB4H_A9WDOG_KICK: 2497 break; 2498 default: 2499 print_unknown_header_warning(4, header); 2500 do_complete = false; 2501 break; 2502 } 2503 2504 writel(MBOX_BIT(4), PRCM_ARM_IT1_CLR); 2505 2506 if (do_complete) 2507 complete(&mb4_transfer.work); 
2508 2509 return false; 2510 } 2511 2512 static bool read_mailbox_5(void) 2513 { 2514 mb5_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB5_I2C_STATUS); 2515 mb5_transfer.ack.value = readb(tcdm_base + PRCM_ACK_MB5_I2C_VAL); 2516 writel(MBOX_BIT(5), PRCM_ARM_IT1_CLR); 2517 complete(&mb5_transfer.work); 2518 return false; 2519 } 2520 2521 static bool read_mailbox_6(void) 2522 { 2523 writel(MBOX_BIT(6), PRCM_ARM_IT1_CLR); 2524 return false; 2525 } 2526 2527 static bool read_mailbox_7(void) 2528 { 2529 writel(MBOX_BIT(7), PRCM_ARM_IT1_CLR); 2530 return false; 2531 } 2532 2533 static bool (* const read_mailbox[NUM_MB])(void) = { 2534 read_mailbox_0, 2535 read_mailbox_1, 2536 read_mailbox_2, 2537 read_mailbox_3, 2538 read_mailbox_4, 2539 read_mailbox_5, 2540 read_mailbox_6, 2541 read_mailbox_7 2542 }; 2543 2544 static irqreturn_t prcmu_irq_handler(int irq, void *data) 2545 { 2546 u32 bits; 2547 u8 n; 2548 irqreturn_t r; 2549 2550 bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS); 2551 if (unlikely(!bits)) 2552 return IRQ_NONE; 2553 2554 r = IRQ_HANDLED; 2555 for (n = 0; bits; n++) { 2556 if (bits & MBOX_BIT(n)) { 2557 bits -= MBOX_BIT(n); 2558 if (read_mailbox[n]()) 2559 r = IRQ_WAKE_THREAD; 2560 } 2561 } 2562 return r; 2563 } 2564 2565 static irqreturn_t prcmu_irq_thread_fn(int irq, void *data) 2566 { 2567 ack_dbb_wakeup(); 2568 return IRQ_HANDLED; 2569 } 2570 2571 static void prcmu_mask_work(struct work_struct *work) 2572 { 2573 unsigned long flags; 2574 2575 spin_lock_irqsave(&mb0_transfer.lock, flags); 2576 2577 config_wakeups(); 2578 2579 spin_unlock_irqrestore(&mb0_transfer.lock, flags); 2580 } 2581 2582 static void prcmu_irq_mask(struct irq_data *d) 2583 { 2584 unsigned long flags; 2585 2586 spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags); 2587 2588 mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->hwirq]; 2589 2590 spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags); 2591 2592 if (d->irq != IRQ_PRCMU_CA_SLEEP) 2593 
schedule_work(&mb0_transfer.mask_work); 2594 } 2595 2596 static void prcmu_irq_unmask(struct irq_data *d) 2597 { 2598 unsigned long flags; 2599 2600 spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags); 2601 2602 mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->hwirq]; 2603 2604 spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags); 2605 2606 if (d->irq != IRQ_PRCMU_CA_SLEEP) 2607 schedule_work(&mb0_transfer.mask_work); 2608 } 2609 2610 static void noop(struct irq_data *d) 2611 { 2612 } 2613 2614 static struct irq_chip prcmu_irq_chip = { 2615 .name = "prcmu", 2616 .irq_disable = prcmu_irq_mask, 2617 .irq_ack = noop, 2618 .irq_mask = prcmu_irq_mask, 2619 .irq_unmask = prcmu_irq_unmask, 2620 }; 2621 2622 static __init char *fw_project_name(u32 project) 2623 { 2624 switch (project) { 2625 case PRCMU_FW_PROJECT_U8500: 2626 return "U8500"; 2627 case PRCMU_FW_PROJECT_U8400: 2628 return "U8400"; 2629 case PRCMU_FW_PROJECT_U9500: 2630 return "U9500"; 2631 case PRCMU_FW_PROJECT_U8500_MBB: 2632 return "U8500 MBB"; 2633 case PRCMU_FW_PROJECT_U8500_C1: 2634 return "U8500 C1"; 2635 case PRCMU_FW_PROJECT_U8500_C2: 2636 return "U8500 C2"; 2637 case PRCMU_FW_PROJECT_U8500_C3: 2638 return "U8500 C3"; 2639 case PRCMU_FW_PROJECT_U8500_C4: 2640 return "U8500 C4"; 2641 case PRCMU_FW_PROJECT_U9500_MBL: 2642 return "U9500 MBL"; 2643 case PRCMU_FW_PROJECT_U8500_MBL: 2644 return "U8500 MBL"; 2645 case PRCMU_FW_PROJECT_U8500_MBL2: 2646 return "U8500 MBL2"; 2647 case PRCMU_FW_PROJECT_U8520: 2648 return "U8520 MBL"; 2649 case PRCMU_FW_PROJECT_U8420: 2650 return "U8420"; 2651 case PRCMU_FW_PROJECT_U9540: 2652 return "U9540"; 2653 case PRCMU_FW_PROJECT_A9420: 2654 return "A9420"; 2655 case PRCMU_FW_PROJECT_L8540: 2656 return "L8540"; 2657 case PRCMU_FW_PROJECT_L8580: 2658 return "L8580"; 2659 default: 2660 return "Unknown"; 2661 } 2662 } 2663 2664 static int db8500_irq_map(struct irq_domain *d, unsigned int virq, 2665 irq_hw_number_t hwirq) 2666 { 2667 irq_set_chip_and_handler(virq, 
&prcmu_irq_chip, 2668 handle_simple_irq); 2669 set_irq_flags(virq, IRQF_VALID); 2670 2671 return 0; 2672 } 2673 2674 static struct irq_domain_ops db8500_irq_ops = { 2675 .map = db8500_irq_map, 2676 .xlate = irq_domain_xlate_twocell, 2677 }; 2678 2679 static int db8500_irq_init(struct device_node *np, int irq_base) 2680 { 2681 int i; 2682 2683 /* In the device tree case, just take some IRQs */ 2684 if (np) 2685 irq_base = 0; 2686 2687 db8500_irq_domain = irq_domain_add_simple( 2688 np, NUM_PRCMU_WAKEUPS, irq_base, 2689 &db8500_irq_ops, NULL); 2690 2691 if (!db8500_irq_domain) { 2692 pr_err("Failed to create irqdomain\n"); 2693 return -ENOSYS; 2694 } 2695 2696 /* All wakeups will be used, so create mappings for all */ 2697 for (i = 0; i < NUM_PRCMU_WAKEUPS; i++) 2698 irq_create_mapping(db8500_irq_domain, i); 2699 2700 return 0; 2701 } 2702 2703 static void dbx500_fw_version_init(struct platform_device *pdev, 2704 u32 version_offset) 2705 { 2706 struct resource *res; 2707 void __iomem *tcpm_base; 2708 u32 version; 2709 2710 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2711 "prcmu-tcpm"); 2712 if (!res) { 2713 dev_err(&pdev->dev, 2714 "Error: no prcmu tcpm memory region provided\n"); 2715 return; 2716 } 2717 tcpm_base = ioremap(res->start, resource_size(res)); 2718 if (!tcpm_base) { 2719 dev_err(&pdev->dev, "no prcmu tcpm mem region provided\n"); 2720 return; 2721 } 2722 2723 version = readl(tcpm_base + version_offset); 2724 fw_info.version.project = (version & 0xFF); 2725 fw_info.version.api_version = (version >> 8) & 0xFF; 2726 fw_info.version.func_version = (version >> 16) & 0xFF; 2727 fw_info.version.errata = (version >> 24) & 0xFF; 2728 strncpy(fw_info.version.project_name, 2729 fw_project_name(fw_info.version.project), 2730 PRCMU_FW_PROJECT_NAME_LEN); 2731 fw_info.valid = true; 2732 pr_info("PRCMU firmware: %s(%d), version %d.%d.%d\n", 2733 fw_info.version.project_name, 2734 fw_info.version.project, 2735 fw_info.version.api_version, 2736 
		fw_info.version.func_version,
		fw_info.version.errata);
	iounmap(tcpm_base);
}

/*
 * Early (pre-driver-model) setup: map the PRCMU registers and initialize
 * all mailbox locks and completions so the clock code can run before the
 * platform driver probes.
 */
void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
{
	/*
	 * This is a temporary remap to bring up the clocks. It is
	 * subsequently replaced with a real remap. After the merge of
	 * the mailbox subsystem all of this early code goes away, and the
	 * clock driver can probe independently. An early initcall will
	 * still be needed, but it can be diverted into drivers/clk/ux500.
	 */
	prcmu_base = ioremap(phy_base, size);
	if (!prcmu_base)
		pr_err("%s: ioremap() of prcmu registers failed!\n", __func__);

	spin_lock_init(&mb0_transfer.lock);
	spin_lock_init(&mb0_transfer.dbb_irqs_lock);
	mutex_init(&mb0_transfer.ac_wake_lock);
	init_completion(&mb0_transfer.ac_wake_work);
	mutex_init(&mb1_transfer.lock);
	init_completion(&mb1_transfer.work);
	mb1_transfer.ape_opp = APE_NO_CHANGE;
	mutex_init(&mb2_transfer.lock);
	init_completion(&mb2_transfer.work);
	spin_lock_init(&mb2_transfer.auto_pm_lock);
	spin_lock_init(&mb3_transfer.lock);
	mutex_init(&mb3_transfer.sysclk_lock);
	init_completion(&mb3_transfer.sysclk_work);
	mutex_init(&mb4_transfer.lock);
	init_completion(&mb4_transfer.work);
	mutex_init(&mb5_transfer.lock);
	init_completion(&mb5_transfer.work);

	INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
}

/* Drop the forced A9 PL/AXI clock enables left over from boot code. */
static void __init init_prcm_registers(void)
{
	u32 val;

	val = readl(PRCM_A9PL_FORCE_CLKEN);
	val &= ~(PRCM_A9PL_FORCE_CLKEN_PRCM_A9PL_FORCE_CLKEN |
		PRCM_A9PL_FORCE_CLKEN_PRCM_A9AXI_FORCE_CLKEN);
	writel(val, (PRCM_A9PL_FORCE_CLKEN));
}

/*
 * Power domain switches (ePODs) modeled as regulators for the DB8500 SoC
 */
static struct regulator_consumer_supply db8500_vape_consumers[] = {
	REGULATOR_SUPPLY("v-ape", NULL),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.4"),
	/* "v-mmc" changed to "vcore" in the mainline kernel */
	REGULATOR_SUPPLY("vcore", "sdi0"),
	REGULATOR_SUPPLY("vcore", "sdi1"),
	REGULATOR_SUPPLY("vcore", "sdi2"),
	REGULATOR_SUPPLY("vcore", "sdi3"),
	REGULATOR_SUPPLY("vcore", "sdi4"),
	REGULATOR_SUPPLY("v-dma", "dma40.0"),
	REGULATOR_SUPPLY("v-ape", "ab8500-usb.0"),
	/* "v-uart" changed to "vcore" in the mainline kernel */
	REGULATOR_SUPPLY("vcore", "uart0"),
	REGULATOR_SUPPLY("vcore", "uart1"),
	REGULATOR_SUPPLY("vcore", "uart2"),
	REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
	REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"),
	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
};

static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
	REGULATOR_SUPPLY("musb_1v8", "ab8500-usb.0"),
	/* AV8100 regulator */
	REGULATOR_SUPPLY("hdmi_1v8", "0-0070"),
};

static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = {
	REGULATOR_SUPPLY("vsupply", "b2r2_bus"),
	REGULATOR_SUPPLY("vsupply", "mcde"),
};

/* SVA MMDSP regulator switch */
static struct regulator_consumer_supply db8500_svammdsp_consumers[] = {
	REGULATOR_SUPPLY("sva-mmdsp", "cm_control"),
};

/* SVA pipe regulator switch */
static struct regulator_consumer_supply db8500_svapipe_consumers[] = {
	REGULATOR_SUPPLY("sva-pipe", "cm_control"),
};

/* SIA MMDSP regulator switch */
static struct regulator_consumer_supply db8500_siammdsp_consumers[] = {
	REGULATOR_SUPPLY("sia-mmdsp", "cm_control"),
};

/* SIA pipe regulator switch */
static struct regulator_consumer_supply db8500_siapipe_consumers[] = {
	REGULATOR_SUPPLY("sia-pipe", "cm_control"),
};

/* SGA (graphics accelerator) regulator switch */
static
struct regulator_consumer_supply db8500_sga_consumers[] = {
	REGULATOR_SUPPLY("v-mali", NULL),
};

/* ESRAM1 and 2 regulator switch */
static struct regulator_consumer_supply db8500_esram12_consumers[] = {
	REGULATOR_SUPPLY("esram12", "cm_control"),
};

/* ESRAM3 and 4 regulator switch */
static struct regulator_consumer_supply db8500_esram34_consumers[] = {
	REGULATOR_SUPPLY("v-esram34", "mcde"),
	REGULATOR_SUPPLY("esram34", "cm_control"),
	REGULATOR_SUPPLY("lcla_esram", "dma40.0"),
};

/*
 * Init data for every PRCMU-controlled regulator, indexed by the
 * DB8500_REGULATOR_* ids consumed by the db8500-prcmu-regulators cell.
 */
static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
	[DB8500_REGULATOR_VAPE] = {
		.constraints = {
			.name = "db8500-vape",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
			.always_on = true,
		},
		.consumer_supplies = db8500_vape_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers),
	},
	[DB8500_REGULATOR_VARM] = {
		.constraints = {
			.name = "db8500-varm",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VMODEM] = {
		.constraints = {
			.name = "db8500-vmodem",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VPLL] = {
		.constraints = {
			.name = "db8500-vpll",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VSMPS1] = {
		.constraints = {
			.name = "db8500-vsmps1",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VSMPS2] = {
		.constraints = {
			.name = "db8500-vsmps2",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_vsmps2_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_vsmps2_consumers),
	},
	[DB8500_REGULATOR_VSMPS3] = {
		.constraints = {
			.name = "db8500-vsmps3",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VRF1] = {
		.constraints = {
			.name = "db8500-vrf1",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
		/* dependency to u8500-vape is handled outside regulator framework */
		.constraints = {
			.name = "db8500-sva-mmdsp",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_svammdsp_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_svammdsp_consumers),
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
		.constraints = {
			/* "ret" means "retention" */
			.name = "db8500-sva-mmdsp-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SVAPIPE] = {
		/* dependency to u8500-vape is handled outside regulator framework */
		.constraints = {
			.name = "db8500-sva-pipe",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_svapipe_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_svapipe_consumers),
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
		/* dependency to u8500-vape is handled outside regulator framework */
		.constraints = {
			.name = "db8500-sia-mmdsp",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_siammdsp_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_siammdsp_consumers),
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
		.constraints = {
			.name = "db8500-sia-mmdsp-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SIAPIPE] = {
		/* dependency to u8500-vape is handled outside regulator framework */
		.constraints = {
			.name = "db8500-sia-pipe",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_siapipe_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_siapipe_consumers),
	},
	[DB8500_REGULATOR_SWITCH_SGA] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-sga",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_sga_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_sga_consumers),

	},
	[DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-b2r2-mcde",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_b2r2_mcde_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_b2r2_mcde_consumers),
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12] = {
		/*
		 * esram12 is set in retention and supplied by Vsafe when Vape is off,
		 * no need to hold Vape
		 */
		.constraints = {
			.name = "db8500-esram12",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_esram12_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_esram12_consumers),
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
		.constraints = {
			.name = "db8500-esram12-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34] = {
		/*
		 * esram34 is set in retention and supplied by Vsafe when Vape is off,
		 * no need to hold Vape
		 */
		.constraints = {
			.name = "db8500-esram34",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_esram34_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_esram34_consumers),
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
		.constraints = {
			.name = "db8500-esram34-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
};

/* Platform data for the ux500 watchdog MFD cell. */
static struct ux500_wdt_data db8500_wdt_pdata = {
	.timeout = 600, /* 10 minutes */
	.has_28_bits_resolution = true,
};
/*
 * Thermal Sensor
 */

static struct resource db8500_thsens_resources[] = {
	{
		.name = "IRQ_HOTMON_LOW",
		.start = IRQ_PRCMU_HOTMON_LOW,
.end = IRQ_PRCMU_HOTMON_LOW, 3032 .flags = IORESOURCE_IRQ, 3033 }, 3034 { 3035 .name = "IRQ_HOTMON_HIGH", 3036 .start = IRQ_PRCMU_HOTMON_HIGH, 3037 .end = IRQ_PRCMU_HOTMON_HIGH, 3038 .flags = IORESOURCE_IRQ, 3039 }, 3040 }; 3041 3042 static struct db8500_thsens_platform_data db8500_thsens_data = { 3043 .trip_points[0] = { 3044 .temp = 70000, 3045 .type = THERMAL_TRIP_ACTIVE, 3046 .cdev_name = { 3047 [0] = "thermal-cpufreq-0", 3048 }, 3049 }, 3050 .trip_points[1] = { 3051 .temp = 75000, 3052 .type = THERMAL_TRIP_ACTIVE, 3053 .cdev_name = { 3054 [0] = "thermal-cpufreq-0", 3055 }, 3056 }, 3057 .trip_points[2] = { 3058 .temp = 80000, 3059 .type = THERMAL_TRIP_ACTIVE, 3060 .cdev_name = { 3061 [0] = "thermal-cpufreq-0", 3062 }, 3063 }, 3064 .trip_points[3] = { 3065 .temp = 85000, 3066 .type = THERMAL_TRIP_CRITICAL, 3067 }, 3068 .num_trips = 4, 3069 }; 3070 3071 static struct mfd_cell common_prcmu_devs[] = { 3072 { 3073 .name = "ux500_wdt", 3074 .platform_data = &db8500_wdt_pdata, 3075 .pdata_size = sizeof(db8500_wdt_pdata), 3076 .id = -1, 3077 }, 3078 }; 3079 3080 static struct mfd_cell db8500_prcmu_devs[] = { 3081 { 3082 .name = "db8500-prcmu-regulators", 3083 .of_compatible = "stericsson,db8500-prcmu-regulator", 3084 .platform_data = &db8500_regulators, 3085 .pdata_size = sizeof(db8500_regulators), 3086 }, 3087 { 3088 .name = "cpufreq-ux500", 3089 .of_compatible = "stericsson,cpufreq-ux500", 3090 .platform_data = &db8500_cpufreq_table, 3091 .pdata_size = sizeof(db8500_cpufreq_table), 3092 }, 3093 { 3094 .name = "db8500-thermal", 3095 .num_resources = ARRAY_SIZE(db8500_thsens_resources), 3096 .resources = db8500_thsens_resources, 3097 .platform_data = &db8500_thsens_data, 3098 }, 3099 }; 3100 3101 static void db8500_prcmu_update_cpufreq(void) 3102 { 3103 if (prcmu_has_arm_maxopp()) { 3104 db8500_cpufreq_table[3].frequency = 1000000; 3105 db8500_cpufreq_table[3].index = ARM_MAX_OPP; 3106 } 3107 } 3108 3109 static int db8500_prcmu_register_ab8500(struct device *parent, 
3110 struct ab8500_platform_data *pdata, 3111 int irq) 3112 { 3113 struct resource ab8500_resource = DEFINE_RES_IRQ(irq); 3114 struct mfd_cell ab8500_cell = { 3115 .name = "ab8500-core", 3116 .of_compatible = "stericsson,ab8500", 3117 .id = AB8500_VERSION_AB8500, 3118 .platform_data = pdata, 3119 .pdata_size = sizeof(struct ab8500_platform_data), 3120 .resources = &ab8500_resource, 3121 .num_resources = 1, 3122 }; 3123 3124 return mfd_add_devices(parent, 0, &ab8500_cell, 1, NULL, 0, NULL); 3125 } 3126 3127 /** 3128 * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic 3129 * 3130 */ 3131 static int db8500_prcmu_probe(struct platform_device *pdev) 3132 { 3133 struct device_node *np = pdev->dev.of_node; 3134 struct prcmu_pdata *pdata = dev_get_platdata(&pdev->dev); 3135 int irq = 0, err = 0; 3136 struct resource *res; 3137 3138 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu"); 3139 if (!res) { 3140 dev_err(&pdev->dev, "no prcmu memory region provided\n"); 3141 return -ENOENT; 3142 } 3143 prcmu_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); 3144 if (!prcmu_base) { 3145 dev_err(&pdev->dev, 3146 "failed to ioremap prcmu register memory\n"); 3147 return -ENOENT; 3148 } 3149 init_prcm_registers(); 3150 dbx500_fw_version_init(pdev, pdata->version_offset); 3151 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "prcmu-tcdm"); 3152 if (!res) { 3153 dev_err(&pdev->dev, "no prcmu tcdm region provided\n"); 3154 return -ENOENT; 3155 } 3156 tcdm_base = devm_ioremap(&pdev->dev, res->start, 3157 resource_size(res)); 3158 3159 /* Clean up the mailbox interrupts after pre-kernel code. 
*/ 3160 writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR); 3161 3162 irq = platform_get_irq(pdev, 0); 3163 if (irq <= 0) { 3164 dev_err(&pdev->dev, "no prcmu irq provided\n"); 3165 return -ENOENT; 3166 } 3167 3168 err = request_threaded_irq(irq, prcmu_irq_handler, 3169 prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL); 3170 if (err < 0) { 3171 pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n"); 3172 err = -EBUSY; 3173 goto no_irq_return; 3174 } 3175 3176 db8500_irq_init(np, pdata->irq_base); 3177 3178 prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET); 3179 3180 db8500_prcmu_update_cpufreq(); 3181 3182 err = mfd_add_devices(&pdev->dev, 0, common_prcmu_devs, 3183 ARRAY_SIZE(common_prcmu_devs), NULL, 0, db8500_irq_domain); 3184 if (err) { 3185 pr_err("prcmu: Failed to add subdevices\n"); 3186 return err; 3187 } 3188 3189 /* TODO: Remove restriction when clk definitions are available. */ 3190 if (!of_machine_is_compatible("st-ericsson,u8540")) { 3191 err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs, 3192 ARRAY_SIZE(db8500_prcmu_devs), NULL, 0, 3193 db8500_irq_domain); 3194 if (err) { 3195 mfd_remove_devices(&pdev->dev); 3196 pr_err("prcmu: Failed to add subdevices\n"); 3197 goto no_irq_return; 3198 } 3199 } 3200 3201 err = db8500_prcmu_register_ab8500(&pdev->dev, pdata->ab_platdata, 3202 pdata->ab_irq); 3203 if (err) { 3204 mfd_remove_devices(&pdev->dev); 3205 pr_err("prcmu: Failed to add ab8500 subdevice\n"); 3206 goto no_irq_return; 3207 } 3208 3209 pr_info("DB8500 PRCMU initialized\n"); 3210 3211 no_irq_return: 3212 return err; 3213 } 3214 static const struct of_device_id db8500_prcmu_match[] = { 3215 { .compatible = "stericsson,db8500-prcmu"}, 3216 { }, 3217 }; 3218 3219 static struct platform_driver db8500_prcmu_driver = { 3220 .driver = { 3221 .name = "db8500-prcmu", 3222 .owner = THIS_MODULE, 3223 .of_match_table = db8500_prcmu_match, 3224 }, 3225 .probe = db8500_prcmu_probe, 3226 }; 3227 3228 static int __init db8500_prcmu_init(void) 3229 { 3230 
return platform_driver_register(&db8500_prcmu_driver); 3231 } 3232 3233 core_initcall(db8500_prcmu_init); 3234 3235 MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com>"); 3236 MODULE_DESCRIPTION("DB8500 PRCM Unit driver"); 3237 MODULE_LICENSE("GPL v2"); 3238