1 /* 2 * Copyright (C) STMicroelectronics 2009 3 * Copyright (C) ST-Ericsson SA 2010 4 * 5 * License Terms: GNU General Public License v2 6 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> 7 * Author: Sundar Iyer <sundar.iyer@stericsson.com> 8 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> 9 * 10 * U8500 PRCM Unit interface driver 11 * 12 */ 13 #include <linux/module.h> 14 #include <linux/kernel.h> 15 #include <linux/delay.h> 16 #include <linux/errno.h> 17 #include <linux/err.h> 18 #include <linux/spinlock.h> 19 #include <linux/io.h> 20 #include <linux/slab.h> 21 #include <linux/mutex.h> 22 #include <linux/completion.h> 23 #include <linux/irq.h> 24 #include <linux/jiffies.h> 25 #include <linux/bitops.h> 26 #include <linux/fs.h> 27 #include <linux/platform_device.h> 28 #include <linux/uaccess.h> 29 #include <linux/mfd/core.h> 30 #include <linux/mfd/dbx500-prcmu.h> 31 #include <linux/mfd/abx500/ab8500.h> 32 #include <linux/regulator/db8500-prcmu.h> 33 #include <linux/regulator/machine.h> 34 #include <linux/cpufreq.h> 35 #include <asm/hardware/gic.h> 36 #include <mach/hardware.h> 37 #include <mach/irqs.h> 38 #include <mach/db8500-regs.h> 39 #include <mach/id.h> 40 #include "dbx500-prcmu-regs.h" 41 42 /* Offset for the firmware version within the TCPM */ 43 #define PRCMU_FW_VERSION_OFFSET 0xA4 44 45 /* Index of different voltages to be used when accessing AVSData */ 46 #define PRCM_AVS_BASE 0x2FC 47 #define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0) 48 #define PRCM_AVS_VBB_MAX_OPP (PRCM_AVS_BASE + 0x1) 49 #define PRCM_AVS_VBB_100_OPP (PRCM_AVS_BASE + 0x2) 50 #define PRCM_AVS_VBB_50_OPP (PRCM_AVS_BASE + 0x3) 51 #define PRCM_AVS_VARM_MAX_OPP (PRCM_AVS_BASE + 0x4) 52 #define PRCM_AVS_VARM_100_OPP (PRCM_AVS_BASE + 0x5) 53 #define PRCM_AVS_VARM_50_OPP (PRCM_AVS_BASE + 0x6) 54 #define PRCM_AVS_VARM_RET (PRCM_AVS_BASE + 0x7) 55 #define PRCM_AVS_VAPE_100_OPP (PRCM_AVS_BASE + 0x8) 56 #define PRCM_AVS_VAPE_50_OPP (PRCM_AVS_BASE + 0x9) 57 #define 
PRCM_AVS_VMOD_100_OPP (PRCM_AVS_BASE + 0xA) 58 #define PRCM_AVS_VMOD_50_OPP (PRCM_AVS_BASE + 0xB) 59 #define PRCM_AVS_VSAFE (PRCM_AVS_BASE + 0xC) 60 61 #define PRCM_AVS_VOLTAGE 0 62 #define PRCM_AVS_VOLTAGE_MASK 0x3f 63 #define PRCM_AVS_ISSLOWSTARTUP 6 64 #define PRCM_AVS_ISSLOWSTARTUP_MASK (1 << PRCM_AVS_ISSLOWSTARTUP) 65 #define PRCM_AVS_ISMODEENABLE 7 66 #define PRCM_AVS_ISMODEENABLE_MASK (1 << PRCM_AVS_ISMODEENABLE) 67 68 #define PRCM_BOOT_STATUS 0xFFF 69 #define PRCM_ROMCODE_A2P 0xFFE 70 #define PRCM_ROMCODE_P2A 0xFFD 71 #define PRCM_XP70_CUR_PWR_STATE 0xFFC /* 4 BYTES */ 72 73 #define PRCM_SW_RST_REASON 0xFF8 /* 2 bytes */ 74 75 #define _PRCM_MBOX_HEADER 0xFE8 /* 16 bytes */ 76 #define PRCM_MBOX_HEADER_REQ_MB0 (_PRCM_MBOX_HEADER + 0x0) 77 #define PRCM_MBOX_HEADER_REQ_MB1 (_PRCM_MBOX_HEADER + 0x1) 78 #define PRCM_MBOX_HEADER_REQ_MB2 (_PRCM_MBOX_HEADER + 0x2) 79 #define PRCM_MBOX_HEADER_REQ_MB3 (_PRCM_MBOX_HEADER + 0x3) 80 #define PRCM_MBOX_HEADER_REQ_MB4 (_PRCM_MBOX_HEADER + 0x4) 81 #define PRCM_MBOX_HEADER_REQ_MB5 (_PRCM_MBOX_HEADER + 0x5) 82 #define PRCM_MBOX_HEADER_ACK_MB0 (_PRCM_MBOX_HEADER + 0x8) 83 84 /* Req Mailboxes */ 85 #define PRCM_REQ_MB0 0xFDC /* 12 bytes */ 86 #define PRCM_REQ_MB1 0xFD0 /* 12 bytes */ 87 #define PRCM_REQ_MB2 0xFC0 /* 16 bytes */ 88 #define PRCM_REQ_MB3 0xE4C /* 372 bytes */ 89 #define PRCM_REQ_MB4 0xE48 /* 4 bytes */ 90 #define PRCM_REQ_MB5 0xE44 /* 4 bytes */ 91 92 /* Ack Mailboxes */ 93 #define PRCM_ACK_MB0 0xE08 /* 52 bytes */ 94 #define PRCM_ACK_MB1 0xE04 /* 4 bytes */ 95 #define PRCM_ACK_MB2 0xE00 /* 4 bytes */ 96 #define PRCM_ACK_MB3 0xDFC /* 4 bytes */ 97 #define PRCM_ACK_MB4 0xDF8 /* 4 bytes */ 98 #define PRCM_ACK_MB5 0xDF4 /* 4 bytes */ 99 100 /* Mailbox 0 headers */ 101 #define MB0H_POWER_STATE_TRANS 0 102 #define MB0H_CONFIG_WAKEUPS_EXE 1 103 #define MB0H_READ_WAKEUP_ACK 3 104 #define MB0H_CONFIG_WAKEUPS_SLEEP 4 105 106 #define MB0H_WAKEUP_EXE 2 107 #define MB0H_WAKEUP_SLEEP 5 108 109 /* Mailbox 0 REQs */ 110 #define 
PRCM_REQ_MB0_AP_POWER_STATE (PRCM_REQ_MB0 + 0x0) 111 #define PRCM_REQ_MB0_AP_PLL_STATE (PRCM_REQ_MB0 + 0x1) 112 #define PRCM_REQ_MB0_ULP_CLOCK_STATE (PRCM_REQ_MB0 + 0x2) 113 #define PRCM_REQ_MB0_DO_NOT_WFI (PRCM_REQ_MB0 + 0x3) 114 #define PRCM_REQ_MB0_WAKEUP_8500 (PRCM_REQ_MB0 + 0x4) 115 #define PRCM_REQ_MB0_WAKEUP_4500 (PRCM_REQ_MB0 + 0x8) 116 117 /* Mailbox 0 ACKs */ 118 #define PRCM_ACK_MB0_AP_PWRSTTR_STATUS (PRCM_ACK_MB0 + 0x0) 119 #define PRCM_ACK_MB0_READ_POINTER (PRCM_ACK_MB0 + 0x1) 120 #define PRCM_ACK_MB0_WAKEUP_0_8500 (PRCM_ACK_MB0 + 0x4) 121 #define PRCM_ACK_MB0_WAKEUP_0_4500 (PRCM_ACK_MB0 + 0x8) 122 #define PRCM_ACK_MB0_WAKEUP_1_8500 (PRCM_ACK_MB0 + 0x1C) 123 #define PRCM_ACK_MB0_WAKEUP_1_4500 (PRCM_ACK_MB0 + 0x20) 124 #define PRCM_ACK_MB0_EVENT_4500_NUMBERS 20 125 126 /* Mailbox 1 headers */ 127 #define MB1H_ARM_APE_OPP 0x0 128 #define MB1H_RESET_MODEM 0x2 129 #define MB1H_REQUEST_APE_OPP_100_VOLT 0x3 130 #define MB1H_RELEASE_APE_OPP_100_VOLT 0x4 131 #define MB1H_RELEASE_USB_WAKEUP 0x5 132 #define MB1H_PLL_ON_OFF 0x6 133 134 /* Mailbox 1 Requests */ 135 #define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0) 136 #define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1) 137 #define PRCM_REQ_MB1_PLL_ON_OFF (PRCM_REQ_MB1 + 0x4) 138 #define PLL_SOC0_OFF 0x1 139 #define PLL_SOC0_ON 0x2 140 #define PLL_SOC1_OFF 0x4 141 #define PLL_SOC1_ON 0x8 142 143 /* Mailbox 1 ACKs */ 144 #define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0) 145 #define PRCM_ACK_MB1_CURRENT_APE_OPP (PRCM_ACK_MB1 + 0x1) 146 #define PRCM_ACK_MB1_APE_VOLTAGE_STATUS (PRCM_ACK_MB1 + 0x2) 147 #define PRCM_ACK_MB1_DVFS_STATUS (PRCM_ACK_MB1 + 0x3) 148 149 /* Mailbox 2 headers */ 150 #define MB2H_DPS 0x0 151 #define MB2H_AUTO_PWR 0x1 152 153 /* Mailbox 2 REQs */ 154 #define PRCM_REQ_MB2_SVA_MMDSP (PRCM_REQ_MB2 + 0x0) 155 #define PRCM_REQ_MB2_SVA_PIPE (PRCM_REQ_MB2 + 0x1) 156 #define PRCM_REQ_MB2_SIA_MMDSP (PRCM_REQ_MB2 + 0x2) 157 #define PRCM_REQ_MB2_SIA_PIPE (PRCM_REQ_MB2 + 0x3) 158 #define 
PRCM_REQ_MB2_SGA (PRCM_REQ_MB2 + 0x4) 159 #define PRCM_REQ_MB2_B2R2_MCDE (PRCM_REQ_MB2 + 0x5) 160 #define PRCM_REQ_MB2_ESRAM12 (PRCM_REQ_MB2 + 0x6) 161 #define PRCM_REQ_MB2_ESRAM34 (PRCM_REQ_MB2 + 0x7) 162 #define PRCM_REQ_MB2_AUTO_PM_SLEEP (PRCM_REQ_MB2 + 0x8) 163 #define PRCM_REQ_MB2_AUTO_PM_IDLE (PRCM_REQ_MB2 + 0xC) 164 165 /* Mailbox 2 ACKs */ 166 #define PRCM_ACK_MB2_DPS_STATUS (PRCM_ACK_MB2 + 0x0) 167 #define HWACC_PWR_ST_OK 0xFE 168 169 /* Mailbox 3 headers */ 170 #define MB3H_ANC 0x0 171 #define MB3H_SIDETONE 0x1 172 #define MB3H_SYSCLK 0xE 173 174 /* Mailbox 3 Requests */ 175 #define PRCM_REQ_MB3_ANC_FIR_COEFF (PRCM_REQ_MB3 + 0x0) 176 #define PRCM_REQ_MB3_ANC_IIR_COEFF (PRCM_REQ_MB3 + 0x20) 177 #define PRCM_REQ_MB3_ANC_SHIFTER (PRCM_REQ_MB3 + 0x60) 178 #define PRCM_REQ_MB3_ANC_WARP (PRCM_REQ_MB3 + 0x64) 179 #define PRCM_REQ_MB3_SIDETONE_FIR_GAIN (PRCM_REQ_MB3 + 0x68) 180 #define PRCM_REQ_MB3_SIDETONE_FIR_COEFF (PRCM_REQ_MB3 + 0x6C) 181 #define PRCM_REQ_MB3_SYSCLK_MGT (PRCM_REQ_MB3 + 0x16C) 182 183 /* Mailbox 4 headers */ 184 #define MB4H_DDR_INIT 0x0 185 #define MB4H_MEM_ST 0x1 186 #define MB4H_HOTDOG 0x12 187 #define MB4H_HOTMON 0x13 188 #define MB4H_HOT_PERIOD 0x14 189 #define MB4H_A9WDOG_CONF 0x16 190 #define MB4H_A9WDOG_EN 0x17 191 #define MB4H_A9WDOG_DIS 0x18 192 #define MB4H_A9WDOG_LOAD 0x19 193 #define MB4H_A9WDOG_KICK 0x20 194 195 /* Mailbox 4 Requests */ 196 #define PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE (PRCM_REQ_MB4 + 0x0) 197 #define PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE (PRCM_REQ_MB4 + 0x1) 198 #define PRCM_REQ_MB4_ESRAM0_ST (PRCM_REQ_MB4 + 0x3) 199 #define PRCM_REQ_MB4_HOTDOG_THRESHOLD (PRCM_REQ_MB4 + 0x0) 200 #define PRCM_REQ_MB4_HOTMON_LOW (PRCM_REQ_MB4 + 0x0) 201 #define PRCM_REQ_MB4_HOTMON_HIGH (PRCM_REQ_MB4 + 0x1) 202 #define PRCM_REQ_MB4_HOTMON_CONFIG (PRCM_REQ_MB4 + 0x2) 203 #define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 0x0) 204 #define HOTMON_CONFIG_LOW BIT(0) 205 #define HOTMON_CONFIG_HIGH BIT(1) 206 #define PRCM_REQ_MB4_A9WDOG_0 
(PRCM_REQ_MB4 + 0x0) 207 #define PRCM_REQ_MB4_A9WDOG_1 (PRCM_REQ_MB4 + 0x1) 208 #define PRCM_REQ_MB4_A9WDOG_2 (PRCM_REQ_MB4 + 0x2) 209 #define PRCM_REQ_MB4_A9WDOG_3 (PRCM_REQ_MB4 + 0x3) 210 #define A9WDOG_AUTO_OFF_EN BIT(7) 211 #define A9WDOG_AUTO_OFF_DIS 0 212 #define A9WDOG_ID_MASK 0xf 213 214 /* Mailbox 5 Requests */ 215 #define PRCM_REQ_MB5_I2C_SLAVE_OP (PRCM_REQ_MB5 + 0x0) 216 #define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1) 217 #define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2) 218 #define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3) 219 #define PRCMU_I2C_WRITE(slave) \ 220 (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0)) 221 #define PRCMU_I2C_READ(slave) \ 222 (((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0)) 223 #define PRCMU_I2C_STOP_EN BIT(3) 224 225 /* Mailbox 5 ACKs */ 226 #define PRCM_ACK_MB5_I2C_STATUS (PRCM_ACK_MB5 + 0x1) 227 #define PRCM_ACK_MB5_I2C_VAL (PRCM_ACK_MB5 + 0x3) 228 #define I2C_WR_OK 0x1 229 #define I2C_RD_OK 0x2 230 231 #define NUM_MB 8 232 #define MBOX_BIT BIT 233 #define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1) 234 235 /* 236 * Wakeups/IRQs 237 */ 238 239 #define WAKEUP_BIT_RTC BIT(0) 240 #define WAKEUP_BIT_RTT0 BIT(1) 241 #define WAKEUP_BIT_RTT1 BIT(2) 242 #define WAKEUP_BIT_HSI0 BIT(3) 243 #define WAKEUP_BIT_HSI1 BIT(4) 244 #define WAKEUP_BIT_CA_WAKE BIT(5) 245 #define WAKEUP_BIT_USB BIT(6) 246 #define WAKEUP_BIT_ABB BIT(7) 247 #define WAKEUP_BIT_ABB_FIFO BIT(8) 248 #define WAKEUP_BIT_SYSCLK_OK BIT(9) 249 #define WAKEUP_BIT_CA_SLEEP BIT(10) 250 #define WAKEUP_BIT_AC_WAKE_ACK BIT(11) 251 #define WAKEUP_BIT_SIDE_TONE_OK BIT(12) 252 #define WAKEUP_BIT_ANC_OK BIT(13) 253 #define WAKEUP_BIT_SW_ERROR BIT(14) 254 #define WAKEUP_BIT_AC_SLEEP_ACK BIT(15) 255 #define WAKEUP_BIT_ARM BIT(17) 256 #define WAKEUP_BIT_HOTMON_LOW BIT(18) 257 #define WAKEUP_BIT_HOTMON_HIGH BIT(19) 258 #define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20) 259 #define WAKEUP_BIT_GPIO0 BIT(23) 260 #define WAKEUP_BIT_GPIO1 BIT(24) 261 #define WAKEUP_BIT_GPIO2 BIT(25) 
#define WAKEUP_BIT_GPIO3 BIT(26)
#define WAKEUP_BIT_GPIO4 BIT(27)
#define WAKEUP_BIT_GPIO5 BIT(28)
#define WAKEUP_BIT_GPIO6 BIT(29)
#define WAKEUP_BIT_GPIO7 BIT(30)
#define WAKEUP_BIT_GPIO8 BIT(31)

/*
 * Cached PRCMU firmware version information.
 * @valid:   true once @version has been populated (presumably at probe time,
 *           from the TCPM firmware-version offset -- the code doing so is not
 *           visible in this chunk).
 * @version: the decoded firmware version data.
 */
static struct {
	bool valid;
	struct prcmu_fw_version version;
} fw_info;

/* irq domain for the PRCMU wakeup interrupts (set up elsewhere in the file) */
static struct irq_domain *db8500_irq_domain;

/*
 * This vector maps irq numbers to the bits in the bit field used in
 * communication with the PRCMU firmware.
 *
 * The reason for having this is to keep the irq numbers contiguous even though
 * the bits in the bit field are not. (The bits also have a tendency to move
 * around, to further complicate matters.)
 */
#define IRQ_INDEX(_name) ((IRQ_PRCMU_##_name) - IRQ_PRCMU_BASE)
#define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name)
static u32 prcmu_irq_bit[NUM_PRCMU_WAKEUPS] = {
	IRQ_ENTRY(RTC),
	IRQ_ENTRY(RTT0),
	IRQ_ENTRY(RTT1),
	IRQ_ENTRY(HSI0),
	IRQ_ENTRY(HSI1),
	IRQ_ENTRY(CA_WAKE),
	IRQ_ENTRY(USB),
	IRQ_ENTRY(ABB),
	IRQ_ENTRY(ABB_FIFO),
	IRQ_ENTRY(CA_SLEEP),
	IRQ_ENTRY(ARM),
	IRQ_ENTRY(HOTMON_LOW),
	IRQ_ENTRY(HOTMON_HIGH),
	IRQ_ENTRY(MODEM_SW_RESET_REQ),
	IRQ_ENTRY(GPIO0),
	IRQ_ENTRY(GPIO1),
	IRQ_ENTRY(GPIO2),
	IRQ_ENTRY(GPIO3),
	IRQ_ENTRY(GPIO4),
	IRQ_ENTRY(GPIO5),
	IRQ_ENTRY(GPIO6),
	IRQ_ENTRY(GPIO7),
	IRQ_ENTRY(GPIO8)
};

#define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1)
#define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name)
static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = {
	WAKEUP_ENTRY(RTC),
	WAKEUP_ENTRY(RTT0),
	WAKEUP_ENTRY(RTT1),
	WAKEUP_ENTRY(HSI0),
	WAKEUP_ENTRY(HSI1),
	WAKEUP_ENTRY(USB),
	WAKEUP_ENTRY(ABB),
	WAKEUP_ENTRY(ABB_FIFO),
	WAKEUP_ENTRY(ARM)
};

/*
 * mb0_transfer - state needed for mailbox 0 communication.
 * @lock:		The transaction lock.
 * @dbb_irqs_lock:	A lock used to handle concurrent access to (parts of)
 *			the request data.
 * @mask_work:		Work structure used for (un)masking wakeup interrupts.
 * @ac_wake_lock:	Mutex used for AC wake requests (users of this field
 *			are outside this chunk).
 * @ac_wake_work:	Completion used for AC wake request acknowledgement
 *			(users of this field are outside this chunk).
 * @req:		Request data that need to persist between requests.
 */
static struct {
	spinlock_t lock;
	spinlock_t dbb_irqs_lock;
	struct work_struct mask_work;
	struct mutex ac_wake_lock;
	struct completion ac_wake_work;
	struct {
		u32 dbb_irqs;
		u32 dbb_wakeups;
		u32 abb_events;
	} req;
} mb0_transfer;

/*
 * mb1_transfer - state needed for mailbox 1 communication.
 * @lock:	The transaction lock.
 * @work:	The transaction completion structure.
 * @ape_opp:	The current APE OPP.
 * @ack:	Reply ("acknowledge") data.
 */
static struct {
	struct mutex lock;
	struct completion work;
	u8 ape_opp;
	struct {
		u8 header;
		u8 arm_opp;
		u8 ape_opp;
		u8 ape_voltage_status;
	} ack;
} mb1_transfer;

/*
 * mb2_transfer - state needed for mailbox 2 communication.
 * @lock:            The transaction lock.
 * @work:            The transaction completion structure.
 * @auto_pm_lock:    The autonomous power management configuration lock.
 * @auto_pm_enabled: A flag indicating whether autonomous PM is enabled.
 * @ack:             Reply ("acknowledge") data.
 */
static struct {
	struct mutex lock;
	struct completion work;
	spinlock_t auto_pm_lock;
	bool auto_pm_enabled;
	struct {
		u8 status;
	} ack;
} mb2_transfer;

/*
 * mb3_transfer - state needed for mailbox 3 communication.
 * @lock:		The request lock.
 * @sysclk_lock:	A lock used to handle concurrent sysclk requests.
 * @sysclk_work:	Work structure used for sysclk requests.
 */
static struct {
	spinlock_t lock;
	struct mutex sysclk_lock;
	struct completion sysclk_work;
} mb3_transfer;

/*
 * mb4_transfer - state needed for mailbox 4 communication.
 * @lock:	The transaction lock.
 * @work:	The transaction completion structure.
 */
static struct {
	struct mutex lock;
	struct completion work;
} mb4_transfer;

/*
 * mb5_transfer - state needed for mailbox 5 communication.
 * @lock:	The transaction lock.
 * @work:	The transaction completion structure.
 * @ack:	Reply ("acknowledge") data.
 */
static struct {
	struct mutex lock;
	struct completion work;
	struct {
		u8 status;
		u8 value;
	} ack;
} mb5_transfer;

static atomic_t ac_wake_req_state = ATOMIC_INIT(0);

/* Spinlocks */
static DEFINE_SPINLOCK(prcmu_lock);
static DEFINE_SPINLOCK(clkout_lock);

/* Global var to runtime determine TCDM base for v2 or v1 */
static __iomem void *tcdm_base;

/* Per-clock management register descriptor. */
struct clk_mgt {
	void __iomem *reg;	/* the PRCM_*_MGT register */
	u32 pllsw;		/* saved PLLSW field (filled in at runtime) */
	int branch;		/* PLL_RAW/PLL_FIX/PLL_DIV branch selection */
	bool clk38div;		/* whether the 38.4 MHz divider applies */
};

enum {
	PLL_RAW,
	PLL_FIX,
	PLL_DIV
};

static DEFINE_SPINLOCK(clk_mgt_lock);

#define CLK_MGT_ENTRY(_name, _branch, _clk38div)[PRCMU_##_name] = \
	{ (PRCM_##_name##_MGT), 0 , _branch, _clk38div}
struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
	CLK_MGT_ENTRY(SGACLK, PLL_DIV, false),
	CLK_MGT_ENTRY(UARTCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(MSP02CLK, PLL_FIX, true),
	CLK_MGT_ENTRY(MSP1CLK, PLL_FIX, true),
	CLK_MGT_ENTRY(I2CCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(SDMMCCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(SLIMCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(PER1CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER2CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER3CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER5CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER6CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(PER7CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(LCDCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(BMLCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(HSITXCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(HSIRXCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(HDMICLK, PLL_FIX, false),
	CLK_MGT_ENTRY(APEATCLK, PLL_DIV, true),
	CLK_MGT_ENTRY(APETRACECLK, PLL_DIV, true),
	CLK_MGT_ENTRY(MCDECLK, PLL_DIV, true),
	CLK_MGT_ENTRY(IPI2CCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(DSIALTCLK, PLL_FIX, false),
	CLK_MGT_ENTRY(DMACLK, PLL_DIV, true),
	CLK_MGT_ENTRY(B2R2CLK, PLL_DIV, true),
	CLK_MGT_ENTRY(TVCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(SSPCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(RNGCLK, PLL_FIX, true),
	CLK_MGT_ENTRY(UICCCLK, PLL_FIX, false),
};

/* Descriptor for one DSI PLL-out divider selection field. */
struct dsiclk {
	u32 divsel_mask;
	u32 divsel_shift;
	u32 divsel;
};

static struct dsiclk dsiclk[2] = {
	{
		.divsel_mask = PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_MASK,
		.divsel_shift = PRCM_DSI_PLLOUT_SEL_DSI0_PLLOUT_DIVSEL_SHIFT,
		.divsel = PRCM_DSI_PLLOUT_SEL_PHI,
	},
	{
		.divsel_mask = PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_MASK,
		.divsel_shift = PRCM_DSI_PLLOUT_SEL_DSI1_PLLOUT_DIVSEL_SHIFT,
		.divsel = PRCM_DSI_PLLOUT_SEL_PHI,
	}
};

/* Descriptor for one DSI escape clock (enable bit plus divider field). */
struct dsiescclk {
	u32 en;
	u32 div_mask;
	u32 div_shift;
};

static struct dsiescclk dsiescclk[3] = {
	{
		.en = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_EN,
		.div_mask = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_MASK,
		.div_shift = PRCM_DSITVCLK_DIV_DSI0_ESC_CLK_DIV_SHIFT,
	},
	{
		.en = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_EN,
		.div_mask = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_MASK,
		.div_shift = PRCM_DSITVCLK_DIV_DSI1_ESC_CLK_DIV_SHIFT,
	},
	{
		.en = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_EN,
		.div_mask = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_MASK,
		.div_shift = PRCM_DSITVCLK_DIV_DSI2_ESC_CLK_DIV_SHIFT,
	}
};


/*
 * Used by MCDE to setup all necessary PRCMU registers
 */
#define PRCMU_RESET_DSIPLL		0x00004000
#define PRCMU_UNCLAMP_DSIPLL		0x00400800
#define PRCMU_CLK_PLL_DIV_SHIFT		0
#define PRCMU_CLK_PLL_SW_SHIFT		5
#define PRCMU_CLK_38			(1 << 9)
#define PRCMU_CLK_38_SRC		(1 << 10)
#define PRCMU_CLK_38_DIV		(1 << 11)

/* PLLDIV=12, PLLSW=4 (PLLDDR) */
#define PRCMU_DSI_CLOCK_SETTING		0x0000008C

/* DPI 50000000 Hz */
#define PRCMU_DPI_CLOCK_SETTING		((1 << PRCMU_CLK_PLL_SW_SHIFT) | \
					 (16 << PRCMU_CLK_PLL_DIV_SHIFT))
#define PRCMU_DSI_LP_CLOCK_SETTING	0x00000E00

/* D=101, N=1, R=4, SELDIV2=0 */
#define PRCMU_PLLDSI_FREQ_SETTING	0x00040165

#define PRCMU_ENABLE_PLLDSI		0x00000001
#define PRCMU_DISABLE_PLLDSI		0x00000000
#define PRCMU_RELEASE_RESET_DSS		0x0000400C
#define PRCMU_DSI_PLLOUT_SEL_SETTING	0x00000202
/* ESC clk, div0=1, div1=1, div2=3 */
#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV	0x07030101
#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV	0x00030101
#define PRCMU_DSI_RESET_SW		0x00000007

#define PRCMU_PLLDSI_LOCKP_LOCKED	0x3

/*
 * Bring the DSI PLL out of reset, program its frequency, start it, and wait
 * (bounded, 10 x 100 us polls) for the lock indication. The register write
 * order below is the required hardware sequence; do not reorder.
 * Always returns 0 (the lock poll falls through even on timeout).
 */
int db8500_prcmu_enable_dsipll(void)
{
	int i;

	/* Clear DSIPLL_RESETN */
	writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
	/* Unclamp DSIPLL in/out */
	writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);

	/* Set DSI PLL FREQ */
	writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ);
	writel(PRCMU_DSI_PLLOUT_SEL_SETTING, PRCM_DSI_PLLOUT_SEL);
	/* Enable Escape clocks */
	writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);

	/* Start DSI PLL */
	writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
	/* Reset DSI PLL */
	writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
	for (i = 0; i < 10; i++) {
		if ((readl(PRCM_PLLDSI_LOCKP) & PRCMU_PLLDSI_LOCKP_LOCKED)
					== PRCMU_PLLDSI_LOCKP_LOCKED)
			break;
		udelay(100);
	}
	/* Set DSIPLL_RESETN */
	writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET);
	return 0;
}

/* Stop the DSI PLL and its escape clocks. Always returns 0. */
int db8500_prcmu_disable_dsipll(void)
{
	/* Disable dsi pll */
	writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
	/* Disable escapeclock */
	writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
	return 0;
}

/*
 * Program the HDMI/TV/LCD clock management registers for display use,
 * under the clk_mgt lock and the PRCM hardware semaphore. Always returns 0.
 */
int db8500_prcmu_set_display_clocks(void)
{
	unsigned long flags;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	writel(PRCMU_DSI_CLOCK_SETTING, PRCM_HDMICLK_MGT);
	writel(PRCMU_DSI_LP_CLOCK_SETTING, PRCM_TVCLK_MGT);
	writel(PRCMU_DPI_CLOCK_SETTING, PRCM_LCDCLK_MGT);

	/* Release the HW semaphore. */
	writel(0, PRCM_SEM);

	spin_unlock_irqrestore(&clk_mgt_lock, flags);

	return 0;
}

/* Read the PRCMU register at byte offset @reg from the PRCMU base. */
u32 db8500_prcmu_read(unsigned int reg)
{
	return readl(_PRCMU_BASE + reg);
}

/* Write @value to the PRCMU register at byte offset @reg (serialized). */
void db8500_prcmu_write(unsigned int reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(&prcmu_lock, flags);
	writel(value, (_PRCMU_BASE + reg));
	spin_unlock_irqrestore(&prcmu_lock, flags);
}

/*
 * Read-modify-write the PRCMU register at offset @reg: only the bits set in
 * @mask are replaced by the corresponding bits of @value.
 */
void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&prcmu_lock, flags);
	val = readl(_PRCMU_BASE + reg);
	val = ((val & ~mask) | (value & mask));
	writel(val, (_PRCMU_BASE + reg));
	spin_unlock_irqrestore(&prcmu_lock, flags);
}

/* Return the cached firmware version, or NULL if it was never read. */
struct prcmu_fw_version *prcmu_get_fw_version(void)
{
	return fw_info.valid ? &fw_info.version : NULL;
}

/* Check the AVS data for whether the ARM MAX OPP mode is enabled. */
bool prcmu_has_arm_maxopp(void)
{
	return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
		PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
}

/**
 * prcmu_get_boot_status - PRCMU boot status checking
 * Returns: the current PRCMU boot status
 */
int prcmu_get_boot_status(void)
{
	return readb(tcdm_base + PRCM_BOOT_STATUS);
}

/**
 * prcmu_set_rc_a2p - This function is used to run few power state sequences
 * @val: Value to be set, i.e.
transition requested
 * Returns: 0 on success, -EINVAL on invalid argument
 *
 * This function is used to run the following power state sequences -
 * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
 */
int prcmu_set_rc_a2p(enum romcode_write val)
{
	if (val < RDY_2_DS || val > RDY_2_XP70_RST)
		return -EINVAL;
	writeb(val, (tcdm_base + PRCM_ROMCODE_A2P));
	return 0;
}

/**
 * prcmu_get_rc_p2a - This function is used to get power state sequences
 * Returns: the power transition that has last happened
 *
 * This function can return the following transitions-
 * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
 */
enum romcode_read prcmu_get_rc_p2a(void)
{
	return readb(tcdm_base + PRCM_ROMCODE_P2A);
}

/**
 * prcmu_get_xp70_current_state - Return the current XP70 power mode
 * Returns: Returns the current AP(ARM) power mode: init,
 * apBoot, apExecute, apDeepSleep, apSleep, apIdle, apReset
 */
enum ap_pwrst prcmu_get_xp70_current_state(void)
{
	return readb(tcdm_base + PRCM_XP70_CUR_PWR_STATE);
}

/**
 * prcmu_config_clkout - Configure one of the programmable clock outputs.
 * @clkout: The CLKOUT number (0 or 1).
 * @source: The clock to be used (one of the PRCMU_CLKSRC_*).
 * @div: The divider to be applied.
 *
 * Configures one of the programmable clock outputs (CLKOUTs).
 * @div should be in the range [1,63] to request a configuration, or 0 to
 * inform that the configuration is no longer requested.
 */
int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
{
	/* Per-CLKOUT request reference counts; persist across calls. */
	static int requests[2];
	int r = 0;
	unsigned long flags;
	u32 val;
	u32 bits;
	u32 mask;
	u32 div_mask;

	BUG_ON(clkout > 1);
	BUG_ON(div > 63);
	BUG_ON((clkout == 0) && (source > PRCMU_CLKSRC_CLK009));

	/* A release (div == 0) without an outstanding request is an error. */
	if (!div && !requests[clkout])
		return -EINVAL;

	/* BUG_ON above guarantees clkout is 0 or 1, so no default case. */
	switch (clkout) {
	case 0:
		div_mask = PRCM_CLKOCR_CLKODIV0_MASK;
		mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK);
		bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) |
			(div << PRCM_CLKOCR_CLKODIV0_SHIFT));
		break;
	case 1:
		div_mask = PRCM_CLKOCR_CLKODIV1_MASK;
		mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK |
			PRCM_CLKOCR_CLK1TYPE);
		bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) |
			(div << PRCM_CLKOCR_CLKODIV1_SHIFT));
		break;
	}
	bits &= mask;

	spin_lock_irqsave(&clkout_lock, flags);

	val = readl(PRCM_CLKOCR);
	if (val & div_mask) {
		if (div) {
			/* Already configured: only identical requests stack. */
			if ((val & mask) != bits) {
				r = -EBUSY;
				goto unlock_and_return;
			}
		} else {
			/* Release must match the active configuration. */
			if ((val & mask & ~div_mask) != bits) {
				r = -EINVAL;
				goto unlock_and_return;
			}
		}
	}
	writel((bits | (val & ~mask)), PRCM_CLKOCR);
	requests[clkout] += (div ? 1 : -1);

unlock_and_return:
	spin_unlock_irqrestore(&clkout_lock, flags);

	return r;
}

/*
 * Request an AP power state transition via mailbox 0. @state must be in
 * [PRCMU_AP_SLEEP, PRCMU_AP_DEEP_IDLE]. Always returns 0.
 */
int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
{
	unsigned long flags;

	BUG_ON((state < PRCMU_AP_SLEEP) || (PRCMU_AP_DEEP_IDLE < state));

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	/* Wait until the previous mailbox 0 request has been consumed. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
		cpu_relax();

	writeb(MB0H_POWER_STATE_TRANS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
	writeb(state, (tcdm_base + PRCM_REQ_MB0_AP_POWER_STATE));
	writeb((keep_ap_pll ? 1 : 0), (tcdm_base + PRCM_REQ_MB0_AP_PLL_STATE));
	writeb((keep_ulp_clk ? 1 : 0),
		(tcdm_base + PRCM_REQ_MB0_ULP_CLOCK_STATE));
	writeb(0, (tcdm_base + PRCM_REQ_MB0_DO_NOT_WFI));
	writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);

	return 0;
}

/* Read the power-state-transition status acknowledged by the firmware. */
u8 db8500_prcmu_get_power_state_result(void)
{
	return readb(tcdm_base + PRCM_ACK_MB0_AP_PWRSTTR_STATUS);
}

/* This function decouple the gic from the prcmu */
int db8500_prcmu_gic_decouple(void)
{
	u32 val = readl(PRCM_A9_MASK_REQ);

	/* Set bit 0 register value to 1 */
	writel(val | PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ,
	       PRCM_A9_MASK_REQ);

	/* Make sure the register is updated */
	readl(PRCM_A9_MASK_REQ);

	/* Wait a few cycles for the gic mask completion */
	udelay(1);

	return 0;
}

/* This function recouple the gic with the prcmu */
int db8500_prcmu_gic_recouple(void)
{
	u32 val = readl(PRCM_A9_MASK_REQ);

	/* Set bit 0 register value to 0 */
	writel(val & ~PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ, PRCM_A9_MASK_REQ);

	return 0;
}

#define PRCMU_GIC_NUMBER_REGS 5

/*
 * This function checks if there are pending irq on the gic. It only
 * makes sense if the gic has been decoupled before with the
 * db8500_prcmu_gic_decouple function. Disabling an interrupt only
 * disables the forwarding of the interrupt to any CPU interface. It
 * does not prevent the interrupt from changing state, for example
 * becoming pending, or active and pending if it is already
 * active. Hence, we have to check the interrupt is pending *and* is
 * active.
 */
bool db8500_prcmu_gic_pending_irq(void)
{
	u32 pr; /* Pending register */
	u32 er; /* Enable register */
	void __iomem *dist_base = __io_address(U8500_GIC_DIST_BASE);
	int i;

	/* 5 registers.
STI & PPI not skipped */ 850 for (i = 0; i < PRCMU_GIC_NUMBER_REGS; i++) { 851 852 pr = readl_relaxed(dist_base + GIC_DIST_PENDING_SET + i * 4); 853 er = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); 854 855 if (pr & er) 856 return true; /* There is a pending interrupt */ 857 } 858 859 return false; 860 } 861 862 /* 863 * This function checks if there are pending interrupt on the 864 * prcmu which has been delegated to monitor the irqs with the 865 * db8500_prcmu_copy_gic_settings function. 866 */ 867 bool db8500_prcmu_pending_irq(void) 868 { 869 u32 it, im; 870 int i; 871 872 for (i = 0; i < PRCMU_GIC_NUMBER_REGS - 1; i++) { 873 it = readl(PRCM_ARMITVAL31TO0 + i * 4); 874 im = readl(PRCM_ARMITMSK31TO0 + i * 4); 875 if (it & im) 876 return true; /* There is a pending interrupt */ 877 } 878 879 return false; 880 } 881 882 /* 883 * This function checks if the specified cpu is in in WFI. It's usage 884 * makes sense only if the gic is decoupled with the db8500_prcmu_gic_decouple 885 * function. Of course passing smp_processor_id() to this function will 886 * always return false... 887 */ 888 bool db8500_prcmu_is_cpu_in_wfi(int cpu) 889 { 890 return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : 891 PRCM_ARM_WFI_STANDBY_WFI0; 892 } 893 894 /* 895 * This function copies the gic SPI settings to the prcmu in order to 896 * monitor them and abort/finish the retention/off sequence or state. 897 */ 898 int db8500_prcmu_copy_gic_settings(void) 899 { 900 u32 er; /* Enable register */ 901 void __iomem *dist_base = __io_address(U8500_GIC_DIST_BASE); 902 int i; 903 904 /* We skip the STI and PPI */ 905 for (i = 0; i < PRCMU_GIC_NUMBER_REGS - 1; i++) { 906 er = readl_relaxed(dist_base + 907 GIC_DIST_ENABLE_SET + (i + 1) * 4); 908 writel(er, PRCM_ARMITMSK31TO0 + i * 4); 909 } 910 911 return 0; 912 } 913 914 /* This function should only be called while mb0_transfer.lock is held. 
 */
static void config_wakeups(void)
{
	const u8 header[2] = {
		MB0H_CONFIG_WAKEUPS_EXE,
		MB0H_CONFIG_WAKEUPS_SLEEP
	};
	/* Last configuration sent to the firmware, to skip no-op updates. */
	static u32 last_dbb_events;
	static u32 last_abb_events;
	u32 dbb_events;
	u32 abb_events;
	unsigned int i;

	dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups;
	/* The AC_WAKE/AC_SLEEP acknowledge events are always enabled. */
	dbb_events |= (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK);

	abb_events = mb0_transfer.req.abb_events;

	if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events))
		return;

	/* Send one request for the execute state and one for sleep. */
	for (i = 0; i < 2; i++) {
		while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
			cpu_relax();
		writel(dbb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_8500));
		writel(abb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_4500));
		writeb(header[i], (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
		writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
	}
	last_dbb_events = dbb_events;
	last_abb_events = abb_events;
}

/*
 * Translate the index-based wakeup mask @wakeups into the firmware's bit
 * layout and configure it via mailbox 0.
 */
void db8500_prcmu_enable_wakeups(u32 wakeups)
{
	unsigned long flags;
	u32 bits;
	int i;

	BUG_ON(wakeups != (wakeups & VALID_WAKEUPS));

	for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) {
		if (wakeups & BIT(i))
			bits |= prcmu_wakeup_bit[i];
	}

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	mb0_transfer.req.dbb_wakeups = bits;
	config_wakeups();

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}

/* Configure which ABB (AB8500) events the firmware reads out on wakeup. */
void db8500_prcmu_config_abb_event_readout(u32 abb_events)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	mb0_transfer.req.abb_events = abb_events;
	config_wakeups();

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}

/*
 * Return (via @buf) the currently active ABB event buffer, selected by the
 * firmware's read pointer (bit 0 toggles between the two buffers).
 */
void db8500_prcmu_get_abb_event_buffer(void __iomem **buf)
{
	if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
		*buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_1_4500);
	else
		*buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_0_4500);
}

/**
 * db8500_prcmu_set_arm_opp - set the appropriate ARM OPP
 * @opp: The new ARM operating point to which transition is to be made
 * Returns: 0 on success, non-zero on failure
 *
 * This function sets the operating point of the ARM.
 */
int db8500_prcmu_set_arm_opp(u8 opp)
{
	int r;

	if (opp < ARM_NO_CHANGE || opp > ARM_EXTCLK)
		return -EINVAL;

	r = 0;

	mutex_lock(&mb1_transfer.lock);

	/* Wait until the previous mailbox 1 request has been consumed. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writeb(opp, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
	writeb(APE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_APE_OPP));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	/* The firmware must echo both the header and the requested OPP. */
	if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
		(mb1_transfer.ack.arm_opp != opp))
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/**
 * db8500_prcmu_get_arm_opp - get the current ARM OPP
 *
 * Returns: the current ARM OPP
 */
int db8500_prcmu_get_arm_opp(void)
{
	return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_ARM_OPP);
}

/**
 * db8500_prcmu_get_ddr_opp - get the current DDR OPP
 *
 * Returns: the current DDR OPP
 */
int db8500_prcmu_get_ddr_opp(void)
{
	return readb(PRCM_DDR_SUBSYS_APE_MINBW);
}

/**
 * db8500_prcmu_set_ddr_opp - set the appropriate DDR OPP
 * @opp: The new DDR operating point to which transition is to be made
 * Returns: 0 on success, non-zero on failure
 *
 * This function sets the operating point of the DDR.
1051 */ 1052 int db8500_prcmu_set_ddr_opp(u8 opp) 1053 { 1054 if (opp < DDR_100_OPP || opp > DDR_25_OPP) 1055 return -EINVAL; 1056 /* Changing the DDR OPP can hang the hardware pre-v21 */ 1057 if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20()) 1058 writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW); 1059 1060 return 0; 1061 } 1062 1063 /* Divide the frequency of certain clocks by 2 for APE_50_PARTLY_25_OPP. */ 1064 static void request_even_slower_clocks(bool enable) 1065 { 1066 void __iomem *clock_reg[] = { 1067 PRCM_ACLK_MGT, 1068 PRCM_DMACLK_MGT 1069 }; 1070 unsigned long flags; 1071 unsigned int i; 1072 1073 spin_lock_irqsave(&clk_mgt_lock, flags); 1074 1075 /* Grab the HW semaphore. */ 1076 while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0) 1077 cpu_relax(); 1078 1079 for (i = 0; i < ARRAY_SIZE(clock_reg); i++) { 1080 u32 val; 1081 u32 div; 1082 1083 val = readl(clock_reg[i]); 1084 div = (val & PRCM_CLK_MGT_CLKPLLDIV_MASK); 1085 if (enable) { 1086 if ((div <= 1) || (div > 15)) { 1087 pr_err("prcmu: Bad clock divider %d in %s\n", 1088 div, __func__); 1089 goto unlock_and_return; 1090 } 1091 div <<= 1; 1092 } else { 1093 if (div <= 2) 1094 goto unlock_and_return; 1095 div >>= 1; 1096 } 1097 val = ((val & ~PRCM_CLK_MGT_CLKPLLDIV_MASK) | 1098 (div & PRCM_CLK_MGT_CLKPLLDIV_MASK)); 1099 writel(val, clock_reg[i]); 1100 } 1101 1102 unlock_and_return: 1103 /* Release the HW semaphore. */ 1104 writel(0, PRCM_SEM); 1105 1106 spin_unlock_irqrestore(&clk_mgt_lock, flags); 1107 } 1108 1109 /** 1110 * db8500_set_ape_opp - set the appropriate APE OPP 1111 * @opp: The new APE operating point to which transition is to be made 1112 * Returns: 0 on success, non-zero on failure 1113 * 1114 * This function sets the operating point of the APE. 
 */
int db8500_prcmu_set_ape_opp(u8 opp)
{
	int r = 0;

	if (opp == mb1_transfer.ape_opp)
		return 0;

	mutex_lock(&mb1_transfer.lock);

	/* Leaving APE_50_PARTLY_25_OPP: restore normal clock dividers first. */
	if (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)
		request_even_slower_clocks(false);

	/*
	 * No mailbox message is needed when both the old and the new OPP are
	 * below 100% (the firmware-visible OPP does not change).
	 */
	if ((opp != APE_100_OPP) && (mb1_transfer.ape_opp != APE_100_OPP))
		goto skip_message;

	/* Busy-wait until mailbox 1 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
	/* APE_50_PARTLY_25_OPP is presented to the firmware as APE_50_OPP. */
	writeb(((opp == APE_50_PARTLY_25_OPP) ? APE_50_OPP : opp),
		(tcdm_base + PRCM_REQ_MB1_APE_OPP));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
		(mb1_transfer.ack.ape_opp != opp))
		r = -EIO;

skip_message:
	/*
	 * Halve the clocks when entering APE_50_PARTLY_25_OPP, or re-halve
	 * them if the transition failed while we were already in that state.
	 */
	if ((!r && (opp == APE_50_PARTLY_25_OPP)) ||
		(r && (mb1_transfer.ape_opp == APE_50_PARTLY_25_OPP)))
		request_even_slower_clocks(true);
	if (!r)
		mb1_transfer.ape_opp = opp;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/**
 * db8500_prcmu_get_ape_opp - get the current APE OPP
 *
 * Returns: the current APE OPP
 */
int db8500_prcmu_get_ape_opp(void)
{
	return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_APE_OPP);
}

/**
 * db8500_prcmu_request_ape_opp_100_voltage - Request APE OPP 100% voltage
 * @enable: true to request the higher voltage, false to drop a request.
 *
 * Calls to this function to enable and disable requests must be balanced.
 */
int db8500_prcmu_request_ape_opp_100_voltage(bool enable)
{
	int r = 0;
	u8 header;
	/* Reference count of outstanding 100% voltage requests. */
	static unsigned int requests;

	mutex_lock(&mb1_transfer.lock);

	if (enable) {
		/* Only the first request sends a message to the firmware. */
		if (0 != requests++)
			goto unlock_and_return;
		header = MB1H_REQUEST_APE_OPP_100_VOLT;
	} else {
		if (requests == 0) {
			/* Unbalanced release. */
			r = -EIO;
			goto unlock_and_return;
		} else if (1 != requests--) {
			/* Other requests still outstanding - keep voltage. */
			goto unlock_and_return;
		}
		header = MB1H_RELEASE_APE_OPP_100_VOLT;
	}

	/* Busy-wait until mailbox 1 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(header, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	/* Bit 0 of the voltage status signals failure. */
	if ((mb1_transfer.ack.header != header) ||
		((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
		r = -EIO;

unlock_and_return:
	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/**
 * prcmu_release_usb_wakeup_state - release the state required by a USB wakeup
 *
 * This function releases the power state requirements of a USB wakeup.
 */
int prcmu_release_usb_wakeup_state(void)
{
	int r = 0;

	mutex_lock(&mb1_transfer.lock);

	/* Busy-wait until mailbox 1 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_RELEASE_USB_WAKEUP,
		(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	if ((mb1_transfer.ack.header != MB1H_RELEASE_USB_WAKEUP) ||
		((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/*
 * request_pll - turn a SoC PLL on or off through mailbox 1.
 * @clock: PRCMU_PLLSOC0 or PRCMU_PLLSOC1.
 * @enable: true to power the PLL on, false to power it off.
 *
 * Returns 0 on success, -EINVAL for other clocks, -EIO on a bad ACK.
 */
static int request_pll(u8 clock, bool enable)
{
	int r = 0;

	/* Translate the clock id to the firmware on/off command value. */
	if (clock == PRCMU_PLLSOC0)
		clock = (enable ? PLL_SOC0_ON : PLL_SOC0_OFF);
	else if (clock == PRCMU_PLLSOC1)
		clock = (enable ? PLL_SOC1_ON : PLL_SOC1_OFF);
	else
		return -EINVAL;

	mutex_lock(&mb1_transfer.lock);

	/* Busy-wait until mailbox 1 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_PLL_ON_OFF, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writeb(clock, (tcdm_base + PRCM_REQ_MB1_PLL_ON_OFF));

	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	if (mb1_transfer.ack.header != MB1H_PLL_ON_OFF)
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);

	return r;
}

/**
 * db8500_prcmu_set_epod - set the state of a EPOD (power domain)
 * @epod_id: The EPOD to set
 * @epod_state: The new EPOD state
 *
 * This function sets the state of a EPOD (power domain). It may not be called
 * from interrupt context.
 */
int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state)
{
	int r = 0;
	bool ram_retention = false;
	int i;

	/* check argument */
	BUG_ON(epod_id >= NUM_EPOD_ID);

	/* set flag if retention is possible */
	switch (epod_id) {
	case EPOD_ID_SVAMMDSP:
	case EPOD_ID_SIAMMDSP:
	case EPOD_ID_ESRAM12:
	case EPOD_ID_ESRAM34:
		ram_retention = true;
		break;
	}

	/* check argument */
	BUG_ON(epod_state > EPOD_STATE_ON);
	BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention);

	/* get lock */
	mutex_lock(&mb2_transfer.lock);

	/* wait for mailbox */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
		cpu_relax();

	/* fill in mailbox: every EPOD slot is "no change" except the target */
	for (i = 0; i < NUM_EPOD_ID; i++)
		writeb(EPOD_STATE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB2 + i));
	writeb(epod_state, (tcdm_base + PRCM_REQ_MB2 + epod_id));

	writeb(MB2H_DPS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB2));

	writel(MBOX_BIT(2), PRCM_MBOX_CPU_SET);

	/*
	 * The current firmware version does not handle errors correctly,
	 * and we cannot recover if there is an error.
	 * This is expected to change when the firmware is updated.
	 */
	if (!wait_for_completion_timeout(&mb2_transfer.work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
		goto unlock_and_return;
	}

	if (mb2_transfer.ack.status != HWACC_PWR_ST_OK)
		r = -EIO;

unlock_and_return:
	mutex_unlock(&mb2_transfer.lock);
	return r;
}

/**
 * prcmu_configure_auto_pm - Configure autonomous power management.
 * @sleep: Configuration for ApSleep.
 * @idle: Configuration for ApIdle.
 */
void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
	struct prcmu_auto_pm_config *idle)
{
	u32 sleep_cfg;
	u32 idle_cfg;
	unsigned long flags;

	BUG_ON((sleep == NULL) || (idle == NULL));

	/* Pack the config into the 32-bit layout the firmware expects. */
	sleep_cfg = (sleep->sva_auto_pm_enable & 0xF);
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_auto_pm_enable & 0xF));
	sleep_cfg = ((sleep_cfg << 8) | (sleep->sva_power_on & 0xFF));
	sleep_cfg = ((sleep_cfg << 8) | (sleep->sia_power_on & 0xFF));
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sva_policy & 0xF));
	sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_policy & 0xF));

	idle_cfg = (idle->sva_auto_pm_enable & 0xF);
	idle_cfg = ((idle_cfg << 4) | (idle->sia_auto_pm_enable & 0xF));
	idle_cfg = ((idle_cfg << 8) | (idle->sva_power_on & 0xFF));
	idle_cfg = ((idle_cfg << 8) | (idle->sia_power_on & 0xFF));
	idle_cfg = ((idle_cfg << 4) | (idle->sva_policy & 0xF));
	idle_cfg = ((idle_cfg << 4) | (idle->sia_policy & 0xF));

	spin_lock_irqsave(&mb2_transfer.auto_pm_lock, flags);

	/*
	 * The autonomous power management configuration is done through
	 * fields in mailbox 2, but these fields are only used as shared
	 * variables - i.e. there is no need to send a message.
	 */
	writel(sleep_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_SLEEP));
	writel(idle_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_IDLE));

	mb2_transfer.auto_pm_enabled =
		((sleep->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (sleep->sia_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (idle->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
		 (idle->sia_auto_pm_enable == PRCMU_AUTO_PM_ON));

	spin_unlock_irqrestore(&mb2_transfer.auto_pm_lock, flags);
}
EXPORT_SYMBOL(prcmu_configure_auto_pm);

/* Report whether any autonomous power management is currently enabled. */
bool prcmu_is_auto_pm_enabled(void)
{
	return mb2_transfer.auto_pm_enabled;
}

/*
 * request_sysclk - request the system clock on or off via mailbox 3.
 *
 * Serialized by sysclk_lock; the inner spinlock protects the mailbox fill.
 * Returns 0 on success, -EIO on enable timeout.
 */
static int request_sysclk(bool enable)
{
	int r;
	unsigned long flags;

	r = 0;

	mutex_lock(&mb3_transfer.sysclk_lock);

	spin_lock_irqsave(&mb3_transfer.lock, flags);

	/* Busy-wait until mailbox 3 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
		cpu_relax();

	writeb((enable ? ON : OFF), (tcdm_base + PRCM_REQ_MB3_SYSCLK_MGT));

	writeb(MB3H_SYSCLK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB3));
	writel(MBOX_BIT(3), PRCM_MBOX_CPU_SET);

	spin_unlock_irqrestore(&mb3_transfer.lock, flags);

	/*
	 * The firmware only sends an ACK if we want to enable the
	 * SysClk, and it succeeds.
	 */
	if (enable && !wait_for_completion_timeout(&mb3_transfer.sysclk_work,
			msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
	}

	mutex_unlock(&mb3_transfer.sysclk_lock);

	return r;
}

/*
 * request_timclk - enable or stop the timer clock.
 *
 * Writes PRCM_TCR directly; no mailbox involved. Always succeeds.
 */
static int request_timclk(bool enable)
{
	u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);

	if (!enable)
		val |= PRCM_TCR_STOP_TIMERS;
	writel(val, PRCM_TCR);

	return 0;
}

/*
 * request_clock - gate or ungate a clock-management register clock.
 * @clock: index into the clk_mgt[] table.
 *
 * On disable, the current PLL switch setting is saved in clk_mgt[] so it
 * can be restored on the next enable. Protected by the HW semaphore.
 */
static int request_clock(u8 clock, bool enable)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	val = readl(clk_mgt[clock].reg);
	if (enable) {
		val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
	} else {
		/* Remember the PLL selection for when we re-enable. */
		clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
		val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
	}
	writel(val, clk_mgt[clock].reg);

	/* Release the HW semaphore. */
	writel(0, PRCM_SEM);

	spin_unlock_irqrestore(&clk_mgt_lock, flags);

	return 0;
}

/*
 * request_sga_clock - handle the SGA clock, which also needs the ICN2
 * interconnect clock-gating bypass set while enabled.
 */
static int request_sga_clock(u8 clock, bool enable)
{
	u32 val;
	int ret;

	/* Bypass must be set before the clock is enabled... */
	if (enable) {
		val = readl(PRCM_CGATING_BYPASS);
		writel(val | PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
	}

	ret = request_clock(clock, enable);

	/* ...and cleared only after a successful disable. */
	if (!ret && !enable) {
		val = readl(PRCM_CGATING_BYPASS);
		writel(val & ~PRCM_CGATING_BYPASS_ICN2, PRCM_CGATING_BYPASS);
	}

	return ret;
}

/* True when both DSI PLL lock indications are asserted. */
static inline bool plldsi_locked(void)
{
	return (readl(PRCM_PLLDSI_LOCKP) &
		(PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP10 |
		 PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP3)) ==
		(PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP10 |
		 PRCM_PLLDSI_LOCKP_PRCM_PLLDSI_LOCKP3);
}

/*
 * request_plldsi - power the DSI PLL up or down.
 *
 * On enable: release the clamps, enable the PLL, then poll for lock for up
 * to ~1 ms. If lock is achieved the DSI PLL reset is de-asserted; otherwise
 * everything is rolled back and -EAGAIN is returned.
 */
static int request_plldsi(bool enable)
{
	int r = 0;
	u32 val;

	writel((PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP |
		PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI), (enable ?
		PRCM_MMIP_LS_CLAMP_CLR : PRCM_MMIP_LS_CLAMP_SET));

	val = readl(PRCM_PLLDSI_ENABLE);
	if (enable)
		val |= PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
	else
		val &= ~PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
	writel(val, PRCM_PLLDSI_ENABLE);

	if (enable) {
		unsigned int i;
		bool locked = plldsi_locked();

		/* Poll for PLL lock: up to 10 x 100 us. */
		for (i = 10; !locked && (i > 0); --i) {
			udelay(100);
			locked = plldsi_locked();
		}
		if (locked) {
			writel(PRCM_APE_RESETN_DSIPLL_RESETN,
				PRCM_APE_RESETN_SET);
		} else {
			/* Lock failed - re-clamp and disable the PLL. */
			writel((PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMP |
				PRCM_MMIP_LS_CLAMP_DSIPLL_CLAMPI),
				PRCM_MMIP_LS_CLAMP_SET);
			val &= ~PRCM_PLLDSI_ENABLE_PRCM_PLLDSI_ENABLE;
			writel(val, PRCM_PLLDSI_ENABLE);
			r = -EAGAIN;
		}
	} else {
		writel(PRCM_APE_RESETN_DSIPLL_RESETN, PRCM_APE_RESETN_CLR);
	}
	return r;
}

/*
 * request_dsiclk - enable/disable DSI clock output @n by programming its
 * divider-select field (OFF value disables the output).
 */
static int request_dsiclk(u8 n, bool enable)
{
	u32 val;

	val = readl(PRCM_DSI_PLLOUT_SEL);
	val &= ~dsiclk[n].divsel_mask;
	val |= ((enable ? dsiclk[n].divsel : PRCM_DSI_PLLOUT_SEL_OFF) <<
		dsiclk[n].divsel_shift);
	writel(val, PRCM_DSI_PLLOUT_SEL);
	return 0;
}

/* Enable/disable DSI escape clock @n via its enable bit. */
static int request_dsiescclk(u8 n, bool enable)
{
	u32 val;

	val = readl(PRCM_DSITVCLK_DIV);
	enable ? (val |= dsiescclk[n].en) : (val &= ~dsiescclk[n].en);
	writel(val, PRCM_DSITVCLK_DIV);
	return 0;
}

/**
 * db8500_prcmu_request_clock() - Request for a clock to be enabled or disabled.
 * @clock: The clock for which the request is made.
 * @enable: Whether the clock should be enabled (true) or disabled (false).
 *
 * This function should only be used by the clock implementation.
 * Do not use it from any other place!
 */
int db8500_prcmu_request_clock(u8 clock, bool enable)
{
	/* Dispatch to the handler matching the clock type. */
	if (clock == PRCMU_SGACLK)
		return request_sga_clock(clock, enable);
	else if (clock < PRCMU_NUM_REG_CLOCKS)
		return request_clock(clock, enable);
	else if (clock == PRCMU_TIMCLK)
		return request_timclk(enable);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		return request_dsiclk((clock - PRCMU_DSI0CLK), enable);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		return request_dsiescclk((clock - PRCMU_DSI0ESCCLK), enable);
	else if (clock == PRCMU_PLLDSI)
		return request_plldsi(enable);
	else if (clock == PRCMU_SYSCLK)
		return request_sysclk(enable);
	else if ((clock == PRCMU_PLLSOC0) || (clock == PRCMU_PLLSOC1))
		return request_pll(clock, enable);
	else
		return -EINVAL;
}

/*
 * pll_rate - compute the output rate of a PLL from its FREQ register.
 * @reg: the PLL frequency register.
 * @src_rate: the PLL input (reference) rate.
 * @branch: which output branch is queried (PLL_RAW, PLL_FIX or PLL_DIV).
 *
 * rate = src_rate * D / (N * R), with the optional /2 stages applied
 * depending on SELDIV2, DIV2EN and the queried branch.
 */
static unsigned long pll_rate(void __iomem *reg, unsigned long src_rate,
	int branch)
{
	u64 rate;
	u32 val;
	u32 d;
	u32 div = 1;

	val = readl(reg);

	rate = src_rate;
	rate *= ((val & PRCM_PLL_FREQ_D_MASK) >> PRCM_PLL_FREQ_D_SHIFT);

	/* Input divider N (values <= 1 mean no division). */
	d = ((val & PRCM_PLL_FREQ_N_MASK) >> PRCM_PLL_FREQ_N_SHIFT);
	if (d > 1)
		div *= d;

	/* Output divider R. */
	d = ((val & PRCM_PLL_FREQ_R_MASK) >> PRCM_PLL_FREQ_R_SHIFT);
	if (d > 1)
		div *= d;

	if (val & PRCM_PLL_FREQ_SELDIV2)
		div *= 2;

	/*
	 * The FIX branch is always half rate; the DIV branch is halved on
	 * these three PLLs when DIV2EN is set.
	 */
	if ((branch == PLL_FIX) || ((branch == PLL_DIV) &&
		(val & PRCM_PLL_FREQ_DIV2EN) &&
		((reg == PRCM_PLLSOC0_FREQ) ||
		 (reg == PRCM_PLLARM_FREQ) ||
		 (reg == PRCM_PLLDDR_FREQ))))
		div *= 2;

	(void)do_div(rate, div);

	return (unsigned long)rate;
}

/* The 38.4 MHz reference clock all PLLs and dividers derive from. */
#define ROOT_CLOCK_RATE 38400000

/*
 * clock_rate - compute the current rate of a clk_mgt[] register clock.
 *
 * Handles the direct-38.4MHz path (with optional /2), the three PLL
 * sources, the special SGA x10/25 divider, and the final PLL divider.
 * Returns 0 when the rate cannot be determined.
 */
static unsigned long clock_rate(u8 clock)
{
	u32 val;
	u32 pllsw;
	unsigned long rate = ROOT_CLOCK_RATE;

	val = readl(clk_mgt[clock].reg);

	if (val & PRCM_CLK_MGT_CLK38) {
		/* Clock fed directly from the 38.4 MHz reference. */
		if (clk_mgt[clock].clk38div && (val & PRCM_CLK_MGT_CLK38DIV))
			rate /= 2;
		return rate;
	}

	/* A gated clock has its PLLSW bits cleared; use the saved value. */
	val |= clk_mgt[clock].pllsw;
	pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);

	if (pllsw == PRCM_CLK_MGT_CLKPLLSW_SOC0)
		rate = pll_rate(PRCM_PLLSOC0_FREQ, rate, clk_mgt[clock].branch);
	else if (pllsw == PRCM_CLK_MGT_CLKPLLSW_SOC1)
		rate = pll_rate(PRCM_PLLSOC1_FREQ, rate, clk_mgt[clock].branch);
	else if (pllsw == PRCM_CLK_MGT_CLKPLLSW_DDR)
		rate = pll_rate(PRCM_PLLDDR_FREQ, rate, clk_mgt[clock].branch);
	else
		return 0;

	if ((clock == PRCMU_SGACLK) &&
		(val & PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN)) {
		/* SGA has a special divide-by-2.5 mode: rate * 10 / 25. */
		u64 r = (rate * 10);

		(void)do_div(r, 25);
		return (unsigned long)r;
	}
	val &= PRCM_CLK_MGT_CLKPLLDIV_MASK;
	if (val)
		return rate / val;
	else
		return 0;
}

/*
 * armss_rate - compute the current ARM subsystem clock rate.
 *
 * Either the external ARMCLKFIX path (derived from the DDR PLL FIX branch)
 * or the ARM PLL DIV branch, depending on PRCM_ARM_CHGCLKREQ.
 */
static unsigned long armss_rate(void)
{
	u32 r;
	unsigned long rate;

	r = readl(PRCM_ARM_CHGCLKREQ);

	if (r & PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ) {
		/* External ARMCLKFIX clock */

		rate = pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, PLL_FIX);

		/* Check PRCM_ARM_CHGCLKREQ divider */
		if (!(r & PRCM_ARM_CHGCLKREQ_PRCM_ARM_DIVSEL))
			rate /= 2;

		/*
		 * Check PRCM_ARMCLKFIX_MGT divider.
		 * NOTE(review): a zero CLKPLLDIV field would divide by zero
		 * here - presumably the firmware never programs 0; confirm.
		 */
		r = readl(PRCM_ARMCLKFIX_MGT);
		r &= PRCM_CLK_MGT_CLKPLLDIV_MASK;
		rate /= r;

	} else {/* ARM PLL */
		rate = pll_rate(PRCM_PLLARM_FREQ, ROOT_CLOCK_RATE, PLL_DIV);
	}

	return rate;
}

/*
 * dsiclk_rate - rate of DSI clock output @n, derived from the DSI PLL.
 *
 * The divider-select field chooses PHI, PHI/2 or PHI/4; when the output
 * is off, the last requested divsel is used instead.
 */
static unsigned long dsiclk_rate(u8 n)
{
	u32 divsel;
	u32 div = 1;

	divsel = readl(PRCM_DSI_PLLOUT_SEL);
	divsel = ((divsel & dsiclk[n].divsel_mask) >> dsiclk[n].divsel_shift);

	if (divsel == PRCM_DSI_PLLOUT_SEL_OFF)
		divsel = dsiclk[n].divsel;

	/* Intentional cascade: PHI_4 doubles div twice, PHI_2 once. */
	switch (divsel) {
	case PRCM_DSI_PLLOUT_SEL_PHI_4:
		div *= 2;
		/* fallthrough */
	case PRCM_DSI_PLLOUT_SEL_PHI_2:
		div *= 2;
		/* fallthrough */
	case PRCM_DSI_PLLOUT_SEL_PHI:
		return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
			PLL_RAW) / div;
	default:
		return 0;
	}
}

/* Rate of DSI escape clock @n: TV clock divided by its divider field. */
static unsigned long dsiescclk_rate(u8 n)
{
	u32 div;

	div = readl(PRCM_DSITVCLK_DIV);
	div = ((div & dsiescclk[n].div_mask) >> (dsiescclk[n].div_shift));
	return clock_rate(PRCMU_TVCLK) / max((u32)1, div);
}

/*
 * prcmu_clock_rate - public rate query; dispatches on the clock id.
 * Returns 0 for unknown clocks.
 */
unsigned long prcmu_clock_rate(u8 clock)
{
	if (clock < PRCMU_NUM_REG_CLOCKS)
		return clock_rate(clock);
	else if (clock == PRCMU_TIMCLK)
		return ROOT_CLOCK_RATE / 16;
	else if (clock == PRCMU_SYSCLK)
		return ROOT_CLOCK_RATE;
	else if (clock == PRCMU_PLLSOC0)
		return pll_rate(PRCM_PLLSOC0_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
	else if (clock == PRCMU_PLLSOC1)
		return pll_rate(PRCM_PLLSOC1_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
	else if (clock == PRCMU_ARMSS)
		return armss_rate();
	else if (clock == PRCMU_PLLDDR)
		return pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, PLL_RAW);
	else if (clock == PRCMU_PLLDSI)
		return pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
			PLL_RAW);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		return dsiclk_rate(clock - PRCMU_DSI0CLK);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		return dsiescclk_rate(clock - PRCMU_DSI0ESCCLK);
	else
		return 0;
}

/*
 * clock_source_rate - rate of the source feeding a clk_mgt register,
 * given the register value (38.4 MHz reference or one of the PLLs).
 * Returns 0 if no source is selected.
 */
static unsigned long clock_source_rate(u32 clk_mgt_val, int branch)
{
	if (clk_mgt_val & PRCM_CLK_MGT_CLK38)
		return ROOT_CLOCK_RATE;
	clk_mgt_val &= PRCM_CLK_MGT_CLKPLLSW_MASK;
	if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_SOC0)
		return pll_rate(PRCM_PLLSOC0_FREQ, ROOT_CLOCK_RATE, branch);
	else if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_SOC1)
		return pll_rate(PRCM_PLLSOC1_FREQ, ROOT_CLOCK_RATE, branch);
	else if (clk_mgt_val == PRCM_CLK_MGT_CLKPLLSW_DDR)
		return pll_rate(PRCM_PLLDDR_FREQ, ROOT_CLOCK_RATE, branch);
	else
		return 0;
}

/*
 * clock_divider - smallest divider giving at most @rate from @src_rate.
 * Always returns at least 1.
 */
static u32 clock_divider(unsigned long src_rate, unsigned long rate)
{
	u32 div;

	div = (src_rate / rate);
	if (div == 0)
		return 1;
	/* Round the divider up so the result does not exceed @rate. */
	if (rate < (src_rate / div))
		div++;
	return div;
}

/*
 * round_clock_rate - rate that set_clock_rate() would actually program
 * for a clk_mgt register clock, honoring the 38.4 MHz path limits, the
 * SGA divide-by-2.5 mode and the 5-bit (max 31) PLL divider.
 */
static long round_clock_rate(u8 clock, unsigned long rate)
{
	u32 val;
	u32 div;
	unsigned long src_rate;
	long rounded_rate;

	val = readl(clk_mgt[clock].reg);
	src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
		clk_mgt[clock].branch);
	div = clock_divider(src_rate, rate);
	if (val & PRCM_CLK_MGT_CLK38) {
		/* The 38.4 MHz path only supports /1 and (optionally) /2. */
		if (clk_mgt[clock].clk38div) {
			if (div > 2)
				div = 2;
		} else {
			div = 1;
		}
	} else if ((clock == PRCMU_SGACLK) && (div == 3)) {
		/*
		 * div 3 on SGA can be served by the divide-by-2.5 mode.
		 * NOTE(review): src_rate * 10 is computed in unsigned long,
		 * which can wrap on 32-bit for src_rate above ~429 MHz
		 * before the widening to u64 - confirm the source range.
		 */
		u64 r = (src_rate * 10);

		(void)do_div(r, 25);
		if (r <= rate)
			return (unsigned long)r;
	}
	rounded_rate = (src_rate / min(div, (u32)31));

	return rounded_rate;
}

/* CPU FREQ table, may be changed due to if MAX_OPP is supported. */
static struct cpufreq_frequency_table db8500_cpufreq_table[] = {
	{ .frequency = 200000, .index = ARM_EXTCLK,},
	{ .frequency = 400000, .index = ARM_50_OPP,},
	{ .frequency = 800000, .index = ARM_100_OPP,},
	{ .frequency = CPUFREQ_TABLE_END,}, /* To be used for MAX_OPP. */
	{ .frequency = CPUFREQ_TABLE_END,},
};

/*
 * round_armss_rate - round an ARM subsystem rate to a cpufreq table entry.
 * Returns the last table entry if no exact match exists.
 */
static long round_armss_rate(unsigned long rate)
{
	long freq = 0;
	int i = 0;

	/* cpufreq table frequencies is in KHz. */
	rate = rate / 1000;

	/* Find the corresponding arm opp from the cpufreq table. */
	while (db8500_cpufreq_table[i].frequency != CPUFREQ_TABLE_END) {
		freq = db8500_cpufreq_table[i].frequency;
		if (freq == rate)
			break;
		i++;
	}

	/* Return the last valid value, even if a match was not found. */
	return freq * 1000;
}

/* DSI PLL VCO limits (the VCO runs at twice the D*src/R rate). */
#define MIN_PLL_VCO_RATE 600000000ULL
#define MAX_PLL_VCO_RATE 1680640000ULL

/*
 * round_plldsi_rate - closest achievable DSI PLL rate not above @rate.
 *
 * Searches R = 7..1 and clamps D to [6, 255], rejecting combinations whose
 * VCO frequency falls outside the allowed window.  Mirrors the search in
 * set_plldsi_rate().
 */
static long round_plldsi_rate(unsigned long rate)
{
	long rounded_rate = 0;
	unsigned long src_rate;
	unsigned long rem;
	u32 r;

	src_rate = clock_rate(PRCMU_HDMICLK);
	rem = rate;

	for (r = 7; (rem > 0) && (r > 0); r--) {
		u64 d;

		/* Ideal multiplier D for this R, clamped to the HW range. */
		d = (r * rate);
		(void)do_div(d, src_rate);
		if (d < 6)
			d = 6;
		else if (d > 255)
			d = 255;
		d *= src_rate;
		/* Reject D/R pairs whose VCO rate is out of range. */
		if (((2 * d) < (r * MIN_PLL_VCO_RATE)) ||
			((r * MAX_PLL_VCO_RATE) < (2 * d)))
			continue;
		(void)do_div(d, r);
		if (rate < d) {
			if (rounded_rate == 0)
				rounded_rate = (long)d;
			break;
		}
		/* Keep the candidate with the smallest shortfall. */
		if ((rate - d) < rem) {
			rem = (rate - d);
			rounded_rate = (long)d;
		}
	}
	return rounded_rate;
}

/*
 * round_dsiclk_rate - round a DSI clock rate to PHI, PHI/2 or PHI/4
 * of the DSI PLL output.
 */
static long round_dsiclk_rate(unsigned long rate)
{
	u32 div;
	unsigned long src_rate;
	long rounded_rate;

	src_rate = pll_rate(PRCM_PLLDSI_FREQ, clock_rate(PRCMU_HDMICLK),
		PLL_RAW);
	div = clock_divider(src_rate, rate);
	/* Only dividers 1, 2 and 4 exist in hardware. */
	rounded_rate = (src_rate / ((div > 2) ? 4 : div));

	return rounded_rate;
}

/*
 * round_dsiescclk_rate - round a DSI escape clock rate; the divider field
 * is 8 bits wide (max 255).
 */
static long round_dsiescclk_rate(unsigned long rate)
{
	u32 div;
	unsigned long src_rate;
	long rounded_rate;

	src_rate = clock_rate(PRCMU_TVCLK);
	div = clock_divider(src_rate, rate);
	rounded_rate = (src_rate / min(div, (u32)255));

	return rounded_rate;
}

/*
 * prcmu_round_clock_rate - public rate-rounding entry point.
 * Clocks with no settable rate just report their current rate.
 */
long prcmu_round_clock_rate(u8 clock, unsigned long rate)
{
	if (clock < PRCMU_NUM_REG_CLOCKS)
		return round_clock_rate(clock, rate);
	else if (clock == PRCMU_ARMSS)
		return round_armss_rate(rate);
	else if (clock == PRCMU_PLLDSI)
		return round_plldsi_rate(rate);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		return round_dsiclk_rate(rate);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		return round_dsiescclk_rate(rate);
	else
		return (long)prcmu_clock_rate(clock);
}

/*
 * set_clock_rate - program a clk_mgt register clock to (approximately)
 * @rate, under the HW semaphore.  The divider logic matches
 * round_clock_rate().
 */
static void set_clock_rate(u8 clock, unsigned long rate)
{
	u32 val;
	u32 div;
	unsigned long src_rate;
	unsigned long flags;

	spin_lock_irqsave(&clk_mgt_lock, flags);

	/* Grab the HW semaphore. */
	while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
		cpu_relax();

	val = readl(clk_mgt[clock].reg);
	src_rate = clock_source_rate((val | clk_mgt[clock].pllsw),
		clk_mgt[clock].branch);
	div = clock_divider(src_rate, rate);
	if (val & PRCM_CLK_MGT_CLK38) {
		/* The 38.4 MHz path only has the optional /2 bit. */
		if (clk_mgt[clock].clk38div) {
			if (div > 1)
				val |= PRCM_CLK_MGT_CLK38DIV;
			else
				val &= ~PRCM_CLK_MGT_CLK38DIV;
		}
	} else if (clock == PRCMU_SGACLK) {
		val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK |
			PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN);
		if (div == 3) {
			/*
			 * Use the divide-by-2.5 mode when it does not
			 * overshoot the requested rate.
			 * NOTE(review): src_rate * 10 is unsigned long
			 * arithmetic; may wrap on 32-bit - see
			 * round_clock_rate().
			 */
			u64 r = (src_rate * 10);

			(void)do_div(r, 25);
			if (r <= rate) {
				val |= PRCM_SGACLK_MGT_SGACLKDIV_BY_2_5_EN;
				div = 0;
			}
		}
		val |= min(div, (u32)31);
	} else {
		val &= ~PRCM_CLK_MGT_CLKPLLDIV_MASK;
		val |= min(div, (u32)31);
	}
	writel(val, clk_mgt[clock].reg);

	/* Release the HW semaphore. */
	writel(0, PRCM_SEM);

	spin_unlock_irqrestore(&clk_mgt_lock, flags);
}

/*
 * set_armss_rate - set the ARM rate by switching to the matching OPP.
 * Returns -EINVAL if @rate is not an exact cpufreq table frequency.
 */
static int set_armss_rate(unsigned long rate)
{
	int i = 0;

	/* cpufreq table frequencies is in KHz. */
	rate = rate / 1000;

	/* Find the corresponding arm opp from the cpufreq table. */
	while (db8500_cpufreq_table[i].frequency != CPUFREQ_TABLE_END) {
		if (db8500_cpufreq_table[i].frequency == rate)
			break;
		i++;
	}

	if (db8500_cpufreq_table[i].frequency != rate)
		return -EINVAL;

	/* Set the new arm opp. */
	return db8500_prcmu_set_arm_opp(db8500_cpufreq_table[i].index);
}

/*
 * set_plldsi_rate - program the DSI PLL FREQ register for @rate.
 *
 * Same D/R search as round_plldsi_rate(); writes the best D and R with
 * N fixed to 1.  Returns -EINVAL if no valid combination exists.
 */
static int set_plldsi_rate(unsigned long rate)
{
	unsigned long src_rate;
	unsigned long rem;
	u32 pll_freq = 0;
	u32 r;

	src_rate = clock_rate(PRCMU_HDMICLK);
	rem = rate;

	for (r = 7; (rem > 0) && (r > 0); r--) {
		u64 d;
		u64 hwrate;

		/* Ideal multiplier D for this R, clamped to the HW range. */
		d = (r * rate);
		(void)do_div(d, src_rate);
		if (d < 6)
			d = 6;
		else if (d > 255)
			d = 255;
		hwrate = (d * src_rate);
		/* Reject D/R pairs whose VCO rate is out of range. */
		if (((2 * hwrate) < (r * MIN_PLL_VCO_RATE)) ||
			((r * MAX_PLL_VCO_RATE) < (2 * hwrate)))
			continue;
		(void)do_div(hwrate, r);
		if (rate < hwrate) {
			if (pll_freq == 0)
				pll_freq = (((u32)d << PRCM_PLL_FREQ_D_SHIFT) |
					(r << PRCM_PLL_FREQ_R_SHIFT));
			break;
		}
		/* Keep the candidate with the smallest shortfall. */
		if ((rate - hwrate) < rem) {
			rem = (rate - hwrate);
			pll_freq = (((u32)d << PRCM_PLL_FREQ_D_SHIFT) |
				(r << PRCM_PLL_FREQ_R_SHIFT));
		}
	}
	if (pll_freq == 0)
		return -EINVAL;

	/* N is fixed to 1. */
	pll_freq |= (1 << PRCM_PLL_FREQ_N_SHIFT);
	writel(pll_freq, PRCM_PLLDSI_FREQ);

	return 0;
}

/*
 * set_dsiclk_rate - select PHI, PHI/2 or PHI/4 for DSI clock output @n
 * and remember the choice in dsiclk[] for request_dsiclk().
 */
static void set_dsiclk_rate(u8 n, unsigned long rate)
{
	u32 val;
	u32 div;

	div = clock_divider(pll_rate(PRCM_PLLDSI_FREQ,
		clock_rate(PRCMU_HDMICLK), PLL_RAW), rate);

	dsiclk[n].divsel = (div == 1) ? PRCM_DSI_PLLOUT_SEL_PHI :
			   (div == 2) ? PRCM_DSI_PLLOUT_SEL_PHI_2 :
			/* else */	PRCM_DSI_PLLOUT_SEL_PHI_4;

	val = readl(PRCM_DSI_PLLOUT_SEL);
	val &= ~dsiclk[n].divsel_mask;
	val |= (dsiclk[n].divsel << dsiclk[n].divsel_shift);
	writel(val, PRCM_DSI_PLLOUT_SEL);
}

/* Program the 8-bit divider of DSI escape clock @n. */
static void set_dsiescclk_rate(u8 n, unsigned long rate)
{
	u32 val;
	u32 div;

	div = clock_divider(clock_rate(PRCMU_TVCLK), rate);
	val = readl(PRCM_DSITVCLK_DIV);
	val &= ~dsiescclk[n].div_mask;
	val |= (min(div, (u32)255) << dsiescclk[n].div_shift);
	writel(val, PRCM_DSITVCLK_DIV);
}

/*
 * prcmu_set_clock_rate - public rate-setting entry point.
 * Returns 0 for clocks whose setters cannot fail (or are unknown).
 */
int prcmu_set_clock_rate(u8 clock, unsigned long rate)
{
	if (clock < PRCMU_NUM_REG_CLOCKS)
		set_clock_rate(clock, rate);
	else if (clock == PRCMU_ARMSS)
		return set_armss_rate(rate);
	else if (clock == PRCMU_PLLDSI)
		return set_plldsi_rate(rate);
	else if ((clock == PRCMU_DSI0CLK) || (clock == PRCMU_DSI1CLK))
		set_dsiclk_rate((clock - PRCMU_DSI0CLK), rate);
	else if ((PRCMU_DSI0ESCCLK <= clock) && (clock <= PRCMU_DSI2ESCCLK))
		set_dsiescclk_rate((clock - PRCMU_DSI0ESCCLK), rate);
	return 0;
}

/*
 * db8500_prcmu_config_esram0_deep_sleep - set the esram0 deep-sleep state
 * via mailbox 4 (together with fixed DDR power-state settings).
 * Returns 0 on success, -EINVAL for an out-of-range state.
 */
int db8500_prcmu_config_esram0_deep_sleep(u8 state)
{
	if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
		(state < ESRAM0_DEEP_SLEEP_STATE_OFF))
		return -EINVAL;

	mutex_lock(&mb4_transfer.lock);

	/* Busy-wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(MB4H_MEM_ST, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
	writeb(((DDR_PWR_STATE_OFFHIGHLAT << 4) | DDR_PWR_STATE_ON),
		(tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE));
	writeb(DDR_PWR_STATE_ON,
		(tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE));
	writeb(state, (tcdm_base + PRCM_REQ_MB4_ESRAM0_ST));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

/* Configure the "hotdog" thermal shutdown threshold via mailbox 4. */
int db8500_prcmu_config_hotdog(u8 threshold)
{
	mutex_lock(&mb4_transfer.lock);

	/* Busy-wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(threshold, (tcdm_base + PRCM_REQ_MB4_HOTDOG_THRESHOLD));
	writeb(MB4H_HOTDOG, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

/* Configure the low/high thermal monitor thresholds via mailbox 4. */
int db8500_prcmu_config_hotmon(u8 low, u8 high)
{
	mutex_lock(&mb4_transfer.lock);

	/* Busy-wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(low, (tcdm_base + PRCM_REQ_MB4_HOTMON_LOW));
	writeb(high, (tcdm_base + PRCM_REQ_MB4_HOTMON_HIGH));
	writeb((HOTMON_CONFIG_LOW | HOTMON_CONFIG_HIGH),
		(tcdm_base + PRCM_REQ_MB4_HOTMON_CONFIG));
	writeb(MB4H_HOTMON, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

/*
 * config_hot_period - set the temperature measurement period (in 32 kHz
 * cycles) via mailbox 4; 0xFFFF stops the measurements.
 */
static int config_hot_period(u16 val)
{
	mutex_lock(&mb4_transfer.lock);

	/* Busy-wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writew(val, (tcdm_base + PRCM_REQ_MB4_HOT_PERIOD));
	writeb(MB4H_HOT_PERIOD, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;
}

/*
 * db8500_prcmu_start_temp_sense - start periodic temperature measurement.
 * @cycles32k: measurement period; 0xFFFF is reserved for "stop".
 */
int db8500_prcmu_start_temp_sense(u16 cycles32k)
{
	if (cycles32k == 0xFFFF)
		return -EINVAL;

	return config_hot_period(cycles32k);
}

/* Stop the periodic temperature measurement. */
int db8500_prcmu_stop_temp_sense(void)
{
	return config_hot_period(0xFFFF);
}

/*
 * prcmu_a9wdog - send an A9 watchdog command with four data bytes through
 * mailbox 4.  The command byte doubles as the mailbox header.
 */
static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
{

	mutex_lock(&mb4_transfer.lock);

	/* Busy-wait until mailbox 4 is free. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
		cpu_relax();

	writeb(d0, (tcdm_base + PRCM_REQ_MB4_A9WDOG_0));
	writeb(d1, (tcdm_base + PRCM_REQ_MB4_A9WDOG_1));
	writeb(d2, (tcdm_base + PRCM_REQ_MB4_A9WDOG_2));
	writeb(d3, (tcdm_base + PRCM_REQ_MB4_A9WDOG_3));

	writeb(cmd, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));

	writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb4_transfer.work);

	mutex_unlock(&mb4_transfer.lock);

	return 0;

}

/*
 * db8500_prcmu_config_a9wdog - configure the number of A9 watchdogs and
 * whether they are automatically turned off during sleep.
 */
int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
{
	BUG_ON(num == 0 || num > 0xf);
	return prcmu_a9wdog(MB4H_A9WDOG_CONF, num, 0, 0,
			sleep_auto_off ? A9WDOG_AUTO_OFF_EN :
			A9WDOG_AUTO_OFF_DIS);
}

/* Enable A9 watchdog @id. */
int db8500_prcmu_enable_a9wdog(u8 id)
{
	return prcmu_a9wdog(MB4H_A9WDOG_EN, id, 0, 0, 0);
}

/* Disable A9 watchdog @id. */
int db8500_prcmu_disable_a9wdog(u8 id)
{
	return prcmu_a9wdog(MB4H_A9WDOG_DIS, id, 0, 0, 0);
}

/* Kick (refresh) A9 watchdog @id. */
int db8500_prcmu_kick_a9wdog(u8 id)
{
	return prcmu_a9wdog(MB4H_A9WDOG_KICK, id, 0, 0, 0);
}

/*
 * timeout is 28 bit, in ms.
 */
int db8500_prcmu_load_a9wdog(u8 id, u32 timeout)
{
	return prcmu_a9wdog(MB4H_A9WDOG_LOAD,
			(id & A9WDOG_ID_MASK) |
			/*
			 * Put the lowest 28 bits of timeout at
			 * offset 4. Four first bits are used for id.
			 */
			(u8)((timeout << 4) & 0xf0),
			(u8)((timeout >> 4) & 0xff),
			(u8)((timeout >> 12) & 0xff),
			(u8)((timeout >> 20) & 0xff));
}

/**
 * prcmu_abb_read() - Read register value(s) from the ABB.
 * @slave: The I2C slave address.
 * @reg: The (start) register address.
 * @value: The read out value(s).
 * @size: The number of registers to read.
 *
 * Reads register value(s) from the ABB.
 * @size has to be 1 for the current firmware version.
 */
int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
{
	int r;

	if (size != 1)
		return -EINVAL;

	mutex_lock(&mb5_transfer.lock);

	/* Wait until any previous mailbox 5 request has been fetched. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
		cpu_relax();

	writeb(0, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB5));
	writeb(PRCMU_I2C_READ(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
	writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
	writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
	writeb(0, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));

	writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);

	if (!wait_for_completion_timeout(&mb5_transfer.work,
				msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
	} else {
		r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
	}

	/* Only propagate the read value on success. */
	if (!r)
		*value = mb5_transfer.ack.value;

	mutex_unlock(&mb5_transfer.lock);

	return r;
}

/**
 * prcmu_abb_write_masked() - Write masked register value(s) to the ABB.
 * @slave:	The I2C slave address.
 * @reg:	The (start) register address.
 * @value:	The value(s) to write.
 * @mask:	The mask(s) to use.
 * @size:	The number of registers to write.
 *
 * Writes masked register value(s) to the ABB.
 * For each @value, only the bits set to 1 in the corresponding @mask
 * will be written. The other bits are not changed.
 * @size has to be 1 for the current firmware version.
 */
int prcmu_abb_write_masked(u8 slave, u8 reg, u8 *value, u8 *mask, u8 size)
{
	int r;

	if (size != 1)
		return -EINVAL;

	mutex_lock(&mb5_transfer.lock);

	/* Wait until any previous mailbox 5 request has been fetched. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
		cpu_relax();

	/*
	 * The inverted mask goes into the MB5 header byte; the firmware
	 * preserves the bits that are cleared in @mask.
	 */
	writeb(~*mask, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB5));
	writeb(PRCMU_I2C_WRITE(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
	writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
	writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
	writeb(*value, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));

	writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);

	if (!wait_for_completion_timeout(&mb5_transfer.work,
				msecs_to_jiffies(20000))) {
		pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
			__func__);
		r = -EIO;
	} else {
		r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO);
	}

	mutex_unlock(&mb5_transfer.lock);

	return r;
}

/**
 * prcmu_abb_write() - Write register value(s) to the ABB.
 * @slave:	The I2C slave address.
 * @reg:	The (start) register address.
 * @value:	The value(s) to write.
 * @size:	The number of registers to write.
 *
 * Writes register value(s) to the ABB.
 * @size has to be 1 for the current firmware version.
 */
int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
{
	/* A full mask writes every bit of the register. */
	u8 mask = ~0;

	return prcmu_abb_write_masked(slave, reg, value, &mask, size);
}

/**
 * prcmu_ac_wake_req - should be called whenever ARM wants to wakeup Modem
 */
int prcmu_ac_wake_req(void)
{
	u32 val;
	int ret = 0;

	mutex_lock(&mb0_transfer.ac_wake_lock);

	val = readl(PRCM_HOSTACCESS_REQ);
	/* Nothing to do if a hostaccess request is already pending. */
	if (val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ)
		goto unlock_and_return;

	atomic_set(&ac_wake_req_state, 1);

	/*
	 * Force Modem Wake-up before hostaccess_req ping-pong.
2377 * It prevents Modem to enter in Sleep while acking the hostaccess 2378 * request. The 31us delay has been calculated by HWI. 2379 */ 2380 val |= PRCM_HOSTACCESS_REQ_WAKE_REQ; 2381 writel(val, PRCM_HOSTACCESS_REQ); 2382 2383 udelay(31); 2384 2385 val |= PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ; 2386 writel(val, PRCM_HOSTACCESS_REQ); 2387 2388 if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work, 2389 msecs_to_jiffies(5000))) { 2390 #if defined(CONFIG_DBX500_PRCMU_DEBUG) 2391 db8500_prcmu_debug_dump(__func__, true, true); 2392 #endif 2393 pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n", 2394 __func__); 2395 ret = -EFAULT; 2396 } 2397 2398 unlock_and_return: 2399 mutex_unlock(&mb0_transfer.ac_wake_lock); 2400 return ret; 2401 } 2402 2403 /** 2404 * prcmu_ac_sleep_req - called when ARM no longer needs to talk to modem 2405 */ 2406 void prcmu_ac_sleep_req() 2407 { 2408 u32 val; 2409 2410 mutex_lock(&mb0_transfer.ac_wake_lock); 2411 2412 val = readl(PRCM_HOSTACCESS_REQ); 2413 if (!(val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ)) 2414 goto unlock_and_return; 2415 2416 writel((val & ~PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ), 2417 PRCM_HOSTACCESS_REQ); 2418 2419 if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work, 2420 msecs_to_jiffies(5000))) { 2421 pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n", 2422 __func__); 2423 } 2424 2425 atomic_set(&ac_wake_req_state, 0); 2426 2427 unlock_and_return: 2428 mutex_unlock(&mb0_transfer.ac_wake_lock); 2429 } 2430 2431 bool db8500_prcmu_is_ac_wake_requested(void) 2432 { 2433 return (atomic_read(&ac_wake_req_state) != 0); 2434 } 2435 2436 /** 2437 * db8500_prcmu_system_reset - System reset 2438 * 2439 * Saves the reset reason code and then sets the APE_SOFTRST register which 2440 * fires interrupt to fw 2441 */ 2442 void db8500_prcmu_system_reset(u16 reset_code) 2443 { 2444 writew(reset_code, (tcdm_base + PRCM_SW_RST_REASON)); 2445 writel(1, PRCM_APE_SOFTRST); 2446 } 2447 2448 /** 2449 * 
db8500_prcmu_get_reset_code - Retrieve SW reset reason code
 *
 * Retrieves the reset reason code stored by prcmu_system_reset() before
 * last restart.
 */
u16 db8500_prcmu_get_reset_code(void)
{
	return readw(tcdm_base + PRCM_SW_RST_REASON);
}

/**
 * db8500_prcmu_modem_reset - ask the PRCMU to reset modem
 */
void db8500_prcmu_modem_reset(void)
{
	mutex_lock(&mb1_transfer.lock);

	/* Wait until any previous mailbox 1 request has been fetched. */
	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	writeb(MB1H_RESET_MODEM, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
	wait_for_completion(&mb1_transfer.work);

	/*
	 * No need to check return from PRCMU as modem should go in reset state
	 * This state is already managed by upper layer
	 */

	mutex_unlock(&mb1_transfer.lock);
}

/* Acknowledge a DBB wakeup towards the firmware over mailbox 0. */
static void ack_dbb_wakeup(void)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
		cpu_relax();

	writeb(MB0H_READ_WAKEUP_ACK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
	writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}

static inline void print_unknown_header_warning(u8 n, u8 header)
{
	pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n",
		header, n);
}

/*
 * Handle a wakeup/acknowledge event in mailbox 0.  Returns true when the
 * threaded handler must run (to send the wakeup acknowledge).
 */
static bool read_mailbox_0(void)
{
	bool r;
	u32 ev;
	unsigned int n;
	u8 header;

	header = readb(tcdm_base + PRCM_MBOX_HEADER_ACK_MB0);
	switch (header) {
	case MB0H_WAKEUP_EXE:
	case MB0H_WAKEUP_SLEEP:
		/*
		 * The read pointer selects which of the two event words
		 * holds the current wakeup bits.
		 */
		if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
			ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_1_8500);
		else
			ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_0_8500);

		if (ev & (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK))
			complete(&mb0_transfer.ac_wake_work);
		if (ev & WAKEUP_BIT_SYSCLK_OK)
			complete(&mb3_transfer.sysclk_work);

		/* Only dispatch events that are enabled as DBB interrupts. */
		ev &= mb0_transfer.req.dbb_irqs;

		for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
			if (ev & prcmu_irq_bit[n])
				generic_handle_irq(irq_find_mapping(db8500_irq_domain, n));
		}
		r = true;
		break;
	default:
		print_unknown_header_warning(0, header);
		r = false;
		break;
	}
	writel(MBOX_BIT(0), PRCM_ARM_IT1_CLR);
	return r;
}

/* Latch the ARM/APE OPP acknowledge data from mailbox 1. */
static bool read_mailbox_1(void)
{
	mb1_transfer.ack.header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1);
	mb1_transfer.ack.arm_opp = readb(tcdm_base +
		PRCM_ACK_MB1_CURRENT_ARM_OPP);
	mb1_transfer.ack.ape_opp = readb(tcdm_base +
		PRCM_ACK_MB1_CURRENT_APE_OPP);
	mb1_transfer.ack.ape_voltage_status = readb(tcdm_base +
		PRCM_ACK_MB1_APE_VOLTAGE_STATUS);
	writel(MBOX_BIT(1), PRCM_ARM_IT1_CLR);
	complete(&mb1_transfer.work);
	return false;
}

/* Latch the DPS acknowledge status from mailbox 2. */
static bool read_mailbox_2(void)
{
	mb2_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB2_DPS_STATUS);
	writel(MBOX_BIT(2), PRCM_ARM_IT1_CLR);
	complete(&mb2_transfer.work);
	return false;
}

static bool read_mailbox_3(void)
{
	writel(MBOX_BIT(3), PRCM_ARM_IT1_CLR);
	return false;
}

/* Complete the pending mailbox 4 request if its header is a known one. */
static bool read_mailbox_4(void)
{
	u8 header;
	bool do_complete = true;

	header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB4);
	switch (header) {
	case MB4H_MEM_ST:
	case MB4H_HOTDOG:
	case MB4H_HOTMON:
	case MB4H_HOT_PERIOD:
	case MB4H_A9WDOG_CONF:
	case MB4H_A9WDOG_EN:
	case MB4H_A9WDOG_DIS:
	case MB4H_A9WDOG_LOAD:
	case MB4H_A9WDOG_KICK:
		break;
	default:
		print_unknown_header_warning(4, header);
		do_complete = false;
		break;
	}

	writel(MBOX_BIT(4), PRCM_ARM_IT1_CLR);

	if (do_complete)
		complete(&mb4_transfer.work);

	return false;
}

/* Latch the I2C transfer status/value from mailbox 5. */
static bool read_mailbox_5(void)
{
	mb5_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB5_I2C_STATUS);
	mb5_transfer.ack.value = readb(tcdm_base + PRCM_ACK_MB5_I2C_VAL);
	writel(MBOX_BIT(5), PRCM_ARM_IT1_CLR);
	complete(&mb5_transfer.work);
	return false;
}

static bool read_mailbox_6(void)
{
	writel(MBOX_BIT(6), PRCM_ARM_IT1_CLR);
	return false;
}

static bool read_mailbox_7(void)
{
	writel(MBOX_BIT(7), PRCM_ARM_IT1_CLR);
	return false;
}

/* Per-mailbox handlers; a handler returns true to wake the irq thread. */
static bool (* const read_mailbox[NUM_MB])(void) = {
	read_mailbox_0,
	read_mailbox_1,
	read_mailbox_2,
	read_mailbox_3,
	read_mailbox_4,
	read_mailbox_5,
	read_mailbox_6,
	read_mailbox_7
};

/* Hard irq handler: dispatch each pending mailbox to its reader. */
static irqreturn_t prcmu_irq_handler(int irq, void *data)
{
	u32 bits;
	u8 n;
	irqreturn_t r;

	bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
	if (unlikely(!bits))
		return IRQ_NONE;

	r = IRQ_HANDLED;
	for (n = 0; bits; n++) {
		if (bits & MBOX_BIT(n)) {
			bits -= MBOX_BIT(n);
			if (read_mailbox[n]())
				r = IRQ_WAKE_THREAD;
		}
	}
	return r;
}

/* Threaded handler: acknowledge the DBB wakeup outside hard irq context. */
static irqreturn_t prcmu_irq_thread_fn(int irq, void *data)
{
	ack_dbb_wakeup();
	return IRQ_HANDLED;
}

/* Work item that pushes the current wakeup configuration to the firmware. */
static void prcmu_mask_work(struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.lock, flags);

	config_wakeups();

	spin_unlock_irqrestore(&mb0_transfer.lock, flags);
}

static void prcmu_irq_mask(struct irq_data *d)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);

	mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->hwirq];

	spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);

	/* Mask updates reach the firmware from process context. */
	if (d->irq != IRQ_PRCMU_CA_SLEEP)
		schedule_work(&mb0_transfer.mask_work);
}

static void prcmu_irq_unmask(struct irq_data *d)
{
	unsigned long flags;

	spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);

	mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->hwirq];

	spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);

	/* Mask updates reach the firmware from process context. */
	if (d->irq != IRQ_PRCMU_CA_SLEEP)
		schedule_work(&mb0_transfer.mask_work);
}

static void noop(struct irq_data *d)
{
}

static struct irq_chip prcmu_irq_chip = {
	.name = "prcmu",
	.irq_disable = prcmu_irq_mask,
	.irq_ack = noop,
	.irq_mask = prcmu_irq_mask,
	.irq_unmask = prcmu_irq_unmask,
};

/* Map a PRCMU firmware project id to a human-readable name. */
static char *fw_project_name(u8 project)
{
	switch (project) {
	case PRCMU_FW_PROJECT_U8500:
		return "U8500";
	case PRCMU_FW_PROJECT_U8500_C2:
		return "U8500 C2";
	case PRCMU_FW_PROJECT_U9500:
		return "U9500";
	case PRCMU_FW_PROJECT_U9500_C2:
		return "U9500 C2";
	case PRCMU_FW_PROJECT_U8520:
		return "U8520";
	case PRCMU_FW_PROJECT_U8420:
		return "U8420";
	default:
		return "Unknown";
	}
}

static int db8500_irq_map(struct irq_domain *d, unsigned int virq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &prcmu_irq_chip,
				handle_simple_irq);
	set_irq_flags(virq, IRQF_VALID);

	return 0;
}

static struct irq_domain_ops db8500_irq_ops = {
	.map = db8500_irq_map,
	.xlate = irq_domain_xlate_twocell,
};

/* Create the PRCMU wakeup irq domain; @np may be NULL (non-DT boot). */
static int db8500_irq_init(struct device_node *np)
{
	int irq_base = 0;
	int i;

	/* In the device tree case, just take some IRQs */
	if (!np)
		irq_base = IRQ_PRCMU_BASE;

	db8500_irq_domain = irq_domain_add_simple(
		np, NUM_PRCMU_WAKEUPS, irq_base,
		&db8500_irq_ops, NULL);

	if (!db8500_irq_domain) {
		pr_err("Failed to create irqdomain\n");
		return
			-ENOSYS;
	}

	/* All wakeups will be used, so create mappings for all */
	for (i = 0; i < NUM_PRCMU_WAKEUPS; i++)
		irq_create_mapping(db8500_irq_domain, i);

	return 0;
}

/*
 * Early init: map the TCPM/TCDM memories, read out the firmware version
 * and initialize all mailbox locks and completions.
 */
void __init db8500_prcmu_early_init(void)
{
	if (cpu_is_u8500v2() || cpu_is_u9540()) {
		void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);

		if (tcpm_base != NULL) {
			u32 version;
			version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
			fw_info.version.project = version & 0xFF;
			fw_info.version.api_version = (version >> 8) & 0xFF;
			fw_info.version.func_version = (version >> 16) & 0xFF;
			fw_info.version.errata = (version >> 24) & 0xFF;
			fw_info.valid = true;
			pr_info("PRCMU firmware: %s, version %d.%d.%d\n",
				fw_project_name(fw_info.version.project),
				(version >> 8) & 0xFF, (version >> 16) & 0xFF,
				(version >> 24) & 0xFF);
			iounmap(tcpm_base);
		}

		if (cpu_is_u9540())
			tcdm_base = ioremap_nocache(U8500_PRCMU_TCDM_BASE,
						SZ_4K + SZ_8K) + SZ_8K;
		else
			tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
	} else {
		pr_err("prcmu: Unsupported chip version\n");
		BUG();
	}

	spin_lock_init(&mb0_transfer.lock);
	spin_lock_init(&mb0_transfer.dbb_irqs_lock);
	mutex_init(&mb0_transfer.ac_wake_lock);
	init_completion(&mb0_transfer.ac_wake_work);
	mutex_init(&mb1_transfer.lock);
	init_completion(&mb1_transfer.work);
	mb1_transfer.ape_opp = APE_NO_CHANGE;
	mutex_init(&mb2_transfer.lock);
	init_completion(&mb2_transfer.work);
	spin_lock_init(&mb2_transfer.auto_pm_lock);
	spin_lock_init(&mb3_transfer.lock);
	mutex_init(&mb3_transfer.sysclk_lock);
	init_completion(&mb3_transfer.sysclk_work);
	mutex_init(&mb4_transfer.lock);
	init_completion(&mb4_transfer.work);
	mutex_init(&mb5_transfer.lock);
	init_completion(&mb5_transfer.work);

	INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
}

/* Drop the forced A9 PL/AXI clock enables left by the boot code. */
static void __init init_prcm_registers(void)
{
	u32 val;

	val = readl(PRCM_A9PL_FORCE_CLKEN);
	val &= ~(PRCM_A9PL_FORCE_CLKEN_PRCM_A9PL_FORCE_CLKEN |
		PRCM_A9PL_FORCE_CLKEN_PRCM_A9AXI_FORCE_CLKEN);
	writel(val, (PRCM_A9PL_FORCE_CLKEN));
}

/*
 * Power domain switches (ePODs) modeled as regulators for the DB8500 SoC
 */
static struct regulator_consumer_supply db8500_vape_consumers[] = {
	REGULATOR_SUPPLY("v-ape", NULL),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
	REGULATOR_SUPPLY("v-i2c", "nmk-i2c.4"),
	/* "v-mmc" changed to "vcore" in the mainline kernel */
	REGULATOR_SUPPLY("vcore", "sdi0"),
	REGULATOR_SUPPLY("vcore", "sdi1"),
	REGULATOR_SUPPLY("vcore", "sdi2"),
	REGULATOR_SUPPLY("vcore", "sdi3"),
	REGULATOR_SUPPLY("vcore", "sdi4"),
	REGULATOR_SUPPLY("v-dma", "dma40.0"),
	REGULATOR_SUPPLY("v-ape", "ab8500-usb.0"),
	/* "v-uart" changed to "vcore" in the mainline kernel */
	REGULATOR_SUPPLY("vcore", "uart0"),
	REGULATOR_SUPPLY("vcore", "uart1"),
	REGULATOR_SUPPLY("vcore", "uart2"),
	REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
	REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"),
	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
};

static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
	REGULATOR_SUPPLY("musb_1v8", "ab8500-usb.0"),
	/* AV8100 regulator */
	REGULATOR_SUPPLY("hdmi_1v8", "0-0070"),
};

static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = {
	REGULATOR_SUPPLY("vsupply", "b2r2_bus"),
	REGULATOR_SUPPLY("vsupply", "mcde"),
};

/* SVA MMDSP regulator switch */
static struct regulator_consumer_supply db8500_svammdsp_consumers[] = {
	REGULATOR_SUPPLY("sva-mmdsp", "cm_control"),
};

/* SVA pipe regulator switch */
static struct regulator_consumer_supply db8500_svapipe_consumers[] = {
	REGULATOR_SUPPLY("sva-pipe", "cm_control"),
};

/* SIA MMDSP regulator switch */
static struct regulator_consumer_supply db8500_siammdsp_consumers[] = {
	REGULATOR_SUPPLY("sia-mmdsp", "cm_control"),
};

/* SIA pipe regulator switch */
static struct regulator_consumer_supply db8500_siapipe_consumers[] = {
	REGULATOR_SUPPLY("sia-pipe", "cm_control"),
};

/* SGA regulator switch */
static struct regulator_consumer_supply db8500_sga_consumers[] = {
	REGULATOR_SUPPLY("v-mali", NULL),
};

/* ESRAM1 and 2 regulator switch */
static struct regulator_consumer_supply db8500_esram12_consumers[] = {
	REGULATOR_SUPPLY("esram12", "cm_control"),
};

/* ESRAM3 and 4 regulator switch */
static struct regulator_consumer_supply db8500_esram34_consumers[] = {
	REGULATOR_SUPPLY("v-esram34", "mcde"),
	REGULATOR_SUPPLY("esram34", "cm_control"),
	REGULATOR_SUPPLY("lcla_esram", "dma40.0"),
};

/* Init data for the ePOD switch regulators exposed by the PRCMU. */
static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
	[DB8500_REGULATOR_VAPE] = {
		.constraints = {
			.name = "db8500-vape",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
			.always_on = true,
		},
		.consumer_supplies = db8500_vape_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers),
	},
	[DB8500_REGULATOR_VARM] = {
		.constraints = {
			.name = "db8500-varm",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VMODEM] = {
		.constraints = {
			.name = "db8500-vmodem",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VPLL] = {
		.constraints = {
			.name = "db8500-vpll",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VSMPS1] = {
		.constraints = {
			.name = "db8500-vsmps1",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VSMPS2] = {
		.constraints = {
			.name = "db8500-vsmps2",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_vsmps2_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_vsmps2_consumers),
	},
	[DB8500_REGULATOR_VSMPS3] = {
		.constraints = {
			.name = "db8500-vsmps3",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_VRF1] = {
		.constraints = {
			.name = "db8500-vrf1",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
		/* dependency to u8500-vape is handled outside regulator framework */
		.constraints = {
			.name = "db8500-sva-mmdsp",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_svammdsp_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_svammdsp_consumers),
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
		.constraints = {
			/* "ret" means "retention" */
			.name = "db8500-sva-mmdsp-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SVAPIPE] = {
		/* dependency to u8500-vape is handled outside regulator framework */
		.constraints = {
			.name = "db8500-sva-pipe",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_svapipe_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_svapipe_consumers),
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
		/* dependency to u8500-vape is handled outside regulator framework */
		.constraints = {
			.name = "db8500-sia-mmdsp",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_siammdsp_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_siammdsp_consumers),
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
		.constraints = {
			.name = "db8500-sia-mmdsp-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_SIAPIPE] = {
		/* dependency to u8500-vape is handled outside regulator framework */
		.constraints = {
			.name = "db8500-sia-pipe",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_siapipe_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_siapipe_consumers),
	},
	[DB8500_REGULATOR_SWITCH_SGA] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-sga",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_sga_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_sga_consumers),

	},
	[DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
		.supply_regulator = "db8500-vape",
		.constraints = {
			.name = "db8500-b2r2-mcde",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_b2r2_mcde_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_b2r2_mcde_consumers),
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12] = {
		/*
		 * esram12 is set in retention and supplied by Vsafe when Vape is off,
		 * no need to hold Vape
		 */
		.constraints = {
			.name = "db8500-esram12",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_esram12_consumers,
		.num_consumer_supplies = ARRAY_SIZE(db8500_esram12_consumers),
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
		.constraints = {
			.name = "db8500-esram12-ret",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34] = {
		/*
		 * esram34 is set in retention and supplied by Vsafe when Vape is off,
		 * no need to hold Vape
		 */
		.constraints = {
			.name = "db8500-esram34",
			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		},
		.consumer_supplies = db8500_esram34_consumers,
.num_consumer_supplies = ARRAY_SIZE(db8500_esram34_consumers), 3055 }, 3056 [DB8500_REGULATOR_SWITCH_ESRAM34RET] = { 3057 .constraints = { 3058 .name = "db8500-esram34-ret", 3059 .valid_ops_mask = REGULATOR_CHANGE_STATUS, 3060 }, 3061 }, 3062 }; 3063 3064 static struct resource ab8500_resources[] = { 3065 [0] = { 3066 .start = IRQ_DB8500_AB8500, 3067 .end = IRQ_DB8500_AB8500, 3068 .flags = IORESOURCE_IRQ 3069 } 3070 }; 3071 3072 static struct mfd_cell db8500_prcmu_devs[] = { 3073 { 3074 .name = "db8500-prcmu-regulators", 3075 .of_compatible = "stericsson,db8500-prcmu-regulator", 3076 .platform_data = &db8500_regulators, 3077 .pdata_size = sizeof(db8500_regulators), 3078 }, 3079 { 3080 .name = "cpufreq-u8500", 3081 .of_compatible = "stericsson,cpufreq-u8500", 3082 .platform_data = &db8500_cpufreq_table, 3083 .pdata_size = sizeof(db8500_cpufreq_table), 3084 }, 3085 { 3086 .name = "ab8500-core", 3087 .of_compatible = "stericsson,ab8500", 3088 .num_resources = ARRAY_SIZE(ab8500_resources), 3089 .resources = ab8500_resources, 3090 .id = AB8500_VERSION_AB8500, 3091 }, 3092 }; 3093 3094 static void db8500_prcmu_update_cpufreq(void) 3095 { 3096 if (prcmu_has_arm_maxopp()) { 3097 db8500_cpufreq_table[3].frequency = 1000000; 3098 db8500_cpufreq_table[3].index = ARM_MAX_OPP; 3099 } 3100 } 3101 3102 /** 3103 * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic 3104 * 3105 */ 3106 static int db8500_prcmu_probe(struct platform_device *pdev) 3107 { 3108 struct ab8500_platform_data *ab8500_platdata = pdev->dev.platform_data; 3109 struct device_node *np = pdev->dev.of_node; 3110 int irq = 0, err = 0, i; 3111 3112 if (ux500_is_svp()) 3113 return -ENODEV; 3114 3115 init_prcm_registers(); 3116 3117 /* Clean up the mailbox interrupts after pre-kernel code. 
*/ 3118 writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR); 3119 3120 if (np) 3121 irq = platform_get_irq(pdev, 0); 3122 3123 if (!np || irq <= 0) 3124 irq = IRQ_DB8500_PRCMU1; 3125 3126 err = request_threaded_irq(irq, prcmu_irq_handler, 3127 prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL); 3128 if (err < 0) { 3129 pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n"); 3130 err = -EBUSY; 3131 goto no_irq_return; 3132 } 3133 3134 db8500_irq_init(np); 3135 3136 for (i = 0; i < ARRAY_SIZE(db8500_prcmu_devs); i++) { 3137 if (!strcmp(db8500_prcmu_devs[i].name, "ab8500-core")) { 3138 db8500_prcmu_devs[i].platform_data = ab8500_platdata; 3139 db8500_prcmu_devs[i].pdata_size = sizeof(struct ab8500_platform_data); 3140 } 3141 } 3142 3143 if (cpu_is_u8500v20_or_later()) 3144 prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET); 3145 3146 db8500_prcmu_update_cpufreq(); 3147 3148 err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs, 3149 ARRAY_SIZE(db8500_prcmu_devs), NULL, 0, NULL); 3150 if (err) { 3151 pr_err("prcmu: Failed to add subdevices\n"); 3152 return err; 3153 } 3154 3155 pr_info("DB8500 PRCMU initialized\n"); 3156 3157 no_irq_return: 3158 return err; 3159 } 3160 static const struct of_device_id db8500_prcmu_match[] = { 3161 { .compatible = "stericsson,db8500-prcmu"}, 3162 { }, 3163 }; 3164 3165 static struct platform_driver db8500_prcmu_driver = { 3166 .driver = { 3167 .name = "db8500-prcmu", 3168 .owner = THIS_MODULE, 3169 .of_match_table = db8500_prcmu_match, 3170 }, 3171 .probe = db8500_prcmu_probe, 3172 }; 3173 3174 static int __init db8500_prcmu_init(void) 3175 { 3176 return platform_driver_register(&db8500_prcmu_driver); 3177 } 3178 3179 core_initcall(db8500_prcmu_init); 3180 3181 MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com>"); 3182 MODULE_DESCRIPTION("DB8500 PRCM Unit driver"); 3183 MODULE_LICENSE("GPL v2"); 3184