/*
 * arch/arm/include/asm/mcpm.h
 *
 * Created by:  Nicolas Pitre, April 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef MCPM_H
#define MCPM_H

/*
 * Maximum number of possible clusters / CPUs per cluster.
 *
 * This should be sufficient for quite a while and keeps the (assembly)
 * code simpler.  When this starts to grow then we'll have to consider
 * dynamic allocation.
 */
#define MAX_CPUS_PER_CLUSTER	4
#define MAX_NR_CLUSTERS		2

#ifndef __ASSEMBLY__

/*
 * Platform specific code should use this symbol to set up the secondary
 * entry location for processors to use when released from reset.
 */
extern void mcpm_entry_point(void);

/*
 * This is used to indicate, via ptr, where the given CPU from the given
 * cluster should branch once it is ready to re-enter the kernel, or NULL
 * if it should be gated.  A gated CPU is held in a WFE loop until its
 * vector becomes non-NULL.
 */
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);

/*
 * CPU/cluster power operations API for higher-level subsystems to use.
 */

/**
 * mcpm_cpu_power_up - make the given CPU in the given cluster runnable
 *
 * @cpu: CPU number within given cluster
 * @cluster: cluster number for the CPU
 *
 * The identified CPU is brought out of reset.  If the cluster was powered
 * down then it is brought up as well, taking care not to let the other CPUs
 * in the cluster run, and ensuring appropriate cluster setup.
 *
 * Caller must ensure the appropriate entry vector is initialized with
 * mcpm_set_entry_vector() prior to calling this.
 *
 * This must be called in a sleepable context.  However, the implementation
 * is strongly encouraged to return early and let the operation happen
 * asynchronously, especially when significant delays are expected.
 *
 * If the operation cannot be performed then an error code is returned.
 */
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
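
/*
 * Example: a minimal sketch of how SMP bring-up code might combine
 * mcpm_set_entry_vector() and mcpm_cpu_power_up() to release a secondary
 * CPU.  The MPIDR decoding via cpu_logical_map()/MPIDR_AFFINITY_LEVEL()
 * and the secondary_startup entry point are assumptions about the
 * caller's environment (ARM SMP boot code); this header only requires
 * that the entry vector be set before the CPU is powered up.
 *
 *	extern void secondary_startup(void);
 *
 *	static int example_boot_secondary(unsigned int cpu, struct task_struct *idle)
 *	{
 *		unsigned int mpidr = cpu_logical_map(cpu);
 *		unsigned int pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 *		unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 *
 *		// Tell the woken CPU where to branch once it leaves reset.
 *		mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
 *
 *		// Bring the CPU (and its cluster, if needed) out of reset.
 *		return mcpm_cpu_power_up(pcpu, pcluster);
 *	}
 */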

/**
 * mcpm_cpu_power_down - power the calling CPU down
 *
 * The calling CPU is powered down.
 *
 * If this CPU is found to be the "last man standing" in the cluster
 * then the cluster is prepared for power-down too.
 *
 * This must be called with interrupts disabled.
 *
 * This does not return.  Re-entry into the kernel is expected via
 * mcpm_entry_point.
 */
void mcpm_cpu_power_down(void);

/**
 * mcpm_cpu_suspend - bring the calling CPU into a suspended state
 *
 * @expected_residency: duration in microseconds the CPU is expected
 *			to remain suspended, or 0 if unknown/infinity.
 *
 * The calling CPU is suspended.  The expected residency argument is used
 * as a hint by the platform specific backend to implement the appropriate
 * sleep state level according to the knowledge it has on wake-up latency
 * for the given hardware.
 *
 * If this CPU is found to be the "last man standing" in the cluster
 * then the cluster may be prepared for power-down too, if the expected
 * residency makes it worthwhile.
 *
 * This must be called with interrupts disabled.
 *
 * This does not return.  Re-entry into the kernel is expected via
 * mcpm_entry_point.
 */
void mcpm_cpu_suspend(u64 expected_residency);

/**
 * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
 *
 * This lets the platform specific backend code perform needed housekeeping
 * work.  This must be called by the newly activated CPU as soon as it is
 * fully operational in kernel space, before it enables interrupts.
 *
 * If the operation cannot be performed then an error code is returned.
 */
int mcpm_cpu_powered_up(void);
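
/*
 * Example: a sketch of what the "down" and "up" halves of a low power
 * path built on the calls above could look like, e.g. in a cpuidle
 * driver.  The example_* names are hypothetical; read_cpuid_mpidr(),
 * MPIDR_AFFINITY_LEVEL() and cpu_resume are assumptions about the
 * surrounding ARM code.  The points illustrated: in practice the entry
 * vector is pointed back at a kernel resume path before suspending,
 * mcpm_cpu_suspend() does not return, and mcpm_cpu_powered_up() runs on
 * the way back up before interrupts are re-enabled.
 *
 *	static int example_powerdown_finisher(unsigned long arg)
 *	{
 *		unsigned int mpidr = read_cpuid_mpidr();
 *		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 *		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 *
 *		// Re-enter the kernel through cpu_resume once woken up.
 *		mcpm_set_entry_vector(cpu, cluster, cpu_resume);
 *
 *		// Pass a real residency hint here if one is known.
 *		mcpm_cpu_suspend(0);
 *
 *		return 1;	// only reached if the suspend was aborted
 *	}
 *
 *	static void example_cpu_back_up(void)
 *	{
 *		// Called on the resumed CPU, interrupts still disabled.
 *		mcpm_cpu_powered_up();
 *	}
 */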

/*
 * Platform specific methods used in the implementation of the above API.
 */
struct mcpm_platform_ops {
	int (*power_up)(unsigned int cpu, unsigned int cluster);
	void (*power_down)(void);
	void (*suspend)(u64);
	void (*powered_up)(void);
};

/**
 * mcpm_platform_register - register platform specific power methods
 *
 * @ops: mcpm_platform_ops structure to register
 *
 * An error is returned if the registration has been done previously.
 */
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
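
/*
 * Example: a sketch of how a platform backend might implement and
 * register these methods.  The "foo" name and the low-level helpers
 * (foo_cpu_release(), foo_finish_powerdown(), foo_cluster_setup()) are
 * hypothetical placeholders for platform specific code; only struct
 * mcpm_platform_ops and mcpm_platform_register() come from this header.
 *
 *	static int foo_power_up(unsigned int cpu, unsigned int cluster)
 *	{
 *		return foo_cpu_release(cpu, cluster);
 *	}
 *
 *	static void foo_power_down(void)
 *	{
 *		foo_finish_powerdown(0);	// typically ends in WFI
 *	}
 *
 *	static void foo_suspend(u64 expected_residency)
 *	{
 *		foo_finish_powerdown(expected_residency);
 *	}
 *
 *	static void foo_powered_up(void)
 *	{
 *		foo_cluster_setup();
 *	}
 *
 *	static const struct mcpm_platform_ops foo_power_ops = {
 *		.power_up	= foo_power_up,
 *		.power_down	= foo_power_down,
 *		.suspend	= foo_suspend,
 *		.powered_up	= foo_powered_up,
 *	};
 *
 *	static int __init foo_mcpm_init(void)
 *	{
 *		return mcpm_platform_register(&foo_power_ops);
 *	}
 *	early_initcall(foo_mcpm_init);
 */

#endif /* ! __ASSEMBLY__ */
#endif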