--- intel_pstate.c (731e6b9753baa39074060221a76de97058c0ac2e)
+++ intel_pstate.c (1e4f63aecb53e48468661e922fc2fa3b83e55722)

 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * intel_pstate.c: Native P state management for Intel processors
  *
  * (C) Copyright 2012 Intel Corporation
  * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
  */

--- 2022 unchanged lines hidden ---

 }

 static int intel_pstate_get_max_freq(struct cpudata *cpu)
 {
         return global.turbo_disabled || global.no_turbo ?
                         cpu->pstate.max_freq : cpu->pstate.turbo_freq;
 }

-static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
-                                            struct cpudata *cpu)
+static void intel_pstate_update_perf_limits(struct cpudata *cpu,
+                                            unsigned int policy_min,
+                                            unsigned int policy_max)
 {
         int max_freq = intel_pstate_get_max_freq(cpu);
         int32_t max_policy_perf, min_policy_perf;
         int max_state, turbo_max;

         /*
          * HWP needs some special consideration, because on BDX the
          * HWP_REQUEST uses abstract value to represent performance
          * rather than pure ratios.
          */
         if (hwp_active) {
                 intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
         } else {
                 max_state = global.no_turbo || global.turbo_disabled ?
                         cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
                 turbo_max = cpu->pstate.turbo_pstate;
         }

-        max_policy_perf = max_state * policy->max / max_freq;
-        if (policy->max == policy->min) {
+        max_policy_perf = max_state * policy_max / max_freq;
+        if (policy_max == policy_min) {
                 min_policy_perf = max_policy_perf;
         } else {
-                min_policy_perf = max_state * policy->min / max_freq;
+                min_policy_perf = max_state * policy_min / max_freq;
                 min_policy_perf = clamp_t(int32_t, min_policy_perf,
                                           0, max_policy_perf);
         }

         pr_debug("cpu:%d max_state %d min_policy_perf:%d max_policy_perf:%d\n",
-                 policy->cpu, max_state,
-                 min_policy_perf, max_policy_perf);
+                 cpu->cpu, max_state, min_policy_perf, max_policy_perf);

         /* Normalize user input to [min_perf, max_perf] */
         if (per_cpu_limits) {
                 cpu->min_perf_ratio = min_policy_perf;
                 cpu->max_perf_ratio = max_policy_perf;
         } else {
                 int32_t global_min, global_max;

                 /* Global limits are in percent of the maximum turbo P-state. */
                 global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
                 global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
                 global_min = clamp_t(int32_t, global_min, 0, global_max);

-                pr_debug("cpu:%d global_min:%d global_max:%d\n", policy->cpu,
+                pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
                          global_min, global_max);

                 cpu->min_perf_ratio = max(min_policy_perf, global_min);
                 cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
                 cpu->max_perf_ratio = min(max_policy_perf, global_max);
                 cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);

                 /* Make sure min_perf <= max_perf */
                 cpu->min_perf_ratio = min(cpu->min_perf_ratio,
                                           cpu->max_perf_ratio);

         }
-        pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", policy->cpu,
+        pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
                  cpu->max_perf_ratio,
                  cpu->min_perf_ratio);
 }

 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
         struct cpudata *cpu;

         if (!policy->cpuinfo.max_freq)
                 return -ENODEV;

         pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
                  policy->cpuinfo.max_freq, policy->max);

         cpu = all_cpu_data[policy->cpu];
         cpu->policy = policy->policy;

         mutex_lock(&intel_pstate_limits_lock);

-        intel_pstate_update_perf_limits(policy, cpu);
+        intel_pstate_update_perf_limits(cpu, policy->min, policy->max);

         if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
                 /*
                  * NOHZ_FULL CPUs need this as the governor callback may not
                  * be invoked on them.
                  */
                 intel_pstate_clear_update_util_hook(policy->cpu);
                 intel_pstate_max_within_limits(cpu);

--- 12 unchanged lines hidden ---

                 intel_pstate_hwp_set(policy->cpu);
         }

         mutex_unlock(&intel_pstate_limits_lock);

         return 0;
 }

-static void intel_pstate_adjust_policy_max(struct cpufreq_policy *policy,
-                                           struct cpudata *cpu)
+static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
+                                           struct cpufreq_policy_data *policy)
 {
         if (!hwp_active &&
             cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
             policy->max < policy->cpuinfo.max_freq &&
             policy->max > cpu->pstate.max_freq) {
                 pr_debug("policy->max > max non turbo frequency\n");
                 policy->max = policy->cpuinfo.max_freq;
         }
 }

-static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
+static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
 {
         struct cpudata *cpu = all_cpu_data[policy->cpu];

         update_turbo_state();
         cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                      intel_pstate_get_max_freq(cpu));

-        if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
-            policy->policy != CPUFREQ_POLICY_PERFORMANCE)
-                return -EINVAL;
-
-        intel_pstate_adjust_policy_max(policy, cpu);
+        intel_pstate_adjust_policy_max(cpu, policy);

         return 0;
 }

 static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 {
         intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
 }

--- 83 unchanged lines hidden ---

         .resume         = intel_pstate_resume,
         .init           = intel_pstate_cpu_init,
         .exit           = intel_pstate_cpu_exit,
         .stop_cpu       = intel_pstate_stop_cpu,
         .update_limits  = intel_pstate_update_limits,
         .name           = "intel_pstate",
 };

-static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
+static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
 {
         struct cpudata *cpu = all_cpu_data[policy->cpu];

         update_turbo_state();
         cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                      intel_pstate_get_max_freq(cpu));

-        intel_pstate_adjust_policy_max(policy, cpu);
+        intel_pstate_adjust_policy_max(cpu, policy);

-        intel_pstate_update_perf_limits(policy, cpu);
+        intel_pstate_update_perf_limits(cpu, policy->min, policy->max);

         return 0;
 }

 /* Use of trace in passive mode:
  *
  * In passive mode the trace core_busy field (also known as the
  * performance field, and lablelled as such on the graphs; also known as

--- 558 unchanged lines hidden ---
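
The arithmetic inside intel_pstate_update_perf_limits() can be easier to follow outside the kernel context. Below is a minimal user-space sketch (not kernel code) of the non-per_cpu_limits path: it scales the policy's min/max frequencies into P-state ratio space and then merges them with the global percent-of-turbo limits in the same max/min order as the statements in the diff above. The function name model_update_perf_limits, the min_int/max_int helpers, and the sample numbers in main() are hypothetical stand-ins for illustration.

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

/*
 * Model of the non-per_cpu_limits path of intel_pstate_update_perf_limits():
 * scale the policy's min/max frequencies (kHz) into P-state ratio space,
 * then merge the result with the global percent-of-turbo limits.
 */
static void model_update_perf_limits(unsigned int policy_min_khz,
                                     unsigned int policy_max_khz,
                                     int max_state, int turbo_max,
                                     unsigned int max_freq_khz,
                                     int global_min_pct, int global_max_pct,
                                     int *min_perf_ratio, int *max_perf_ratio)
{
        /* max_state * policy_max / max_freq, as in the kernel function. */
        int max_policy_perf = max_state * policy_max_khz / max_freq_khz;
        int min_policy_perf;

        if (policy_max_khz == policy_min_khz) {
                min_policy_perf = max_policy_perf;
        } else {
                min_policy_perf = max_state * policy_min_khz / max_freq_khz;
                /* clamp_t(int32_t, min_policy_perf, 0, max_policy_perf) */
                min_policy_perf = max_int(0, min_int(min_policy_perf, max_policy_perf));
        }

        /* Global limits are percent of the maximum turbo P-state (DIV_ROUND_UP). */
        int global_max = (turbo_max * global_max_pct + 99) / 100;
        int global_min = (turbo_max * global_min_pct + 99) / 100;

        global_min = max_int(0, min_int(global_min, global_max));

        /* Same max/min ordering as the kernel code. */
        *min_perf_ratio = max_int(min_policy_perf, global_min);
        *min_perf_ratio = min_int(*min_perf_ratio, max_policy_perf);
        *max_perf_ratio = min_int(max_policy_perf, global_max);
        *max_perf_ratio = max_int(min_policy_perf, *max_perf_ratio);

        /* Make sure min_perf <= max_perf. */
        *min_perf_ratio = min_int(*min_perf_ratio, *max_perf_ratio);
}

int main(void)
{
        int min_ratio, max_ratio;

        /*
         * Hypothetical CPU: turbo ratio 40 (4.0 GHz), policy range 1.2-3.0 GHz,
         * global limits 20%-100% of turbo. Prints min_perf_ratio=12 max_perf_ratio=30.
         */
        model_update_perf_limits(1200000, 3000000, 40, 40, 4000000,
                                 20, 100, &min_ratio, &max_ratio);
        printf("min_perf_ratio=%d max_perf_ratio=%d\n", min_ratio, max_ratio);
        return 0;
}

Passing policy_min and policy_max as plain integers, rather than a whole struct cpufreq_policy, is what lets both callers in the diff share the helper: intel_pstate_set_policy() still has a struct cpufreq_policy, while the verify callbacks now only have a struct cpufreq_policy_data, and both simply pass policy->min and policy->max.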