/*
 * drivers/cpufreq/cpufreq_governor.h
 *
 * Header file for CPUFreq governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _CPUFREQ_GOVERNOR_H
#define _CPUFREQ_GOVERNOR_H

#include <linux/cpufreq.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/sysfs.h>

/*
 * The polling frequency depends on the capability of the processor. The
 * default polling frequency is 1000 times the transition latency of the
 * processor. The governor will work on any processor with transition
 * latency <= 10 ms, using an appropriate sampling rate.
 *
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work. All times here are in us
 * (microseconds).
 */
#define MIN_SAMPLING_RATE_RATIO			(2)
#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
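
/*
 * Illustrative sketch (not part of the original header; the helper name is
 * hypothetical and assumes <linux/jiffies.h> is reachable): governors
 * combine the constants above roughly like this to bound how fast they
 * sample, and derive the default sampling rate as
 * latency * LATENCY_MULTIPLIER, clamped to this minimum.
 */
static inline unsigned int example_min_sampling_rate(unsigned int latency_us)
{
	/* floor derived from the jiffy granularity of the deferred work */
	unsigned int rate = MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);

	/* and never sample faster than MIN_LATENCY_MULTIPLIER * latency */
	if (rate < latency_us * MIN_LATENCY_MULTIPLIER)
		rate = latency_us * MIN_LATENCY_MULTIPLIER;

	return rate;
}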

/* Ondemand Sampling types */
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};

/* Macro creating sysfs show routines */
#define show_one(_gov, file_name, object)				\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", _gov##_tuners.object);		\
}
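
/*
 * Example (illustrative; mirrors how a governor may use this macro): with a
 * file-scope "static struct od_dbs_tuners od_tuners;" in the governor's .c
 * file,
 *
 *	show_one(od, sampling_rate, sampling_rate);
 *
 * expands to a show_sampling_rate() routine that prints
 * od_tuners.sampling_rate, ready to be wired into a sysfs attribute.
 */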

#define define_get_cpu_dbs_routines(_dbs_info)				\
static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu)		\
{									\
	return &per_cpu(_dbs_info, cpu).cdbs;				\
}									\
									\
static void *get_cpu_dbs_info_s(int cpu)				\
{									\
	return &per_cpu(_dbs_info, cpu);				\
}
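
/*
 * Example (illustrative): a governor declares its per-CPU data and then
 * instantiates the accessors that the common code calls through dbs_data:
 *
 *	static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
 *	define_get_cpu_dbs_routines(od_cpu_dbs_info);
 *
 * get_cpu_cdbs(cpu) then returns the embedded cpu_dbs_common_info, while
 * get_cpu_dbs_info_s(cpu) returns the governor-specific wrapper structure.
 */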

/*
 * Abbreviations:
 * dbs: used as a short form for demand based switching. It helps to keep
 *	variable names smaller and simpler.
 * cdbs: common dbs
 * od_*: On-demand governor
 * cs_*: Conservative governor
 */

/* Per-CPU structures */
struct cpu_dbs_common_info {
	int cpu;
	u64 prev_cpu_idle;
	u64 prev_cpu_wall;
	u64 prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	/*
	 * Per-CPU mutex that serializes governor limit changes with
	 * gov_dbs_timer invocation. We do not want gov_dbs_timer to run when
	 * the user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
	ktime_t time_stamp;
};

struct od_cpu_dbs_info_s {
	struct cpu_dbs_common_info cdbs;
	u64 prev_cpu_iowait;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;		/* lower freq of the powersave_bias pair */
	unsigned int freq_lo_jiffies;	/* jiffies to spend at freq_lo */
	unsigned int freq_hi_jiffies;	/* jiffies to spend at the higher freq */
	unsigned int rate_mult;		/* sampling rate multiplier (sampling_down_factor) */
	unsigned int sample_type:1;	/* OD_NORMAL_SAMPLE or OD_SUB_SAMPLE */
};

struct cs_cpu_dbs_info_s {
	struct cpu_dbs_common_info cdbs;
	unsigned int down_skip;
	unsigned int requested_freq;	/* frequency last requested by the governor */
	unsigned int enable:1;
};

/* Governors' sysfs tunables */
struct od_dbs_tuners {
	unsigned int ignore_nice;
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
	unsigned int adj_up_threshold;
	unsigned int powersave_bias;
	unsigned int io_is_busy;
};

struct cs_dbs_tuners {
	unsigned int ignore_nice;
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
	unsigned int down_threshold;
	unsigned int freq_step;
};
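
/*
 * Example (illustrative values only; the real defaults live in the
 * governors themselves):
 *
 *	static struct od_dbs_tuners od_tuners = {
 *		.up_threshold = 80,
 *		.adj_up_threshold = 80 - 3,
 *		.sampling_down_factor = 1,
 *		.ignore_nice = 0,
 *		.powersave_bias = 0,
 *	};
 *
 * dbs_data->tuners points at such an object, and the sysfs show/store
 * routines generated above read and write its fields.
 */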

/* Per-governor data */
struct dbs_data {
	/* Common across governors */
	#define GOV_ONDEMAND		0
	#define GOV_CONSERVATIVE	1
	int governor;
	unsigned int min_sampling_rate;
	struct attribute_group *attr_group;
	void *tuners;

	/* dbs_mutex protects dbs_enable in governor start/stop */
	struct mutex mutex;

	struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
	void *(*get_cpu_dbs_info_s)(int cpu);
	void (*gov_dbs_timer)(struct work_struct *work);
	void (*gov_check_cpu)(int cpu, unsigned int load);

	/* Governor specific ops, see below */
	void *gov_ops;
};
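
/*
 * Example (illustrative; this is the pattern used by the ondemand governor,
 * exact symbol names may differ):
 *
 *	static struct dbs_data od_dbs_data = {
 *		.governor = GOV_ONDEMAND,
 *		.attr_group = &od_attr_group,
 *		.tuners = &od_tuners,
 *		.get_cpu_cdbs = get_cpu_cdbs,
 *		.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
 *		.gov_dbs_timer = od_dbs_timer,
 *		.gov_check_cpu = od_check_cpu,
 *		.gov_ops = &od_ops,
 *	};
 *
 * The common code only ever sees this structure, so the same helpers can
 * drive both the ondemand and conservative governors.
 */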

/* Governor specific ops, will be passed to dbs_data->gov_ops */
struct od_ops {
	int (*io_busy)(void);
	void (*powersave_bias_init_cpu)(int cpu);
	unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
			unsigned int freq_next, unsigned int relation);
	void (*freq_increase)(struct cpufreq_policy *p, unsigned int freq);
};

struct cs_ops {
	struct notifier_block *notifier_block;
};

static inline int delay_for_sampling_rate(unsigned int sampling_rate)
{
	int delay = usecs_to_jiffies(sampling_rate);

	/* We want all CPUs to do sampling nearly on the same jiffy */
	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	return delay;
}
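
/*
 * Worked example (assuming HZ=1000): sampling_rate = 10000 us gives
 * delay = usecs_to_jiffies(10000) = 10 jiffies. If jiffies is currently
 * 1003, delay becomes 10 - (1003 % 10) = 7, so the deferred work fires at
 * jiffy 1010, i.e. on a multiple of the sampling period. Every online CPU
 * does the same arithmetic, so their work items line up on (nearly) the
 * same jiffy instead of drifting apart.
 */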

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
		unsigned int sampling_rate);
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
		struct cpufreq_policy *policy, unsigned int event);
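
/*
 * Example (illustrative): each governor's cpufreq governor entry point is a
 * thin wrapper that forwards the START/STOP/LIMITS events to the common
 * code together with its own dbs_data instance, e.g.:
 *
 *	static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
 *			unsigned int event)
 *	{
 *		return cpufreq_governor_dbs(&od_dbs_data, policy, event);
 *	}
 */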
#endif /* _CPUFREQ_GOVERNOR_H */