   1/*
   2 *  linux/drivers/cpufreq/cpufreq.c
   3 *
   4 *  Copyright (C) 2001 Russell King
   5 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
   6 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
   7 *
   8 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
   9 *	Added handling for CPU hotplug
  10 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
  11 *	Fix handling for CPU hotplug -- affected CPUs
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License version 2 as
  15 * published by the Free Software Foundation.
  16 */
  17
  18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  19
  20#include <linux/cpu.h>
  21#include <linux/cpufreq.h>
 
  22#include <linux/delay.h>
  23#include <linux/device.h>
  24#include <linux/init.h>
  25#include <linux/kernel_stat.h>
  26#include <linux/module.h>
  27#include <linux/mutex.h>
 
  28#include <linux/slab.h>
  29#include <linux/suspend.h>
 
  30#include <linux/tick.h>
 
  31#include <trace/events/power.h>
  32
  33/**
  34 * The "cpufreq driver" - the arch- or hardware-dependent low
  35 * level driver of CPUFreq support, and its spinlock. This lock
  36 * also protects the cpufreq_cpu_data array.
  37 */
  38static struct cpufreq_driver *cpufreq_driver;
  39static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
  40static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
  41static DEFINE_RWLOCK(cpufreq_driver_lock);
  42DEFINE_MUTEX(cpufreq_governor_lock);
  43static LIST_HEAD(cpufreq_policy_list);
  44
  45/* This one keeps track of the previously set governor of a removed CPU */
  46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
  47
  48/* Flag to suspend/resume CPUFreq governors */
  49static bool cpufreq_suspended;
  50
  51static inline bool has_target(void)
  52{
  53	return cpufreq_driver->target_index || cpufreq_driver->target;
  54}
  55
  56/*
  57 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
  58 * sections
  59 */
  60static DECLARE_RWSEM(cpufreq_rwsem);
  61
  62/* internal prototypes */
  63static int __cpufreq_governor(struct cpufreq_policy *policy,
  64		unsigned int event);
  65static unsigned int __cpufreq_get(unsigned int cpu);
  66static void handle_update(struct work_struct *work);
  67
  68/**
  69 * Two notifier lists: the "policy" list is involved in the
  70 * validation process for a new CPU frequency policy; the
  71 * "transition" list for kernel code that needs to handle
  72 * changes to devices when the CPU clock speed changes.
  73 * The mutex locks both lists.
  74 */
  75static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
  76static struct srcu_notifier_head cpufreq_transition_notifier_list;
  77
  78static bool init_cpufreq_transition_notifier_list_called;
  79static int __init init_cpufreq_transition_notifier_list(void)
  80{
  81	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
  82	init_cpufreq_transition_notifier_list_called = true;
  83	return 0;
  84}
  85pure_initcall(init_cpufreq_transition_notifier_list);
  86
  87static int off __read_mostly;
  88static int cpufreq_disabled(void)
  89{
  90	return off;
  91}
  92void disable_cpufreq(void)
  93{
  94	off = 1;
  95}
  96static LIST_HEAD(cpufreq_governor_list);
  97static DEFINE_MUTEX(cpufreq_governor_mutex);
  98
  99bool have_governor_per_policy(void)
 100{
 101	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
 102}
 103EXPORT_SYMBOL_GPL(have_governor_per_policy);
 104
 
 
 105struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
 106{
 107	if (have_governor_per_policy())
 108		return &policy->kobj;
 109	else
 110		return cpufreq_global_kobject;
 111}
 112EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
 113
 114static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 115{
 116	u64 idle_time;
 117	u64 cur_wall_time;
 
 118	u64 busy_time;
 119
 120	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
 
 
 121
 122	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
 123	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
 124	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
 125	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
 126	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
 127	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 128
 129	idle_time = cur_wall_time - busy_time;
 130	if (wall)
 131		*wall = cputime_to_usecs(cur_wall_time);
 132
 133	return cputime_to_usecs(idle_time);
 134}
 135
 136u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
 137{
 138	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
 139
 140	if (idle_time == -1ULL)
 141		return get_cpu_idle_time_jiffy(cpu, wall);
 142	else if (!io_busy)
 143		idle_time += get_cpu_iowait_time_us(cpu, wall);
 144
 145	return idle_time;
 146}
 147EXPORT_SYMBOL_GPL(get_cpu_idle_time);
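/*
 * Illustrative sketch (not from this file): governors typically sample
 * get_cpu_idle_time() twice and derive a load estimate from the deltas.
 * The prev_* variables and surrounding bookkeeping are hypothetical.
 *
 *	u64 cur_wall_time, cur_idle_time;
 *	unsigned int wall_time, idle_time, load;
 *
 *	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time, io_busy);
 *	wall_time = (unsigned int)(cur_wall_time - prev_wall_time);
 *	idle_time = (unsigned int)(cur_idle_time - prev_idle_time);
 *	load = wall_time ? 100 * (wall_time - idle_time) / wall_time : 0;
 */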
 148
 149/*
 150 * This is a generic cpufreq init() routine which can be used by cpufreq
  151 * drivers of SMP systems. It will do the following:
  152 * - validate and show the frequency table passed
  153 * - set the policy's transition latency
  154 * - fill policy->cpus with all possible CPUs
 155 */
 156int cpufreq_generic_init(struct cpufreq_policy *policy,
 157		struct cpufreq_frequency_table *table,
 158		unsigned int transition_latency)
 159{
 160	int ret;
 161
 162	ret = cpufreq_table_validate_and_show(policy, table);
 163	if (ret) {
 164		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
 165		return ret;
 166	}
 167
 168	policy->cpuinfo.transition_latency = transition_latency;
 169
 170	/*
  171	 * The driver only supports the SMP configuration where all processors
  172	 * share the same clock and voltage.
 173	 */
 174	cpumask_setall(policy->cpus);
 175
 176	return 0;
 177}
 178EXPORT_SYMBOL_GPL(cpufreq_generic_init);
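/*
 * Illustrative sketch (not part of this file): a minimal driver ->init()
 * built on cpufreq_generic_init(). The table entries, the 300 us latency
 * and the my_* names are hypothetical.
 *
 *	static struct cpufreq_frequency_table my_freq_table[] = {
 *		{ .frequency = 396000 },
 *		{ .frequency = 792000 },
 *		{ .frequency = CPUFREQ_TABLE_END },
 *	};
 *
 *	static int my_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, my_freq_table, 300 * 1000);
 *	}
 */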
 179
 180unsigned int cpufreq_generic_get(unsigned int cpu)
 181{
 182	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 183
 184	if (!policy || IS_ERR(policy->clk)) {
 185		pr_err("%s: No %s associated to cpu: %d\n",
 186		       __func__, policy ? "clk" : "policy", cpu);
 187		return 0;
 188	}
 189
 190	return clk_get_rate(policy->clk) / 1000;
 191}
 192EXPORT_SYMBOL_GPL(cpufreq_generic_get);
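/*
 * Illustrative sketch (hypothetical driver code): cpufreq_generic_get() only
 * works if the driver stored a valid clock in policy->clk during ->init()
 * and points its ->get callback at the helper, e.g.:
 *
 *	policy->clk = clk_get(cpu_dev, NULL);	(in the driver's ->init())
 *
 *	static struct cpufreq_driver my_driver = {
 *		.get = cpufreq_generic_get,
 *		...
 *	};
 */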
 193
 194/* Only for cpufreq core internal use */
 195struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
 196{
 197	return per_cpu(cpufreq_cpu_data, cpu);
 198}
 199
 200struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 201{
 202	struct cpufreq_policy *policy = NULL;
 203	unsigned long flags;
 204
 205	if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
 206		return NULL;
 207
 208	if (!down_read_trylock(&cpufreq_rwsem))
 209		return NULL;
 210
 211	/* get the cpufreq driver */
 212	read_lock_irqsave(&cpufreq_driver_lock, flags);
 213
 214	if (cpufreq_driver) {
 215		/* get the CPU */
 216		policy = per_cpu(cpufreq_cpu_data, cpu);
 217		if (policy)
 218			kobject_get(&policy->kobj);
 219	}
 220
 221	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 222
 223	if (!policy)
 224		up_read(&cpufreq_rwsem);
 225
 226	return policy;
 227}
 228EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
 229
 230void cpufreq_cpu_put(struct cpufreq_policy *policy)
 231{
 232	if (cpufreq_disabled())
 233		return;
 234
 235	kobject_put(&policy->kobj);
 236	up_read(&cpufreq_rwsem);
 237}
 238EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
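/*
 * Illustrative usage sketch: callers pair cpufreq_cpu_get() with
 * cpufreq_cpu_put() and hold the reference only while they touch the
 * policy (cpufreq_quick_get() below follows the same pattern):
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *	unsigned int cur = 0;
 *
 *	if (policy) {
 *		cur = policy->cur;
 *		cpufreq_cpu_put(policy);
 *	}
 */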
 239
 240/*********************************************************************
 241 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 242 *********************************************************************/
 243
 244/**
 245 * adjust_jiffies - adjust the system "loops_per_jiffy"
 246 *
 247 * This function alters the system "loops_per_jiffy" for the clock
 248 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 249 * systems as each CPU might be scaled differently. So, use the arch
 250 * per-CPU loops_per_jiffy value wherever possible.
 251 */
 252#ifndef CONFIG_SMP
 253static unsigned long l_p_j_ref;
 254static unsigned int l_p_j_ref_freq;
 255
 256static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 257{
 258	if (ci->flags & CPUFREQ_CONST_LOOPS)
 259		return;
 260
 261	if (!l_p_j_ref_freq) {
 262		l_p_j_ref = loops_per_jiffy;
 263		l_p_j_ref_freq = ci->old;
 264		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
 265			 l_p_j_ref, l_p_j_ref_freq);
 266	}
 267	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
 268		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
 269								ci->new);
 270		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
 271			 loops_per_jiffy, ci->new);
 272	}
 273}
 274#else
 275static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 276{
 277	return;
 278}
 279#endif
 
 280
 281static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 282		struct cpufreq_freqs *freqs, unsigned int state)
 283{
 284	BUG_ON(irqs_disabled());
 285
 286	if (cpufreq_disabled())
 287		return;
 288
 
 289	freqs->flags = cpufreq_driver->flags;
 290	pr_debug("notification %u of frequency transition to %u kHz\n",
 291		 state, freqs->new);
 292
 293	switch (state) {
 294
 295	case CPUFREQ_PRECHANGE:
 296		/* detect if the driver reported a value as "old frequency"
 
 297		 * which is not equal to what the cpufreq core thinks is
 298		 * "old frequency".
 299		 */
 300		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
 301			if ((policy) && (policy->cpu == freqs->cpu) &&
 302			    (policy->cur) && (policy->cur != freqs->old)) {
 303				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
 304					 freqs->old, policy->cur);
 305				freqs->old = policy->cur;
 306			}
 307		}
 
 308		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 309				CPUFREQ_PRECHANGE, freqs);
 
 310		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
 311		break;
 312
 313	case CPUFREQ_POSTCHANGE:
 314		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
 315		pr_debug("FREQ: %lu - CPU: %lu\n",
 316			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
 317		trace_cpu_frequency(freqs->new, freqs->cpu);
 318		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 319				CPUFREQ_POSTCHANGE, freqs);
 320		if (likely(policy) && likely(policy->cpu == freqs->cpu))
 321			policy->cur = freqs->new;
 322		break;
 323	}
 324}
 325
 326/**
 327 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 328 * on frequency transition.
 329 *
 330 * This function calls the transition notifiers and the "adjust_jiffies"
 331 * function. It is called twice on all CPU frequency changes that have
 332 * external effects.
 333 */
 334static void cpufreq_notify_transition(struct cpufreq_policy *policy,
 335		struct cpufreq_freqs *freqs, unsigned int state)
 336{
 337	for_each_cpu(freqs->cpu, policy->cpus)
 338		__cpufreq_notify_transition(policy, freqs, state);
 339}
 340
 341/* Do post notifications when there are chances that transition has failed */
 342static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
 343		struct cpufreq_freqs *freqs, int transition_failed)
 344{
 345	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
 346	if (!transition_failed)
 347		return;
 348
 349	swap(freqs->old, freqs->new);
 350	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
 351	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
 352}
 353
 354void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
 355		struct cpufreq_freqs *freqs)
 356{
 357wait:
 358	wait_event(policy->transition_wait, !policy->transition_ongoing);
 359
 360	spin_lock(&policy->transition_lock);
 361
 362	if (unlikely(policy->transition_ongoing)) {
 363		spin_unlock(&policy->transition_lock);
 364		goto wait;
 365	}
 366
 367	policy->transition_ongoing = true;
 
 368
 369	spin_unlock(&policy->transition_lock);
 370
 371	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
 372}
 373EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
 374
 375void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
 376		struct cpufreq_freqs *freqs, int transition_failed)
 377{
 378	if (unlikely(WARN_ON(!policy->transition_ongoing)))
 379		return;
 380
 381	cpufreq_notify_post_transition(policy, freqs, transition_failed);
 382
 383	policy->transition_ongoing = false;
 
 384
 385	wake_up(&policy->transition_wait);
 386}
 387EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
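/*
 * Illustrative sketch (hypothetical driver): drivers that implement ->target()
 * themselves wrap the actual hardware change with the begin/end helpers so
 * that PRECHANGE/POSTCHANGE notifiers always run in pairs.
 * my_hw_set_frequency() is made up for illustration.
 *
 *	static int my_target(struct cpufreq_policy *policy,
 *			     unsigned int target_freq, unsigned int relation)
 *	{
 *		struct cpufreq_freqs freqs = {
 *			.old = policy->cur,
 *			.new = target_freq,
 *		};
 *		int ret;
 *
 *		cpufreq_freq_transition_begin(policy, &freqs);
 *		ret = my_hw_set_frequency(target_freq);
 *		cpufreq_freq_transition_end(policy, &freqs, ret);
 *
 *		return ret;
 *	}
 */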
 388
 389
 390/*********************************************************************
 391 *                          SYSFS INTERFACE                          *
 392 *********************************************************************/
 393static ssize_t show_boost(struct kobject *kobj,
 394				 struct attribute *attr, char *buf)
 395{
 396	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
 397}
 398
 399static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
 400				  const char *buf, size_t count)
 401{
 402	int ret, enable;
 403
 404	ret = sscanf(buf, "%d", &enable);
 405	if (ret != 1 || enable < 0 || enable > 1)
 406		return -EINVAL;
 407
 408	if (cpufreq_boost_trigger_state(enable)) {
 409		pr_err("%s: Cannot %s BOOST!\n",
 410		       __func__, enable ? "enable" : "disable");
 411		return -EINVAL;
 412	}
 413
 414	pr_debug("%s: cpufreq BOOST %s\n",
 415		 __func__, enable ? "enabled" : "disabled");
 416
 417	return count;
 418}
 419define_one_global_rw(boost);
 420
 421static struct cpufreq_governor *__find_governor(const char *str_governor)
 422{
 423	struct cpufreq_governor *t;
 424
 425	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
 426		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
 427			return t;
 428
 429	return NULL;
 430}
 431
 432/**
 433 * cpufreq_parse_governor - parse a governor string
 434 */
 435static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
 436				struct cpufreq_governor **governor)
 437{
 438	int err = -EINVAL;
 439
 440	if (!cpufreq_driver)
 441		goto out;
 
 
 442
 443	if (cpufreq_driver->setpolicy) {
 444		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
 445			*policy = CPUFREQ_POLICY_PERFORMANCE;
 446			err = 0;
 447		} else if (!strnicmp(str_governor, "powersave",
 448						CPUFREQ_NAME_LEN)) {
 449			*policy = CPUFREQ_POLICY_POWERSAVE;
 450			err = 0;
 451		}
 452	} else if (has_target()) {
 453		struct cpufreq_governor *t;
 454
 455		mutex_lock(&cpufreq_governor_mutex);
 
 456
 457		t = __find_governor(str_governor);
 
 458
 459		if (t == NULL) {
 460			int ret;
 
 
 461
 462			mutex_unlock(&cpufreq_governor_mutex);
 463			ret = request_module("cpufreq_%s", str_governor);
 464			mutex_lock(&cpufreq_governor_mutex);
 465
 466			if (ret == 0)
 467				t = __find_governor(str_governor);
 468		}
 469
 470		if (t != NULL) {
 471			*governor = t;
 472			err = 0;
 473		}
 474
 475		mutex_unlock(&cpufreq_governor_mutex);
 476	}
 477out:
 478	return err;
 479}
 480
 481/**
 482 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 483 * print out cpufreq information
 484 *
 485 * Write out information from cpufreq_driver->policy[cpu]; object must be
 486 * "unsigned int".
 487 */
 488
 489#define show_one(file_name, object)			\
 490static ssize_t show_##file_name				\
 491(struct cpufreq_policy *policy, char *buf)		\
 492{							\
 493	return sprintf(buf, "%u\n", policy->object);	\
 494}
 495
 496show_one(cpuinfo_min_freq, cpuinfo.min_freq);
 497show_one(cpuinfo_max_freq, cpuinfo.max_freq);
 498show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
 499show_one(scaling_min_freq, min);
 500show_one(scaling_max_freq, max);
 501show_one(scaling_cur_freq, cur);
 502
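/*
 * For reference, show_one(scaling_min_freq, min) above expands to roughly:
 *
 *	static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 */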
 503static int cpufreq_set_policy(struct cpufreq_policy *policy,
 504				struct cpufreq_policy *new_policy);
 
 
 505
 506/**
 507 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 508 */
 509#define store_one(file_name, object)			\
 510static ssize_t store_##file_name					\
 511(struct cpufreq_policy *policy, const char *buf, size_t count)		\
 512{									\
 
 513	int ret;							\
 514	struct cpufreq_policy new_policy;				\
 515									\
 516	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
 517	if (ret)							\
 518		return -EINVAL;						\
 519									\
 520	ret = sscanf(buf, "%u", &new_policy.object);			\
 521	if (ret != 1)							\
 522		return -EINVAL;						\
 523									\
 524	ret = cpufreq_set_policy(policy, &new_policy);		\
 525	policy->user_policy.object = policy->object;			\
 526									\
 527	return ret ? ret : count;					\
 528}
 529
 530store_one(scaling_min_freq, min);
 531store_one(scaling_max_freq, max);
 532
 533/**
 534 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 535 */
 536static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
 537					char *buf)
 538{
 539	unsigned int cur_freq = __cpufreq_get(policy->cpu);
 540	if (!cur_freq)
 541		return sprintf(buf, "<unknown>");
 542	return sprintf(buf, "%u\n", cur_freq);
 
 
 543}
 544
 545/**
 546 * show_scaling_governor - show the current policy for the specified CPU
 547 */
 548static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
 549{
 550	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
 551		return sprintf(buf, "powersave\n");
 552	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
 553		return sprintf(buf, "performance\n");
 554	else if (policy->governor)
 555		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
 556				policy->governor->name);
 557	return -EINVAL;
 558}
 559
 560/**
 561 * store_scaling_governor - store policy for the specified CPU
 562 */
 563static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
 564					const char *buf, size_t count)
 565{
 
 566	int ret;
 567	char	str_governor[16];
 568	struct cpufreq_policy new_policy;
 569
 570	ret = cpufreq_get_policy(&new_policy, policy->cpu);
 571	if (ret)
 572		return ret;
 573
 574	ret = sscanf(buf, "%15s", str_governor);
 575	if (ret != 1)
 576		return -EINVAL;
 577
 578	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
 579						&new_policy.governor))
 580		return -EINVAL;
 581
 582	ret = cpufreq_set_policy(policy, &new_policy);
 
 
 583
 584	policy->user_policy.policy = policy->policy;
 585	policy->user_policy.governor = policy->governor;
 
 586
 587	if (ret)
 588		return ret;
 589	else
 590		return count;
 591}
 592
 593/**
 594 * show_scaling_driver - show the cpufreq driver currently loaded
 595 */
 596static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
 597{
 598	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
 599}
 600
 601/**
 602 * show_scaling_available_governors - show the available CPUfreq governors
 603 */
 604static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
 605						char *buf)
 606{
 607	ssize_t i = 0;
 608	struct cpufreq_governor *t;
 609
 610	if (!has_target()) {
 611		i += sprintf(buf, "performance powersave");
 612		goto out;
 613	}
 614
 615	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
 
 616		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
 617		    - (CPUFREQ_NAME_LEN + 2)))
 618			goto out;
 619		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
 620	}
 
 621out:
 622	i += sprintf(&buf[i], "\n");
 623	return i;
 624}
 625
 626ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
 627{
 628	ssize_t i = 0;
 629	unsigned int cpu;
 630
 631	for_each_cpu(cpu, mask) {
 632		if (i)
 633			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
 634		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
 635		if (i >= (PAGE_SIZE - 5))
 636			break;
 637	}
 638	i += sprintf(&buf[i], "\n");
 639	return i;
 640}
 641EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
 642
 643/**
 644 * show_related_cpus - show the CPUs affected by each transition even if
 645 * hw coordination is in use
 646 */
 647static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
 648{
 649	return cpufreq_show_cpus(policy->related_cpus, buf);
 650}
 651
 652/**
 653 * show_affected_cpus - show the CPUs affected by each transition
 654 */
 655static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
 656{
 657	return cpufreq_show_cpus(policy->cpus, buf);
 658}
 659
 660static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
 661					const char *buf, size_t count)
 662{
 663	unsigned int freq = 0;
 664	unsigned int ret;
 665
 666	if (!policy->governor || !policy->governor->store_setspeed)
 667		return -EINVAL;
 668
 669	ret = sscanf(buf, "%u", &freq);
 670	if (ret != 1)
 671		return -EINVAL;
 672
 673	policy->governor->store_setspeed(policy, freq);
 674
 675	return count;
 676}
 677
 678static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
 679{
 680	if (!policy->governor || !policy->governor->show_setspeed)
 681		return sprintf(buf, "<unsupported>\n");
 682
 683	return policy->governor->show_setspeed(policy, buf);
 684}
 685
 686/**
 687 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 688 */
 689static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
 690{
 691	unsigned int limit;
 692	int ret;
 693	if (cpufreq_driver->bios_limit) {
 694		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
 695		if (!ret)
 696			return sprintf(buf, "%u\n", limit);
 697	}
 698	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
 699}
 700
 701cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
 702cpufreq_freq_attr_ro(cpuinfo_min_freq);
 703cpufreq_freq_attr_ro(cpuinfo_max_freq);
 704cpufreq_freq_attr_ro(cpuinfo_transition_latency);
 705cpufreq_freq_attr_ro(scaling_available_governors);
 706cpufreq_freq_attr_ro(scaling_driver);
 707cpufreq_freq_attr_ro(scaling_cur_freq);
 708cpufreq_freq_attr_ro(bios_limit);
 709cpufreq_freq_attr_ro(related_cpus);
 710cpufreq_freq_attr_ro(affected_cpus);
 711cpufreq_freq_attr_rw(scaling_min_freq);
 712cpufreq_freq_attr_rw(scaling_max_freq);
 713cpufreq_freq_attr_rw(scaling_governor);
 714cpufreq_freq_attr_rw(scaling_setspeed);
 715
 716static struct attribute *default_attrs[] = {
 717	&cpuinfo_min_freq.attr,
 718	&cpuinfo_max_freq.attr,
 719	&cpuinfo_transition_latency.attr,
 720	&scaling_min_freq.attr,
 721	&scaling_max_freq.attr,
 722	&affected_cpus.attr,
 723	&related_cpus.attr,
 724	&scaling_governor.attr,
 725	&scaling_driver.attr,
 726	&scaling_available_governors.attr,
 727	&scaling_setspeed.attr,
 728	NULL
 729};
 
 730
 731#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
 732#define to_attr(a) container_of(a, struct freq_attr, attr)
 733
 734static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 735{
 736	struct cpufreq_policy *policy = to_policy(kobj);
 737	struct freq_attr *fattr = to_attr(attr);
 738	ssize_t ret;
 739
 740	if (!down_read_trylock(&cpufreq_rwsem))
 741		return -EINVAL;
 742
 743	down_read(&policy->rwsem);
 744
 745	if (fattr->show)
 746		ret = fattr->show(policy, buf);
 747	else
 748		ret = -EIO;
 749
 750	up_read(&policy->rwsem);
 751	up_read(&cpufreq_rwsem);
 752
 753	return ret;
 754}
 755
 756static ssize_t store(struct kobject *kobj, struct attribute *attr,
 757		     const char *buf, size_t count)
 758{
 759	struct cpufreq_policy *policy = to_policy(kobj);
 760	struct freq_attr *fattr = to_attr(attr);
 761	ssize_t ret = -EINVAL;
 762
 763	get_online_cpus();
 764
 765	if (!cpu_online(policy->cpu))
 766		goto unlock;
 767
 768	if (!down_read_trylock(&cpufreq_rwsem))
 769		goto unlock;
 770
 771	down_write(&policy->rwsem);
 772
 773	if (fattr->store)
 774		ret = fattr->store(policy, buf, count);
 775	else
 776		ret = -EIO;
 777
 778	up_write(&policy->rwsem);
 779
 780	up_read(&cpufreq_rwsem);
 781unlock:
 782	put_online_cpus();
 783
 784	return ret;
 785}
 786
 787static void cpufreq_sysfs_release(struct kobject *kobj)
 788{
 789	struct cpufreq_policy *policy = to_policy(kobj);
 790	pr_debug("last reference is dropped\n");
 791	complete(&policy->kobj_unregister);
 792}
 793
 794static const struct sysfs_ops sysfs_ops = {
 795	.show	= show,
 796	.store	= store,
 797};
 798
 799static struct kobj_type ktype_cpufreq = {
 800	.sysfs_ops	= &sysfs_ops,
 801	.default_attrs	= default_attrs,
 802	.release	= cpufreq_sysfs_release,
 803};
 804
 805struct kobject *cpufreq_global_kobject;
 806EXPORT_SYMBOL(cpufreq_global_kobject);
 807
 808static int cpufreq_global_kobject_usage;
 809
 810int cpufreq_get_global_kobject(void)
 811{
 812	if (!cpufreq_global_kobject_usage++)
 813		return kobject_add(cpufreq_global_kobject,
 814				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");
 815
 816	return 0;
 817}
 818EXPORT_SYMBOL(cpufreq_get_global_kobject);
 819
 820void cpufreq_put_global_kobject(void)
 821{
 822	if (!--cpufreq_global_kobject_usage)
 823		kobject_del(cpufreq_global_kobject);
 824}
 825EXPORT_SYMBOL(cpufreq_put_global_kobject);
 826
 827int cpufreq_sysfs_create_file(const struct attribute *attr)
 828{
 829	int ret = cpufreq_get_global_kobject();
 830
 831	if (!ret) {
 832		ret = sysfs_create_file(cpufreq_global_kobject, attr);
 833		if (ret)
 834			cpufreq_put_global_kobject();
 835	}
 836
 837	return ret;
 838}
 839EXPORT_SYMBOL(cpufreq_sysfs_create_file);
 840
 841void cpufreq_sysfs_remove_file(const struct attribute *attr)
 842{
 843	sysfs_remove_file(cpufreq_global_kobject, attr);
 844	cpufreq_put_global_kobject();
 845}
 846EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
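/*
 * Illustrative sketch: a driver can expose a global attribute under
 * /sys/devices/system/cpu/cpufreq/ with these helpers. Assuming a
 * hypothetical "my_knob" attribute declared with define_one_global_rw(),
 * as done for "boost" above:
 *
 *	if (cpufreq_sysfs_create_file(&my_knob.attr))
 *		pr_err("failed to create my_knob\n");
 *	...
 *	cpufreq_sysfs_remove_file(&my_knob.attr);
 */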
 847
 848/* symlink affected CPUs */
 849static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
 850{
 851	unsigned int j;
 852	int ret = 0;
 853
 854	for_each_cpu(j, policy->cpus) {
 855		struct device *cpu_dev;
 856
 857		if (j == policy->cpu)
 858			continue;
 859
 860		pr_debug("Adding link for CPU: %u\n", j);
 861		cpu_dev = get_cpu_device(j);
 862		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
 863					"cpufreq");
 864		if (ret)
 865			break;
 866	}
 867	return ret;
 868}
 869
 870static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
 871				     struct device *dev)
 872{
 873	struct freq_attr **drv_attr;
 874	int ret = 0;
 875
 876	/* prepare interface data */
 877	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
 878				   &dev->kobj, "cpufreq");
 879	if (ret)
 880		return ret;
 881
 882	/* set up files for this cpu device */
 883	drv_attr = cpufreq_driver->attr;
 884	while ((drv_attr) && (*drv_attr)) {
 885		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
 886		if (ret)
 887			goto err_out_kobj_put;
 888		drv_attr++;
 889	}
 890	if (cpufreq_driver->get) {
 891		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
 892		if (ret)
 893			goto err_out_kobj_put;
 894	}
 895	if (has_target()) {
 896		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
 897		if (ret)
 898			goto err_out_kobj_put;
 899	}
 900	if (cpufreq_driver->bios_limit) {
 901		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
 902		if (ret)
 903			goto err_out_kobj_put;
 904	}
 905
 906	ret = cpufreq_add_dev_symlink(policy);
 907	if (ret)
 908		goto err_out_kobj_put;
 909
 910	return ret;
 911
 912err_out_kobj_put:
 913	kobject_put(&policy->kobj);
 914	wait_for_completion(&policy->kobj_unregister);
 915	return ret;
 916}
 917
 918static void cpufreq_init_policy(struct cpufreq_policy *policy)
 919{
 920	struct cpufreq_governor *gov = NULL;
 921	struct cpufreq_policy new_policy;
 922	int ret = 0;
 923
 924	memcpy(&new_policy, policy, sizeof(*policy));
 925
 926	/* Update governor of new_policy to the governor used before hotplug */
 927	gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
 928	if (gov)
 929		pr_debug("Restoring governor %s for cpu %d\n",
 930				policy->governor->name, policy->cpu);
 931	else
 932		gov = CPUFREQ_DEFAULT_GOVERNOR;
 933
 934	new_policy.governor = gov;
 935
  936	/* Use the default policy if it's valid. */
 937	if (cpufreq_driver->setpolicy)
 938		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
 939
 940	/* set default policy */
 941	ret = cpufreq_set_policy(policy, &new_policy);
 942	if (ret) {
 943		pr_debug("setting policy failed\n");
 944		if (cpufreq_driver->exit)
 945			cpufreq_driver->exit(policy);
 946	}
 947}
 948
 949#ifdef CONFIG_HOTPLUG_CPU
 950static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 951				  unsigned int cpu, struct device *dev)
 952{
 953	int ret = 0;
 954	unsigned long flags;
 955
 956	if (has_target()) {
 957		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 958		if (ret) {
 959			pr_err("%s: Failed to stop governor\n", __func__);
 960			return ret;
 961		}
 962	}
 963
 964	down_write(&policy->rwsem);
 965
 966	write_lock_irqsave(&cpufreq_driver_lock, flags);
 967
 968	cpumask_set_cpu(cpu, policy->cpus);
 969	per_cpu(cpufreq_cpu_data, cpu) = policy;
 970	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 971
 972	up_write(&policy->rwsem);
 
 
 973
 974	if (has_target()) {
 975		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
 976		if (!ret)
 977			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 978
 979		if (ret) {
 980			pr_err("%s: Failed to start governor\n", __func__);
 981			return ret;
 982		}
 983	}
 984
 985	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
 986}
 987#endif
 988
 989static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 
 990{
 991	struct cpufreq_policy *policy;
 992	unsigned long flags;
 993
 994	read_lock_irqsave(&cpufreq_driver_lock, flags);
 
 
 995
 996	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
 997
 998	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 999
1000	policy->governor = NULL;
1001
1002	return policy;
1003}
1004
1005static struct cpufreq_policy *cpufreq_policy_alloc(void)
1006{
1007	struct cpufreq_policy *policy;
1008
1009	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1010	if (!policy)
1011		return NULL;
1012
1013	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1014		goto err_free_policy;
1015
1016	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1017		goto err_free_cpumask;
1018
1019	INIT_LIST_HEAD(&policy->policy_list);
1020	init_rwsem(&policy->rwsem);
1021	spin_lock_init(&policy->transition_lock);
1022	init_waitqueue_head(&policy->transition_wait);
 
1023
 
1024	return policy;
1025
1026err_free_cpumask:
1027	free_cpumask_var(policy->cpus);
1028err_free_policy:
1029	kfree(policy);
1030
1031	return NULL;
1032}
1033
1034static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1035{
1036	struct kobject *kobj;
1037	struct completion *cmp;
1038
1039	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1040			CPUFREQ_REMOVE_POLICY, policy);
1041
1042	down_read(&policy->rwsem);
1043	kobj = &policy->kobj;
1044	cmp = &policy->kobj_unregister;
1045	up_read(&policy->rwsem);
1046	kobject_put(kobj);
1047
1048	/*
1049	 * We need to make sure that the underlying kobj is
1050	 * actually not referenced anymore by anybody before we
1051	 * proceed with unloading.
1052	 */
1053	pr_debug("waiting for dropping of refcount\n");
1054	wait_for_completion(cmp);
1055	pr_debug("wait complete\n");
1056}
1057
1058static void cpufreq_policy_free(struct cpufreq_policy *policy)
1059{
1060	free_cpumask_var(policy->related_cpus);
1061	free_cpumask_var(policy->cpus);
1062	kfree(policy);
1063}
1064
1065static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1066{
1067	if (WARN_ON(cpu == policy->cpu))
1068		return;
1069
1070	down_write(&policy->rwsem);
1071
1072	policy->last_cpu = policy->cpu;
1073	policy->cpu = cpu;
1074
1075	up_write(&policy->rwsem);
1076
1077	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1078			CPUFREQ_UPDATE_POLICY_CPU, policy);
1079}
1080
1081static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1082{
1083	unsigned int j, cpu = dev->id;
1084	int ret = -ENOMEM;
1085	struct cpufreq_policy *policy;
 
1086	unsigned long flags;
1087	bool recover_policy = cpufreq_suspended;
1088#ifdef CONFIG_HOTPLUG_CPU
1089	struct cpufreq_policy *tpolicy;
1090#endif
1091
1092	if (cpu_is_offline(cpu))
1093		return 0;
1094
1095	pr_debug("adding CPU %u\n", cpu);
1096
1097#ifdef CONFIG_SMP
1098	/* check whether a different CPU already registered this
1099	 * CPU because it is in the same boat. */
1100	policy = cpufreq_cpu_get(cpu);
1101	if (unlikely(policy)) {
1102		cpufreq_cpu_put(policy);
1103		return 0;
1104	}
1105#endif
1106
1107	if (!down_read_trylock(&cpufreq_rwsem))
1108		return 0;
 
1109
1110#ifdef CONFIG_HOTPLUG_CPU
1111	/* Check if this cpu was hot-unplugged earlier and has siblings */
1112	read_lock_irqsave(&cpufreq_driver_lock, flags);
1113	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
1114		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
1115			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1116			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
1117			up_read(&cpufreq_rwsem);
1118			return ret;
1119		}
1120	}
1121	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1122#endif
1123
1124	/*
1125	 * Restore the saved policy when doing light-weight init and fall back
1126	 * to the full init if that fails.
1127	 */
1128	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
1129	if (!policy) {
1130		recover_policy = false;
1131		policy = cpufreq_policy_alloc();
1132		if (!policy)
1133			goto nomem_out;
1134	}
1135
1136	/*
1137	 * In the resume path, since we restore a saved policy, the assignment
1138	 * to policy->cpu is like an update of the existing policy, rather than
1139	 * the creation of a brand new one. So we need to perform this update
1140	 * by invoking update_policy_cpu().
1141	 */
1142	if (recover_policy && cpu != policy->cpu)
1143		update_policy_cpu(policy, cpu);
1144	else
1145		policy->cpu = cpu;
1146
1147	cpumask_copy(policy->cpus, cpumask_of(cpu));
1148
1149	init_completion(&policy->kobj_unregister);
1150	INIT_WORK(&policy->update, handle_update);
1151
1152	/* call driver. From then on the cpufreq must be able
1153	 * to accept all calls to ->verify and ->setpolicy for this CPU
1154	 */
1155	ret = cpufreq_driver->init(policy);
1156	if (ret) {
1157		pr_debug("initialization failed\n");
1158		goto err_set_policy_cpu;
1159	}
1160
 1161	/* related_cpus should at least contain policy->cpus */
1162	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1163
1164	/*
 1165	 * The affected cpus must always be the ones that are online. We aren't
 1166	 * managing offline cpus here.
1167	 */
1168	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1169
1170	if (!recover_policy) {
1171		policy->user_policy.min = policy->min;
1172		policy->user_policy.max = policy->max;
1173	}
1174
1175	down_write(&policy->rwsem);
1176	write_lock_irqsave(&cpufreq_driver_lock, flags);
1177	for_each_cpu(j, policy->cpus)
1178		per_cpu(cpufreq_cpu_data, j) = policy;
1179	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1180
1181	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1182		policy->cur = cpufreq_driver->get(policy->cpu);
1183		if (!policy->cur) {
 
1184			pr_err("%s: ->get() failed\n", __func__);
1185			goto err_get_freq;
1186		}
1187	}
1188
1189	/*
1190	 * Sometimes boot loaders set CPU frequency to a value outside of
 1191	 * the frequency table present with the cpufreq core. In such cases the CPU
 1192	 * might be unstable if it has to run at that frequency for a long time,
 1193	 * so it is better to set it to a frequency that is specified in the
1194	 * freq-table. This also makes cpufreq stats inconsistent as
1195	 * cpufreq-stats would fail to register because current frequency of CPU
1196	 * isn't found in freq-table.
1197	 *
 1198	 * Because we don't want this change to affect the boot process badly, we go
1199	 * for the next freq which is >= policy->cur ('cur' must be set by now,
1200	 * otherwise we will end up setting freq to lowest of the table as 'cur'
1201	 * is initialized to zero).
1202	 *
1203	 * We are passing target-freq as "policy->cur - 1" otherwise
1204	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1205	 * equal to target-freq.
1206	 */
1207	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1208	    && has_target()) {
 
 
1209		/* Are we running at unknown frequency ? */
1210		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1211		if (ret == -EINVAL) {
1212			/* Warn user and fix it */
1213			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1214				__func__, policy->cpu, policy->cur);
1215			ret = __cpufreq_driver_target(policy, policy->cur - 1,
1216				CPUFREQ_RELATION_L);
1217
1218			/*
 1219			 * Reaching here a few seconds after boot does not
 1220			 * mean that the system will remain stable at the
 1221			 * "unknown" frequency for longer. Hence, the BUG_ON().
1222			 */
1223			BUG_ON(ret);
1224			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1225				__func__, policy->cpu, policy->cur);
1226		}
1227	}
1228
1229	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1230				     CPUFREQ_START, policy);
1231
1232	if (!recover_policy) {
1233		ret = cpufreq_add_dev_interface(policy, dev);
1234		if (ret)
1235			goto err_out_unregister;
1236		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1237				CPUFREQ_CREATE_POLICY, policy);
1238	}
1239
1240	write_lock_irqsave(&cpufreq_driver_lock, flags);
1241	list_add(&policy->policy_list, &cpufreq_policy_list);
1242	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1243
1244	cpufreq_init_policy(policy);
1245
1246	if (!recover_policy) {
1247		policy->user_policy.policy = policy->policy;
1248		policy->user_policy.governor = policy->governor;
 
 
1249	}
 
1250	up_write(&policy->rwsem);
1251
1252	kobject_uevent(&policy->kobj, KOBJ_ADD);
1253	up_read(&cpufreq_rwsem);
1254
1255	pr_debug("initialization complete\n");
1256
1257	return 0;
1258
1259err_out_unregister:
1260err_get_freq:
1261	write_lock_irqsave(&cpufreq_driver_lock, flags);
1262	for_each_cpu(j, policy->cpus)
1263		per_cpu(cpufreq_cpu_data, j) = NULL;
1264	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
1265
 
1266	if (cpufreq_driver->exit)
1267		cpufreq_driver->exit(policy);
1268err_set_policy_cpu:
1269	if (recover_policy) {
1270		/* Do not leave stale fallback data behind. */
1271		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1272		cpufreq_policy_put_kobj(policy);
1273	}
1274	cpufreq_policy_free(policy);
1275
1276nomem_out:
1277	up_read(&cpufreq_rwsem);
 
1278
 
1279	return ret;
1280}
1281
1282/**
1283 * cpufreq_add_dev - add a CPU device
1284 *
1285 * Adds the cpufreq interface for a CPU device.
1286 *
1287 * The Oracle says: try running cpufreq registration/unregistration concurrently
 1288 * with cpu hotplugging and all hell will break loose. Tried to clean this
1289 * mess up, but more thorough testing is needed. - Mathieu
1290 */
1291static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1292{
1293	return __cpufreq_add_dev(dev, sif);
1294}
1295
1296static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1297					   unsigned int old_cpu)
1298{
1299	struct device *cpu_dev;
1300	int ret;
1301
1302	/* first sibling now owns the new sysfs dir */
1303	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1304
1305	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1306	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1307	if (ret) {
1308		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1309
1310		down_write(&policy->rwsem);
1311		cpumask_set_cpu(old_cpu, policy->cpus);
1312		up_write(&policy->rwsem);
1313
1314		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1315					"cpufreq");
1316
1317		return -EINVAL;
1318	}
1319
1320	return cpu_dev->id;
1321}
1322
1323static int __cpufreq_remove_dev_prepare(struct device *dev,
1324					struct subsys_interface *sif)
1325{
1326	unsigned int cpu = dev->id, cpus;
1327	int new_cpu, ret;
1328	unsigned long flags;
1329	struct cpufreq_policy *policy;
1330
1331	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
 
1332
1333	write_lock_irqsave(&cpufreq_driver_lock, flags);
1334
1335	policy = per_cpu(cpufreq_cpu_data, cpu);
1336
1337	/* Save the policy somewhere when doing a light-weight tear-down */
1338	if (cpufreq_suspended)
1339		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1340
1341	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
1342
1343	if (!policy) {
1344		pr_debug("%s: No cpu_data found\n", __func__);
1345		return -EINVAL;
1346	}
1347
1348	if (has_target()) {
1349		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1350		if (ret) {
1351			pr_err("%s: Failed to stop governor\n", __func__);
1352			return ret;
1353		}
1354	}
1355
1356	if (!cpufreq_driver->setpolicy)
1357		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1358			policy->governor->name, CPUFREQ_NAME_LEN);
1359
1360	down_read(&policy->rwsem);
1361	cpus = cpumask_weight(policy->cpus);
1362	up_read(&policy->rwsem);
1363
1364	if (cpu != policy->cpu) {
1365		sysfs_remove_link(&dev->kobj, "cpufreq");
1366	} else if (cpus > 1) {
1367		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
1368		if (new_cpu >= 0) {
1369			update_policy_cpu(policy, new_cpu);
1370
1371			if (!cpufreq_suspended)
1372				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1373					 __func__, new_cpu, cpu);
1374		}
1375	} else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
1376		cpufreq_driver->stop_cpu(policy);
1377	}
1378
1379	return 0;
1380}
1381
1382static int __cpufreq_remove_dev_finish(struct device *dev,
1383				       struct subsys_interface *sif)
1384{
1385	unsigned int cpu = dev->id, cpus;
1386	int ret;
1387	unsigned long flags;
1388	struct cpufreq_policy *policy;
1389
1390	read_lock_irqsave(&cpufreq_driver_lock, flags);
1391	policy = per_cpu(cpufreq_cpu_data, cpu);
1392	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1393
 
1394	if (!policy) {
1395		pr_debug("%s: No cpu_data found\n", __func__);
1396		return -EINVAL;
1397	}
1398
1399	down_write(&policy->rwsem);
1400	cpus = cpumask_weight(policy->cpus);
1401
1402	if (cpus > 1)
1403		cpumask_clear_cpu(cpu, policy->cpus);
1404	up_write(&policy->rwsem);
1405
1406	/* If cpu is last user of policy, free policy */
1407	if (cpus == 1) {
1408		if (has_target()) {
1409			ret = __cpufreq_governor(policy,
1410					CPUFREQ_GOV_POLICY_EXIT);
1411			if (ret) {
1412				pr_err("%s: Failed to exit governor\n",
1413				       __func__);
1414				return ret;
1415			}
1416		}
1417
1418		if (!cpufreq_suspended)
1419			cpufreq_policy_put_kobj(policy);
1420
1421		/*
1422		 * Perform the ->exit() even during light-weight tear-down,
1423		 * since this is a core component, and is essential for the
1424		 * subsequent light-weight ->init() to succeed.
1425		 */
1426		if (cpufreq_driver->exit)
1427			cpufreq_driver->exit(policy);
1428
1429		/* Remove policy from list of active policies */
1430		write_lock_irqsave(&cpufreq_driver_lock, flags);
1431		list_del(&policy->policy_list);
1432		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1433
1434		if (!cpufreq_suspended)
1435			cpufreq_policy_free(policy);
1436	} else if (has_target()) {
1437		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1438		if (!ret)
1439			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1440
1441		if (ret) {
1442			pr_err("%s: Failed to start governor\n", __func__);
1443			return ret;
1444		}
1445	}
1446
1447	per_cpu(cpufreq_cpu_data, cpu) = NULL;
1448	return 0;
1449}
1450
1451/**
1452 * cpufreq_remove_dev - remove a CPU device
1453 *
1454 * Removes the cpufreq interface for a CPU device.
1455 */
1456static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1457{
1458	unsigned int cpu = dev->id;
1459	int ret;
1460
1461	if (cpu_is_offline(cpu))
1462		return 0;
1463
1464	ret = __cpufreq_remove_dev_prepare(dev, sif);
1465
1466	if (!ret)
1467		ret = __cpufreq_remove_dev_finish(dev, sif);
1468
1469	return ret;
1470}
1471
1472static void handle_update(struct work_struct *work)
1473{
1474	struct cpufreq_policy *policy =
1475		container_of(work, struct cpufreq_policy, update);
1476	unsigned int cpu = policy->cpu;
1477	pr_debug("handle_update for cpu %u called\n", cpu);
1478	cpufreq_update_policy(cpu);
1479}
1480
1481/**
 1482 *	cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're
1483 *	in deep trouble.
1484 *	@cpu: cpu number
1485 *	@old_freq: CPU frequency the kernel thinks the CPU runs at
1486 *	@new_freq: CPU frequency the CPU actually runs at
1487 *
 1488 *	We adjust to the current frequency first, and need to clean up later.
 1489 *	So either call cpufreq_update_policy() or schedule handle_update().
1490 */
1491static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1492				unsigned int new_freq)
1493{
1494	struct cpufreq_policy *policy;
1495	struct cpufreq_freqs freqs;
1496	unsigned long flags;
1497
1498	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1499		 old_freq, new_freq);
1500
1501	freqs.old = old_freq;
1502	freqs.new = new_freq;
1503
1504	read_lock_irqsave(&cpufreq_driver_lock, flags);
1505	policy = per_cpu(cpufreq_cpu_data, cpu);
1506	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1507
1508	cpufreq_freq_transition_begin(policy, &freqs);
1509	cpufreq_freq_transition_end(policy, &freqs, 0);
1510}
1511
1512/**
1513 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1514 * @cpu: CPU number
1515 *
1516 * This is the last known freq, without actually getting it from the driver.
1517 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1518 */
1519unsigned int cpufreq_quick_get(unsigned int cpu)
1520{
1521	struct cpufreq_policy *policy;
1522	unsigned int ret_freq = 0;
 
1523
1524	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1525		return cpufreq_driver->get(cpu);
1526
1527	policy = cpufreq_cpu_get(cpu);
1528	if (policy) {
1529		ret_freq = policy->cur;
1530		cpufreq_cpu_put(policy);
1531	}
1532
1533	return ret_freq;
1534}
1535EXPORT_SYMBOL(cpufreq_quick_get);
1536
1537/**
1538 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1539 * @cpu: CPU number
1540 *
1541 * Just return the max possible frequency for a given CPU.
1542 */
1543unsigned int cpufreq_quick_get_max(unsigned int cpu)
1544{
1545	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1546	unsigned int ret_freq = 0;
1547
1548	if (policy) {
1549		ret_freq = policy->max;
1550		cpufreq_cpu_put(policy);
1551	}
1552
1553	return ret_freq;
1554}
1555EXPORT_SYMBOL(cpufreq_quick_get_max);
1556
1557static unsigned int __cpufreq_get(unsigned int cpu)
1558{
1559	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1560	unsigned int ret_freq = 0;
1561
1562	if (!cpufreq_driver->get)
1563		return ret_freq;
1564
1565	ret_freq = cpufreq_driver->get(cpu);
1566
1567	if (ret_freq && policy->cur &&
1568		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1569		/* verify no discrepancy between actual and
1570					saved value exists */
1571		if (unlikely(ret_freq != policy->cur)) {
1572			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1573			schedule_work(&policy->update);
1574		}
1575	}
1576
1577	return ret_freq;
1578}
1579
1580/**
1581 * cpufreq_get - get the current CPU frequency (in kHz)
1582 * @cpu: CPU number
1583 *
 1584 * Get the current (static) CPU frequency
1585 */
1586unsigned int cpufreq_get(unsigned int cpu)
1587{
1588	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1589	unsigned int ret_freq = 0;
1590
1591	if (policy) {
1592		down_read(&policy->rwsem);
1593		ret_freq = __cpufreq_get(cpu);
 
1594		up_read(&policy->rwsem);
1595
1596		cpufreq_cpu_put(policy);
1597	}
1598
1599	return ret_freq;
1600}
1601EXPORT_SYMBOL(cpufreq_get);
1602
1603static struct subsys_interface cpufreq_interface = {
1604	.name		= "cpufreq",
1605	.subsys		= &cpu_subsys,
1606	.add_dev	= cpufreq_add_dev,
1607	.remove_dev	= cpufreq_remove_dev,
1608};
1609
1610/*
 1611 * In case the platform wants some specific frequency to be configured
 1612 * during suspend.
1613 */
1614int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1615{
1616	int ret;
1617
1618	if (!policy->suspend_freq) {
1619		pr_err("%s: suspend_freq can't be zero\n", __func__);
1620		return -EINVAL;
1621	}
1622
1623	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1624			policy->suspend_freq);
1625
1626	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1627			CPUFREQ_RELATION_H);
1628	if (ret)
1629		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1630				__func__, policy->suspend_freq, ret);
1631
1632	return ret;
1633}
1634EXPORT_SYMBOL(cpufreq_generic_suspend);
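/*
 * Illustrative sketch (hypothetical driver): to use cpufreq_generic_suspend(),
 * a driver sets policy->suspend_freq in its ->init() and points ->suspend at
 * the helper. The 792000 kHz value and the my_* names are made up.
 *
 *	policy->suspend_freq = 792000;		(in the driver's ->init())
 *
 *	static struct cpufreq_driver my_driver = {
 *		...
 *		.suspend = cpufreq_generic_suspend,
 *	};
 */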
1635
1636/**
1637 * cpufreq_suspend() - Suspend CPUFreq governors
1638 *
 1639 * Called during system-wide Suspend/Hibernate cycles to suspend governors,
 1640 * as some platforms can't change frequency after this point in the suspend
 1641 * cycle, because some of the devices (e.g. i2c, regulators) used for changing
 1642 * the frequency are themselves suspended shortly after this point.
1643 */
1644void cpufreq_suspend(void)
1645{
1646	struct cpufreq_policy *policy;
1647
1648	if (!cpufreq_driver)
1649		return;
1650
1651	if (!has_target())
1652		return;
1653
1654	pr_debug("%s: Suspending Governors\n", __func__);
1655
1656	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1657		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1658			pr_err("%s: Failed to stop governor for policy: %p\n",
1659				__func__, policy);
1660		else if (cpufreq_driver->suspend
1661		    && cpufreq_driver->suspend(policy))
1662			pr_err("%s: Failed to suspend driver: %p\n", __func__,
1663				policy);
 
 
1664	}
1665
 
1666	cpufreq_suspended = true;
1667}
1668
1669/**
1670 * cpufreq_resume() - Resume CPUFreq governors
1671 *
1672 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1673 * are suspended with cpufreq_suspend().
1674 */
1675void cpufreq_resume(void)
1676{
1677	struct cpufreq_policy *policy;
 
1678
1679	if (!cpufreq_driver)
1680		return;
1681
1682	if (!has_target())
1683		return;
1684
1685	pr_debug("%s: Resuming Governors\n", __func__);
1686
1687	cpufreq_suspended = false;
1688
1689	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1690		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1691			pr_err("%s: Failed to resume driver: %p\n", __func__,
1692				policy);
1693		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1694		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1695			pr_err("%s: Failed to start governor for policy: %p\n",
1696				__func__, policy);
1697
1698		/*
 1699		 * Schedule a call to cpufreq_update_policy() for the boot CPU,
 1700		 * i.e. the last policy in the list. It will verify that the
 1701		 * current freq is in sync with what we believe it to be.
1702		 */
1703		if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
1704			schedule_work(&policy->update);
1705	}
1706}
1707
1708/**
1709 *	cpufreq_get_current_driver - return current driver's name
1710 *
1711 *	Return the name string of the currently loaded cpufreq driver
1712 *	or NULL, if none.
1713 */
1714const char *cpufreq_get_current_driver(void)
1715{
1716	if (cpufreq_driver)
1717		return cpufreq_driver->name;
1718
1719	return NULL;
1720}
1721EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1722
1723/*********************************************************************
1724 *                     NOTIFIER LISTS INTERFACE                      *
1725 *********************************************************************/
1726
1727/**
1728 *	cpufreq_register_notifier - register a driver with cpufreq
1729 *	@nb: notifier function to register
1730 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1731 *
1732 *	Add a driver to one of two lists: either a list of drivers that
1733 *      are notified about clock rate changes (once before and once after
1734 *      the transition), or a list of drivers that are notified about
1735 *      changes in cpufreq policy.
1736 *
1737 *	This function may sleep, and has the same return conditions as
1738 *	blocking_notifier_chain_register.
1739 */
1740int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1741{
1742	int ret;
1743
1744	if (cpufreq_disabled())
1745		return -EINVAL;
1746
1747	WARN_ON(!init_cpufreq_transition_notifier_list_called);
1748
1749	switch (list) {
1750	case CPUFREQ_TRANSITION_NOTIFIER:
1751		ret = srcu_notifier_chain_register(
1752				&cpufreq_transition_notifier_list, nb);
1753		break;
1754	case CPUFREQ_POLICY_NOTIFIER:
1755		ret = blocking_notifier_chain_register(
1756				&cpufreq_policy_notifier_list, nb);
1757		break;
1758	default:
1759		ret = -EINVAL;
1760	}
1761
1762	return ret;
1763}
1764EXPORT_SYMBOL(cpufreq_register_notifier);
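/*
 * Illustrative sketch: a typical transition notifier registered with
 * CPUFREQ_TRANSITION_NOTIFIER. The my_* names are hypothetical.
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u now at %u kHz\n", freqs->cpu, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_transition_cb };
 *	...
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */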
1765
1766/**
1767 *	cpufreq_unregister_notifier - unregister a driver with cpufreq
1768 *	@nb: notifier block to be unregistered
1769 *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1770 *
1771 *	Remove a driver from the CPU frequency notifier list.
1772 *
1773 *	This function may sleep, and has the same return conditions as
1774 *	blocking_notifier_chain_unregister.
1775 */
1776int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1777{
1778	int ret;
1779
1780	if (cpufreq_disabled())
1781		return -EINVAL;
1782
1783	switch (list) {
1784	case CPUFREQ_TRANSITION_NOTIFIER:
1785		ret = srcu_notifier_chain_unregister(
1786				&cpufreq_transition_notifier_list, nb);
1787		break;
1788	case CPUFREQ_POLICY_NOTIFIER:
1789		ret = blocking_notifier_chain_unregister(
1790				&cpufreq_policy_notifier_list, nb);
1791		break;
1792	default:
1793		ret = -EINVAL;
1794	}
1795
1796	return ret;
1797}
1798EXPORT_SYMBOL(cpufreq_unregister_notifier);
1799
1800
1801/*********************************************************************
1802 *                              GOVERNORS                            *
1803 *********************************************************************/
1804
1805int __cpufreq_driver_target(struct cpufreq_policy *policy,
1806			    unsigned int target_freq,
1807			    unsigned int relation)
1808{
1809	int retval = -EINVAL;
1810	unsigned int old_target_freq = target_freq;
1811
1812	if (cpufreq_disabled())
1813		return -ENODEV;
1814
1815	/* Make sure that target_freq is within supported range */
1816	if (target_freq > policy->max)
1817		target_freq = policy->max;
1818	if (target_freq < policy->min)
1819		target_freq = policy->min;
1820
1821	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1822		 policy->cpu, target_freq, relation, old_target_freq);
1823
1824	/*
1825	 * This might look like a redundant call as we are checking it again
 1826	 * after finding the index. But it is left intentionally for cases where
 1827	 * exactly the same freq is requested again, so we can save a few function
 1828	 * calls.
1829	 */
1830	if (target_freq == policy->cur)
 
1831		return 0;
1832
1833	if (cpufreq_driver->target)
1834		retval = cpufreq_driver->target(policy, target_freq, relation);
1835	else if (cpufreq_driver->target_index) {
1836		struct cpufreq_frequency_table *freq_table;
1837		struct cpufreq_freqs freqs;
1838		bool notify;
1839		int index;
1840
1841		freq_table = cpufreq_frequency_get_table(policy->cpu);
1842		if (unlikely(!freq_table)) {
1843			pr_err("%s: Unable to find freq_table\n", __func__);
1844			goto out;
1845		}
1846
1847		retval = cpufreq_frequency_table_target(policy, freq_table,
1848				target_freq, relation, &index);
1849		if (unlikely(retval)) {
1850			pr_err("%s: Unable to find matching freq\n", __func__);
1851			goto out;
1852		}
1853
1854		if (freq_table[index].frequency == policy->cur) {
1855			retval = 0;
1856			goto out;
1857		}
1858
1859		notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1860
1861		if (notify) {
1862			freqs.old = policy->cur;
1863			freqs.new = freq_table[index].frequency;
1864			freqs.flags = 0;
1865
1866			pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1867				 __func__, policy->cpu, freqs.old, freqs.new);
1868
1869			cpufreq_freq_transition_begin(policy, &freqs);
1870		}
1871
1872		retval = cpufreq_driver->target_index(policy, index);
1873		if (retval)
1874			pr_err("%s: Failed to change cpu frequency: %d\n",
1875			       __func__, retval);
1876
1877		if (notify)
1878			cpufreq_freq_transition_end(policy, &freqs, retval);
1879	}
1880
1881out:
1882	return retval;
 
 
1883}
1884EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1885
1886int cpufreq_driver_target(struct cpufreq_policy *policy,
1887			  unsigned int target_freq,
1888			  unsigned int relation)
1889{
1890	int ret = -EINVAL;
1891
1892	down_write(&policy->rwsem);
1893
1894	ret = __cpufreq_driver_target(policy, target_freq, relation);
1895
1896	up_write(&policy->rwsem);
1897
1898	return ret;
1899}
1900EXPORT_SYMBOL_GPL(cpufreq_driver_target);
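/*
 * Illustrative sketch, not part of this file: requesting a frequency change
 * through the locked wrapper above, assuming the caller already holds a
 * reference to "policy" (e.g. from cpufreq_cpu_get()) but does not hold
 * policy->rwsem, which cpufreq_driver_target() takes itself.
 *
 *	ret = cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
 */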
1901
1902/*
1903 * __cpufreq_governor - forward a governor event (INIT/EXIT/START/STOP/LIMITS)
 * to policy->governor, handling the latency-based fallback to "performance".
1904 */
 
1905
1906static int __cpufreq_governor(struct cpufreq_policy *policy,
1907					unsigned int event)
1908{
1909	int ret;
1910
1911	/* This fallback only needs to be defined when the default governor is
1912	   known to have latency restrictions, e.g. conservative or ondemand.
1913	   Kconfig already ensures that this is the case.
1914	*/
1915#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1916	struct cpufreq_governor *gov = &cpufreq_gov_performance;
1917#else
1918	struct cpufreq_governor *gov = NULL;
1919#endif
1920
1921	/* Don't start any governor operations if we are entering suspend */
1922	if (cpufreq_suspended)
1923		return 0;
 
 
 
 
 
 
1924
1925	if (policy->governor->max_transition_latency &&
1926	    policy->cpuinfo.transition_latency >
1927	    policy->governor->max_transition_latency) {
1928		if (!gov)
1929			return -EINVAL;
1930		else {
1931			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
1932				policy->governor->name, gov->name);
1933			policy->governor = gov;
 
 
1934		}
1935	}
1936
1937	if (event == CPUFREQ_GOV_POLICY_INIT)
1938		if (!try_module_get(policy->governor->owner))
1939			return -EINVAL;
1940
1941	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1942		 policy->cpu, event);
1943
1944	mutex_lock(&cpufreq_governor_lock);
1945	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1946	    || (!policy->governor_enabled
1947	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1948		mutex_unlock(&cpufreq_governor_lock);
1949		return -EBUSY;
1950	}
1951
1952	if (event == CPUFREQ_GOV_STOP)
1953		policy->governor_enabled = false;
1954	else if (event == CPUFREQ_GOV_START)
1955		policy->governor_enabled = true;
 
 
 
 
 
1956
1957	mutex_unlock(&cpufreq_governor_lock);
1958
1959	ret = policy->governor->governor(policy, event);
 
1960
1961	if (!ret) {
1962		if (event == CPUFREQ_GOV_POLICY_INIT)
1963			policy->governor->initialized++;
1964		else if (event == CPUFREQ_GOV_POLICY_EXIT)
1965			policy->governor->initialized--;
1966	} else {
1967		/* Restore original values */
1968		mutex_lock(&cpufreq_governor_lock);
1969		if (event == CPUFREQ_GOV_STOP)
1970			policy->governor_enabled = true;
1971		else if (event == CPUFREQ_GOV_START)
1972			policy->governor_enabled = false;
1973		mutex_unlock(&cpufreq_governor_lock);
1974	}
1975
1976	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1977			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1978		module_put(policy->governor->owner);
1979
1980	return ret;
1981}
1982
1983int cpufreq_register_governor(struct cpufreq_governor *governor)
1984{
1985	int err;
1986
1987	if (!governor)
1988		return -EINVAL;
1989
1990	if (cpufreq_disabled())
1991		return -ENODEV;
1992
1993	mutex_lock(&cpufreq_governor_mutex);
1994
1995	governor->initialized = 0;
1996	err = -EBUSY;
1997	if (__find_governor(governor->name) == NULL) {
1998		err = 0;
1999		list_add(&governor->governor_list, &cpufreq_governor_list);
2000	}
2001
2002	mutex_unlock(&cpufreq_governor_mutex);
2003	return err;
2004}
2005EXPORT_SYMBOL_GPL(cpufreq_register_governor);
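/*
 * Illustrative sketch, not part of this file: a minimal governor using the
 * single-callback interface invoked above (policy->governor->governor()),
 * modelled loosely on the performance governor. The example_* names are
 * hypothetical.
 *
 *	static int example_governor_fn(struct cpufreq_policy *policy,
 *				       unsigned int event)
 *	{
 *		if (event == CPUFREQ_GOV_START || event == CPUFREQ_GOV_LIMITS)
 *			__cpufreq_driver_target(policy, policy->max,
 *						CPUFREQ_RELATION_H);
 *		return 0;
 *	}
 *
 *	static struct cpufreq_governor example_governor = {
 *		.name		= "example",
 *		.governor	= example_governor_fn,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	ret = cpufreq_register_governor(&example_governor);
 */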
2006
2007void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2008{
2009	int cpu;
 
2010
2011	if (!governor)
2012		return;
2013
2014	if (cpufreq_disabled())
2015		return;
2016
2017	for_each_present_cpu(cpu) {
2018		if (cpu_online(cpu))
2019			continue;
2020		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2021			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
 
 
2022	}
 
2023
2024	mutex_lock(&cpufreq_governor_mutex);
2025	list_del(&governor->governor_list);
2026	mutex_unlock(&cpufreq_governor_mutex);
2027	return;
2028}
2029EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2030
2031
2032/*********************************************************************
2033 *                          POLICY INTERFACE                         *
2034 *********************************************************************/
2035
2036/**
2037 * cpufreq_get_policy - get the current cpufreq_policy
2038 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2039 *	is written
 * @cpu: CPU whose current policy is to be read
2040 *
2041 * Reads the current cpufreq policy.
2042 */
2043int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2044{
2045	struct cpufreq_policy *cpu_policy;
2046	if (!policy)
2047		return -EINVAL;
2048
2049	cpu_policy = cpufreq_cpu_get(cpu);
2050	if (!cpu_policy)
2051		return -EINVAL;
2052
2053	memcpy(policy, cpu_policy, sizeof(*policy));
2054
2055	cpufreq_cpu_put(cpu_policy);
2056	return 0;
2057}
2058EXPORT_SYMBOL(cpufreq_get_policy);
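/*
 * Illustrative sketch, not part of this file: taking a snapshot of the
 * current limits for a (hypothetical) CPU number. The copy lives on the
 * caller's stack, so no reference is held afterwards.
 *
 *	struct cpufreq_policy snapshot;
 *
 *	if (!cpufreq_get_policy(&snapshot, cpu))
 *		pr_info("cpu%u: %u - %u kHz\n", cpu,
 *			snapshot.min, snapshot.max);
 */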
2059
2060/*
2061 * policy: current policy.
2062 * new_policy: policy with the new limits and/or governor to be applied.
2063 */
2064static int cpufreq_set_policy(struct cpufreq_policy *policy,
2065				struct cpufreq_policy *new_policy)
 
2066{
 
2067	struct cpufreq_governor *old_gov;
2068	int ret;
2069
2070	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2071		 new_policy->cpu, new_policy->min, new_policy->max);
2072
2073	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2074
2075	if (new_policy->min > policy->max || new_policy->max < policy->min)
2076		return -EINVAL;
2077
2078	/* verify the cpu speed can be set within this limit */
2079	ret = cpufreq_driver->verify(new_policy);
2080	if (ret)
2081		return ret;
2082
2083	/* adjust if necessary - all reasons */
2084	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2085			CPUFREQ_ADJUST, new_policy);
2086
2087	/* adjust if necessary - hardware incompatibility*/
2088	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2089			CPUFREQ_INCOMPATIBLE, new_policy);
2090
2091	/*
2092	 * verify the cpu speed can be set within this limit, which might be
2093	 * different to the first one
2094	 */
2095	ret = cpufreq_driver->verify(new_policy);
2096	if (ret)
2097		return ret;
2098
2099	/* notification of the new policy */
2100	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2101			CPUFREQ_NOTIFY, new_policy);
2102
2103	policy->min = new_policy->min;
2104	policy->max = new_policy->max;
2105
2106	pr_debug("new min and max freqs are %u - %u kHz\n",
2107		 policy->min, policy->max);
2108
2109	if (cpufreq_driver->setpolicy) {
2110		policy->policy = new_policy->policy;
2111		pr_debug("setting range\n");
2112		return cpufreq_driver->setpolicy(new_policy);
2113	}
2114
2115	if (new_policy->governor == policy->governor)
2116		goto out;
 
 
 
2117
2118	pr_debug("governor switch\n");
2119
2120	/* save old, working values */
2121	old_gov = policy->governor;
2122	/* end old governor */
2123	if (old_gov) {
2124		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2125		up_write(&policy->rwsem);
2126		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2127		down_write(&policy->rwsem);
2128	}
2129
2130	/* start new governor */
2131	policy->governor = new_policy->governor;
2132	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2133		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2134			goto out;
2135
2136		up_write(&policy->rwsem);
2137		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2138		down_write(&policy->rwsem);
 
 
2139	}
2140
2141	/* new governor failed, so re-start old one */
2142	pr_debug("starting governor %s failed\n", policy->governor->name);
2143	if (old_gov) {
2144		policy->governor = old_gov;
2145		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2146		__cpufreq_governor(policy, CPUFREQ_GOV_START);
 
 
2147	}
2148
2149	return -EINVAL;
2150
2151 out:
2152	pr_debug("governor: change or update limits\n");
2153	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2154}
2155
2156/**
2157 *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
2158 *	@cpu: CPU which shall be re-evaluated
2159 *
2160 *	Useful for policy notifiers which have different requirements
2161 *	at different times.
2162 */
2163int cpufreq_update_policy(unsigned int cpu)
2164{
2165	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2166	struct cpufreq_policy new_policy;
2167	int ret;
2168
2169	if (!policy) {
2170		ret = -ENODEV;
2171		goto no_policy;
2172	}
2173
2174	down_write(&policy->rwsem);
2175
2176	pr_debug("updating policy for CPU %u\n", cpu);
2177	memcpy(&new_policy, policy, sizeof(*policy));
2178	new_policy.min = policy->user_policy.min;
2179	new_policy.max = policy->user_policy.max;
2180	new_policy.policy = policy->user_policy.policy;
2181	new_policy.governor = policy->user_policy.governor;
2182
2183	/*
2184	 * BIOS might change freq behind our back
2185	 * -> ask driver for current freq and notify governors about a change
2186	 */
2187	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2188		new_policy.cur = cpufreq_driver->get(cpu);
2189		if (WARN_ON(!new_policy.cur)) {
2190			ret = -EIO;
2191			goto no_policy;
2192		}
2193
2194		if (!policy->cur) {
2195			pr_debug("Driver did not initialize current freq\n");
2196			policy->cur = new_policy.cur;
2197		} else {
2198			if (policy->cur != new_policy.cur && has_target())
2199				cpufreq_out_of_sync(cpu, policy->cur,
2200								new_policy.cur);
2201		}
2202	}
2203
2204	ret = cpufreq_set_policy(policy, &new_policy);
2205
2206	up_write(&policy->rwsem);
2207
2208	cpufreq_cpu_put(policy);
2209no_policy:
2210	return ret;
2211}
2212EXPORT_SYMBOL(cpufreq_update_policy);
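/*
 * Illustrative sketch, not part of this file: platform code that has just
 * changed a firmware- or BIOS-imposed limit can ask cpufreq to re-evaluate
 * every online CPU. Purely hypothetical caller.
 *
 *	unsigned int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		cpufreq_update_policy(cpu);
 */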
2213
2214static int cpufreq_cpu_callback(struct notifier_block *nfb,
2215					unsigned long action, void *hcpu)
2216{
2217	unsigned int cpu = (unsigned long)hcpu;
2218	struct device *dev;
2219
2220	dev = get_cpu_device(cpu);
2221	if (dev) {
2222		switch (action & ~CPU_TASKS_FROZEN) {
2223		case CPU_ONLINE:
2224			__cpufreq_add_dev(dev, NULL);
2225			break;
2226
2227		case CPU_DOWN_PREPARE:
2228			__cpufreq_remove_dev_prepare(dev, NULL);
2229			break;
2230
2231		case CPU_POST_DEAD:
2232			__cpufreq_remove_dev_finish(dev, NULL);
2233			break;
2234
2235		case CPU_DOWN_FAILED:
2236			__cpufreq_add_dev(dev, NULL);
2237			break;
2238		}
2239	}
2240	return NOTIFY_OK;
2241}
2242
2243static struct notifier_block __refdata cpufreq_cpu_notifier = {
2244	.notifier_call = cpufreq_cpu_callback,
2245};
2246
2247/*********************************************************************
2248 *               BOOST						     *
2249 *********************************************************************/
2250static int cpufreq_boost_set_sw(int state)
2251{
2252	struct cpufreq_frequency_table *freq_table;
2253	struct cpufreq_policy *policy;
2254	int ret = -EINVAL;
2255
2256	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
2257		freq_table = cpufreq_frequency_get_table(policy->cpu);
2258		if (freq_table) {
2259			ret = cpufreq_frequency_table_cpuinfo(policy,
2260							freq_table);
2261			if (ret) {
2262				pr_err("%s: Policy frequency update failed\n",
2263				       __func__);
2264				break;
2265			}
2266			policy->user_policy.max = policy->max;
2267			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2268		}
2269	}
2270
2271	return ret;
 
 
 
 
2272}
2273
2274int cpufreq_boost_trigger_state(int state)
2275{
 
2276	unsigned long flags;
2277	int ret = 0;
2278
2279	if (cpufreq_driver->boost_enabled == state)
2280		return 0;
2281
2282	write_lock_irqsave(&cpufreq_driver_lock, flags);
2283	cpufreq_driver->boost_enabled = state;
2284	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2285
2286	ret = cpufreq_driver->set_boost(state);
2287	if (ret) {
2288		write_lock_irqsave(&cpufreq_driver_lock, flags);
2289		cpufreq_driver->boost_enabled = !state;
2290		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2291
2292		pr_err("%s: Cannot %s BOOST\n",
2293		       __func__, state ? "enable" : "disable");
2294	}
2295
2296	return ret;
2297}
2298
2299int cpufreq_boost_supported(void)
2300{
2301	if (likely(cpufreq_driver))
2302		return cpufreq_driver->boost_supported;
2303
2304	return 0;
2305}
2306EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2307
2308int cpufreq_boost_enabled(void)
2309{
2310	return cpufreq_driver->boost_enabled;
2311}
2312EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2313
2314/*********************************************************************
2315 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2316 *********************************************************************/
2317
2318/**
2319 * cpufreq_register_driver - register a CPU Frequency driver
2320 * @driver_data: A struct cpufreq_driver containing the values
2321 * submitted by the CPU Frequency driver.
2322 *
2323 * Registers a CPU Frequency driver to this core code. This code
2324 * returns zero on success, -EEXIST when another driver got here first
2325 * (and isn't unregistered in the meantime).
2326 *
2327 */
2328int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2329{
2330	unsigned long flags;
2331	int ret;
2332
2333	if (cpufreq_disabled())
2334		return -ENODEV;
2335
2336	if (!driver_data || !driver_data->verify || !driver_data->init ||
2337	    !(driver_data->setpolicy || driver_data->target_index ||
2338		    driver_data->target) ||
2339	     (driver_data->setpolicy && (driver_data->target_index ||
2340		    driver_data->target)))
 
 
2341		return -EINVAL;
2342
2343	pr_debug("trying to register driver %s\n", driver_data->name);
2344
2345	if (driver_data->setpolicy)
2346		driver_data->flags |= CPUFREQ_CONST_LOOPS;
2347
2348	write_lock_irqsave(&cpufreq_driver_lock, flags);
2349	if (cpufreq_driver) {
2350		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2351		return -EEXIST;
 
2352	}
2353	cpufreq_driver = driver_data;
2354	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2355
2356	if (cpufreq_boost_supported()) {
2357		/*
2358		 * Check if driver provides function to enable boost -
2359		 * if not, use cpufreq_boost_set_sw as default
2360		 */
2361		if (!cpufreq_driver->set_boost)
2362			cpufreq_driver->set_boost = cpufreq_boost_set_sw;
 
2363
2364		ret = cpufreq_sysfs_create_file(&boost.attr);
2365		if (ret) {
2366			pr_err("%s: cannot register global BOOST sysfs file\n",
2367			       __func__);
 
 
2368			goto err_null_driver;
2369		}
2370	}
2371
2372	ret = subsys_interface_register(&cpufreq_interface);
2373	if (ret)
2374		goto err_boost_unreg;
2375
2376	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2377		int i;
2378		ret = -ENODEV;
2379
2380		/* check for at least one working CPU */
2381		for (i = 0; i < nr_cpu_ids; i++)
2382			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2383				ret = 0;
2384				break;
2385			}
2386
2387		/* if all ->init() calls failed, unregister */
2388		if (ret) {
2389			pr_debug("no CPU initialized for driver %s\n",
2390				 driver_data->name);
2391			goto err_if_unreg;
2392		}
2393	}
2394
2395	register_hotcpu_notifier(&cpufreq_cpu_notifier);
2396	pr_debug("driver %s up and running\n", driver_data->name);
 
2397
2398	return 0;
2399err_if_unreg:
2400	subsys_interface_unregister(&cpufreq_interface);
2401err_boost_unreg:
2402	if (cpufreq_boost_supported())
2403		cpufreq_sysfs_remove_file(&boost.attr);
2404err_null_driver:
2405	write_lock_irqsave(&cpufreq_driver_lock, flags);
2406	cpufreq_driver = NULL;
2407	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 
2408	return ret;
2409}
2410EXPORT_SYMBOL_GPL(cpufreq_register_driver);
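/*
 * Illustrative sketch, not part of this file: the minimum a ->target_index
 * style driver has to provide to pass the checks above (verify + init +
 * exactly one of setpolicy/target/target_index). The example_* callbacks
 * are hypothetical.
 *
 *	static struct cpufreq_driver example_cpufreq_driver = {
 *		.name		= "example",
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= example_target_index,
 *		.init		= example_cpu_init,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	ret = cpufreq_register_driver(&example_cpufreq_driver);
 */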
2411
2412/**
2413 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2414 *
2415 * Unregister the current CPUFreq driver. Only call this if you have
2416 * the right to do so, i.e. if you have succeeded in initialising before!
2417 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2418 * currently not initialised.
2419 */
2420int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2421{
2422	unsigned long flags;
2423
2424	if (!cpufreq_driver || (driver != cpufreq_driver))
2425		return -EINVAL;
2426
2427	pr_debug("unregistering driver %s\n", driver->name);
2428
 
 
2429	subsys_interface_unregister(&cpufreq_interface);
2430	if (cpufreq_boost_supported())
2431		cpufreq_sysfs_remove_file(&boost.attr);
2432
2433	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2434
2435	down_write(&cpufreq_rwsem);
2436	write_lock_irqsave(&cpufreq_driver_lock, flags);
2437
2438	cpufreq_driver = NULL;
2439
2440	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2441	up_write(&cpufreq_rwsem);
2442
2443	return 0;
2444}
2445EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2446
2447static int __init cpufreq_core_init(void)
2448{
 
 
2449	if (cpufreq_disabled())
2450		return -ENODEV;
2451
2452	cpufreq_global_kobject = kobject_create();
2453	BUG_ON(!cpufreq_global_kobject);
2454
 
 
 
2455	return 0;
2456}
 
 
2457core_initcall(cpufreq_core_init);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/drivers/cpufreq/cpufreq.c
   4 *
   5 *  Copyright (C) 2001 Russell King
   6 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
   7 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
   8 *
   9 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
  10 *	Added handling for CPU hotplug
  11 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
  12 *	Fix handling for CPU hotplug -- affected CPUs
 
 
 
 
  13 */
  14
  15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  16
  17#include <linux/cpu.h>
  18#include <linux/cpufreq.h>
  19#include <linux/cpu_cooling.h>
  20#include <linux/delay.h>
  21#include <linux/device.h>
  22#include <linux/init.h>
  23#include <linux/kernel_stat.h>
  24#include <linux/module.h>
  25#include <linux/mutex.h>
  26#include <linux/pm_qos.h>
  27#include <linux/slab.h>
  28#include <linux/suspend.h>
  29#include <linux/syscore_ops.h>
  30#include <linux/tick.h>
  31#include <linux/units.h>
  32#include <trace/events/power.h>
  33
  34static LIST_HEAD(cpufreq_policy_list);
  35
  36/* Macros to iterate over CPU policies */
  37#define for_each_suitable_policy(__policy, __active)			 \
  38	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
  39		if ((__active) == !policy_is_inactive(__policy))
  40
  41#define for_each_active_policy(__policy)		\
  42	for_each_suitable_policy(__policy, true)
  43#define for_each_inactive_policy(__policy)		\
  44	for_each_suitable_policy(__policy, false)
  45
  46/* Iterate over governors */
  47static LIST_HEAD(cpufreq_governor_list);
  48#define for_each_governor(__governor)				\
  49	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
  50
  51static char default_governor[CPUFREQ_NAME_LEN];
  52
  53/*
  54 * The "cpufreq driver" - the arch- or hardware-dependent low
  55 * level driver of CPUFreq support, and its spinlock. This lock
  56 * also protects the cpufreq_cpu_data array.
  57 */
  58static struct cpufreq_driver *cpufreq_driver;
  59static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 
  60static DEFINE_RWLOCK(cpufreq_driver_lock);
 
 
  61
  62static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
  63bool cpufreq_supports_freq_invariance(void)
  64{
  65	return static_branch_likely(&cpufreq_freq_invariance);
  66}
  67
  68/* Flag to suspend/resume CPUFreq governors */
  69static bool cpufreq_suspended;
  70
  71static inline bool has_target(void)
  72{
  73	return cpufreq_driver->target_index || cpufreq_driver->target;
  74}
  75
 
 
 
 
 
 
  76/* internal prototypes */
  77static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
  78static int cpufreq_init_governor(struct cpufreq_policy *policy);
  79static void cpufreq_exit_governor(struct cpufreq_policy *policy);
  80static void cpufreq_governor_limits(struct cpufreq_policy *policy);
  81static int cpufreq_set_policy(struct cpufreq_policy *policy,
  82			      struct cpufreq_governor *new_gov,
  83			      unsigned int new_pol);
  84
  85/*
  86 * Two notifier lists: the "policy" list is involved in the
  87 * validation process for a new CPU frequency policy; the
  88 * "transition" list for kernel code that needs to handle
  89 * changes to devices when the CPU clock speed changes.
  90 * The mutex locks both lists.
  91 */
  92static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
  93SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
  94
  95static int off __read_mostly;
  96static int cpufreq_disabled(void)
  97{
  98	return off;
  99}
 100void disable_cpufreq(void)
 101{
 102	off = 1;
 103}
 
 104static DEFINE_MUTEX(cpufreq_governor_mutex);
 105
 106bool have_governor_per_policy(void)
 107{
 108	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
 109}
 110EXPORT_SYMBOL_GPL(have_governor_per_policy);
 111
 112static struct kobject *cpufreq_global_kobject;
 113
 114struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
 115{
 116	if (have_governor_per_policy())
 117		return &policy->kobj;
 118	else
 119		return cpufreq_global_kobject;
 120}
 121EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
 122
 123static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 124{
 125	struct kernel_cpustat kcpustat;
 126	u64 cur_wall_time;
 127	u64 idle_time;
 128	u64 busy_time;
 129
 130	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
 131
 132	kcpustat_cpu_fetch(&kcpustat, cpu);
 133
 134	busy_time = kcpustat.cpustat[CPUTIME_USER];
 135	busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
 136	busy_time += kcpustat.cpustat[CPUTIME_IRQ];
 137	busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
 138	busy_time += kcpustat.cpustat[CPUTIME_STEAL];
 139	busy_time += kcpustat.cpustat[CPUTIME_NICE];
 140
 141	idle_time = cur_wall_time - busy_time;
 142	if (wall)
 143		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);
 144
 145	return div_u64(idle_time, NSEC_PER_USEC);
 146}
 147
 148u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
 149{
 150	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
 151
 152	if (idle_time == -1ULL)
 153		return get_cpu_idle_time_jiffy(cpu, wall);
 154	else if (!io_busy)
 155		idle_time += get_cpu_iowait_time_us(cpu, wall);
 156
 157	return idle_time;
 158}
 159EXPORT_SYMBOL_GPL(get_cpu_idle_time);
 160
 161/*
 162 * This is a generic cpufreq init() routine which can be used by cpufreq
  163 * drivers of SMP systems. It will do the following:
  164 * - set the passed frequency table on the policy
  165 * - set the policy's transition latency
  166 * - fill policy->cpus with all possible CPUs
 167 */
 168void cpufreq_generic_init(struct cpufreq_policy *policy,
 169		struct cpufreq_frequency_table *table,
 170		unsigned int transition_latency)
 171{
 172	policy->freq_table = table;
 173	policy->cpuinfo.transition_latency = transition_latency;
 174
 175	/*
 176	 * The driver only supports the SMP configuration where all processors
  177	 * share the clock and voltage.
 178	 */
 179	cpumask_setall(policy->cpus);
 
 
 180}
 181EXPORT_SYMBOL_GPL(cpufreq_generic_init);
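/*
 * Illustrative sketch, not part of this file: a driver's ->init() callback
 * handing its frequency table and transition latency (in nanoseconds) to
 * the helper above. example_freq_table is hypothetical.
 *
 *	static int example_cpu_init(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_generic_init(policy, example_freq_table,
 *				     300 * NSEC_PER_USEC);
 *		return 0;
 *	}
 */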
 182
 183struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
 184{
 185	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 186
 187	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
 188}
 189EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
 190
 191unsigned int cpufreq_generic_get(unsigned int cpu)
 192{
 193	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
 194
 195	if (!policy || IS_ERR(policy->clk)) {
 196		pr_err("%s: No %s associated to cpu: %d\n",
 197		       __func__, policy ? "clk" : "policy", cpu);
 198		return 0;
 199	}
 200
 201	return clk_get_rate(policy->clk) / 1000;
 202}
 203EXPORT_SYMBOL_GPL(cpufreq_generic_get);
 204
 205/**
 206 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
 207 * @cpu: CPU to find the policy for.
 208 *
 209 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
 210 * the kobject reference counter of that policy.  Return a valid policy on
 211 * success or NULL on failure.
 212 *
 213 * The policy returned by this function has to be released with the help of
 214 * cpufreq_cpu_put() to balance its kobject reference counter properly.
 215 */
 216struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 217{
 218	struct cpufreq_policy *policy = NULL;
 219	unsigned long flags;
 220
 221	if (WARN_ON(cpu >= nr_cpu_ids))
 
 
 
 222		return NULL;
 223
 224	/* get the cpufreq driver */
 225	read_lock_irqsave(&cpufreq_driver_lock, flags);
 226
 227	if (cpufreq_driver) {
 228		/* get the CPU */
 229		policy = cpufreq_cpu_get_raw(cpu);
 230		if (policy)
 231			kobject_get(&policy->kobj);
 232	}
 233
 234	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 235
 
 
 
 236	return policy;
 237}
 238EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
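/*
 * Illustrative sketch, not part of this file: the get/put pairing described
 * above, with "cpu" hypothetical.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("cpu%u max is %u kHz\n", cpu, policy->max);
 *		cpufreq_cpu_put(policy);
 *	}
 */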
 239
 240/**
 241 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 242 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 243 */
 244void cpufreq_cpu_put(struct cpufreq_policy *policy)
 245{
 
 
 
 246	kobject_put(&policy->kobj);
 
 247}
 248EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
 249
 250/**
 251 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 252 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 253 */
 254void cpufreq_cpu_release(struct cpufreq_policy *policy)
 255{
 256	if (WARN_ON(!policy))
 257		return;
 258
 259	lockdep_assert_held(&policy->rwsem);
 260
 261	up_write(&policy->rwsem);
 262
 263	cpufreq_cpu_put(policy);
 264}
 265
 266/**
 267 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 268 * @cpu: CPU to find the policy for.
 269 *
 270 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
 271 * if the policy returned by it is not NULL, acquire its rwsem for writing.
 272 * Return the policy if it is active or release it and return NULL otherwise.
 273 *
 274 * The policy returned by this function has to be released with the help of
 275 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
 276 * counter properly.
 277 */
 278struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
 279{
 280	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 281
 282	if (!policy)
 283		return NULL;
 284
 285	down_write(&policy->rwsem);
 286
 287	if (policy_is_inactive(policy)) {
 288		cpufreq_cpu_release(policy);
 289		return NULL;
 290	}
 291
 292	return policy;
 293}
 294
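/*
 * Illustrative sketch, not part of this file: acquire/release bracketing a
 * short section that needs policy->rwsem held for writing. "cpu" is
 * hypothetical.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
 *
 *	if (policy) {
 *		refresh_frequency_limits(policy);
 *		cpufreq_cpu_release(policy);
 *	}
 */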
 295/*********************************************************************
 296 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 297 *********************************************************************/
 298
 299/**
 300 * adjust_jiffies - Adjust the system "loops_per_jiffy".
 301 * @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 302 * @ci: Frequency change information.
 303 *
 304 * This function alters the system "loops_per_jiffy" for the clock
 305 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 306 * systems as each CPU might be scaled differently. So, use the arch
 307 * per-CPU loops_per_jiffy value wherever possible.
 308 */
 
 
 
 
 309static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 310{
 311#ifndef CONFIG_SMP
 312	static unsigned long l_p_j_ref;
 313	static unsigned int l_p_j_ref_freq;
 314
 315	if (ci->flags & CPUFREQ_CONST_LOOPS)
 316		return;
 317
 318	if (!l_p_j_ref_freq) {
 319		l_p_j_ref = loops_per_jiffy;
 320		l_p_j_ref_freq = ci->old;
 321		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
 322			 l_p_j_ref, l_p_j_ref_freq);
 323	}
 324	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
 325		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
 326								ci->new);
 327		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
 328			 loops_per_jiffy, ci->new);
 329	}
 
 
 
 
 
 
 330#endif
 331}
 332
 333/**
 334 * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
  335 * @policy: cpufreq policy associated with the frequency transition.
  336 * @freqs: contains details of the frequency update.
 337 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 338 *
 339 * This function calls the transition notifiers and adjust_jiffies().
 340 *
 341 * It is called twice on all CPU frequency changes that have external effects.
 342 */
 343static void cpufreq_notify_transition(struct cpufreq_policy *policy,
 344				      struct cpufreq_freqs *freqs,
 345				      unsigned int state)
 346{
 347	int cpu;
 348
 349	BUG_ON(irqs_disabled());
 350
 351	if (cpufreq_disabled())
 352		return;
 353
 354	freqs->policy = policy;
 355	freqs->flags = cpufreq_driver->flags;
 356	pr_debug("notification %u of frequency transition to %u kHz\n",
 357		 state, freqs->new);
 358
 359	switch (state) {
 
 360	case CPUFREQ_PRECHANGE:
 361		/*
 362		 * Detect if the driver reported a value as "old frequency"
 363		 * which is not equal to what the cpufreq core thinks is
 364		 * "old frequency".
 365		 */
 366		if (policy->cur && policy->cur != freqs->old) {
 367			pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
 368				 freqs->old, policy->cur);
 369			freqs->old = policy->cur;
 
 
 
 370		}
 371
 372		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 373					 CPUFREQ_PRECHANGE, freqs);
 374
 375		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
 376		break;
 377
 378	case CPUFREQ_POSTCHANGE:
 379		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
 380		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
 381			 cpumask_pr_args(policy->cpus));
 382
 383		for_each_cpu(cpu, policy->cpus)
 384			trace_cpu_frequency(freqs->new, cpu);
 385
 386		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 387					 CPUFREQ_POSTCHANGE, freqs);
 
 
 
 
 
 388
 389		cpufreq_stats_record_transition(policy, freqs->new);
 390		policy->cur = freqs->new;
 391	}
 392}
 393
  394/* Do post notifications when there is a chance that the transition failed */
 395static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
 396		struct cpufreq_freqs *freqs, int transition_failed)
 397{
 398	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
 399	if (!transition_failed)
 400		return;
 401
 402	swap(freqs->old, freqs->new);
 403	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
 404	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
 405}
 406
 407void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
 408		struct cpufreq_freqs *freqs)
 409{
 410
 411	/*
 412	 * Catch double invocations of _begin() which lead to self-deadlock.
 413	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
 414	 * doesn't invoke _begin() on their behalf, and hence the chances of
 415	 * double invocations are very low. Moreover, there are scenarios
 416	 * where these checks can emit false-positive warnings in these
 417	 * drivers; so we avoid that by skipping them altogether.
 418	 */
 419	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
 420				&& current == policy->transition_task);
 421
 422wait:
 423	wait_event(policy->transition_wait, !policy->transition_ongoing);
 424
 425	spin_lock(&policy->transition_lock);
 426
 427	if (unlikely(policy->transition_ongoing)) {
 428		spin_unlock(&policy->transition_lock);
 429		goto wait;
 430	}
 431
 432	policy->transition_ongoing = true;
 433	policy->transition_task = current;
 434
 435	spin_unlock(&policy->transition_lock);
 436
 437	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
 438}
 439EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
 440
 441void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
 442		struct cpufreq_freqs *freqs, int transition_failed)
 443{
 444	if (WARN_ON(!policy->transition_ongoing))
 445		return;
 446
 447	cpufreq_notify_post_transition(policy, freqs, transition_failed);
 448
 449	arch_set_freq_scale(policy->related_cpus,
 450			    policy->cur,
 451			    policy->cpuinfo.max_freq);
 452
 453	policy->transition_ongoing = false;
 454	policy->transition_task = NULL;
 455
 456	wake_up(&policy->transition_wait);
 457}
 458EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
 459
 460/*
 461 * Fast frequency switching status count.  Positive means "enabled", negative
 462 * means "disabled" and 0 means "not decided yet".
 463 */
 464static int cpufreq_fast_switch_count;
 465static DEFINE_MUTEX(cpufreq_fast_switch_lock);
 466
 467static void cpufreq_list_transition_notifiers(void)
 468{
 469	struct notifier_block *nb;
 470
 471	pr_info("Registered transition notifiers:\n");
 472
 473	mutex_lock(&cpufreq_transition_notifier_list.mutex);
 474
 475	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
 476		pr_info("%pS\n", nb->notifier_call);
 477
 478	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
 479}
 480
 481/**
 482 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 483 * @policy: cpufreq policy to enable fast frequency switching for.
 484 *
 485 * Try to enable fast frequency switching for @policy.
 486 *
 487 * The attempt will fail if there is at least one transition notifier registered
 488 * at this point, as fast frequency switching is quite fundamentally at odds
 489 * with transition notifiers.  Thus if successful, it will make registration of
 490 * transition notifiers fail going forward.
 491 */
 492void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
 493{
 494	lockdep_assert_held(&policy->rwsem);
 495
 496	if (!policy->fast_switch_possible)
 497		return;
 498
 499	mutex_lock(&cpufreq_fast_switch_lock);
 500	if (cpufreq_fast_switch_count >= 0) {
 501		cpufreq_fast_switch_count++;
 502		policy->fast_switch_enabled = true;
 503	} else {
 504		pr_warn("CPU%u: Fast frequency switching not enabled\n",
 505			policy->cpu);
 506		cpufreq_list_transition_notifiers();
 507	}
 508	mutex_unlock(&cpufreq_fast_switch_lock);
 509}
 510EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
 511
 512/**
 513 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 514 * @policy: cpufreq policy to disable fast frequency switching for.
 515 */
 516void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
 517{
 518	mutex_lock(&cpufreq_fast_switch_lock);
 519	if (policy->fast_switch_enabled) {
 520		policy->fast_switch_enabled = false;
 521		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
 522			cpufreq_fast_switch_count--;
 523	}
 524	mutex_unlock(&cpufreq_fast_switch_lock);
 525}
 526EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
 527
 528static unsigned int __resolve_freq(struct cpufreq_policy *policy,
 529		unsigned int target_freq, unsigned int relation)
 530{
 531	unsigned int idx;
 532
 533	target_freq = clamp_val(target_freq, policy->min, policy->max);
 534
 535	if (!policy->freq_table)
 536		return target_freq;
 537
 538	idx = cpufreq_frequency_table_target(policy, target_freq, relation);
 539	policy->cached_resolved_idx = idx;
 540	policy->cached_target_freq = target_freq;
 541	return policy->freq_table[idx].frequency;
 542}
 543
 544/**
 545 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 546 * one.
 547 * @policy: associated policy to interrogate
 548 * @target_freq: target frequency to resolve.
 549 *
 550 * The target to driver frequency mapping is cached in the policy.
 551 *
 552 * Return: Lowest driver-supported frequency greater than or equal to the
 553 * given target_freq, subject to policy (min/max) and driver limitations.
 554 */
 555unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
 556					 unsigned int target_freq)
 557{
 558	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
 559}
 560EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
 561
 562unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
 563{
 564	unsigned int latency;
 565
 566	if (policy->transition_delay_us)
 567		return policy->transition_delay_us;
 568
 569	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
 570	if (latency) {
 571		/*
 572		 * For platforms that can change the frequency very fast (< 10
 573		 * us), the above formula gives a decent transition delay. But
 574		 * for platforms where transition_latency is in milliseconds, it
 575		 * ends up giving unrealistic values.
 576		 *
 577		 * Cap the default transition delay to 10 ms, which seems to be
 578		 * a reasonable amount of time after which we should reevaluate
 579		 * the frequency.
 580		 */
 581		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
 582	}
 583
 584	return LATENCY_MULTIPLIER;
 585}
 586EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
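/*
 * Worked example for the fallback above (illustrative numbers only): a
 * transition_latency of 500000 ns gives latency = 500 us, and
 * 500 * LATENCY_MULTIPLIER = 500000 us is capped to the 10000 us (10 ms)
 * default, whereas 5000 ns gives 5 us * LATENCY_MULTIPLIER = 5000 us and
 * is used as-is.
 */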
 587
 588/*********************************************************************
 589 *                          SYSFS INTERFACE                          *
 590 *********************************************************************/
 591static ssize_t show_boost(struct kobject *kobj,
 592			  struct kobj_attribute *attr, char *buf)
 593{
 594	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
 595}
 596
 597static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
 598			   const char *buf, size_t count)
 599{
 600	int ret, enable;
 601
 602	ret = sscanf(buf, "%d", &enable);
 603	if (ret != 1 || enable < 0 || enable > 1)
 604		return -EINVAL;
 605
 606	if (cpufreq_boost_trigger_state(enable)) {
 607		pr_err("%s: Cannot %s BOOST!\n",
 608		       __func__, enable ? "enable" : "disable");
 609		return -EINVAL;
 610	}
 611
 612	pr_debug("%s: cpufreq BOOST %s\n",
 613		 __func__, enable ? "enabled" : "disabled");
 614
 615	return count;
 616}
 617define_one_global_rw(boost);
 618
 619static struct cpufreq_governor *find_governor(const char *str_governor)
 620{
 621	struct cpufreq_governor *t;
 622
 623	for_each_governor(t)
 624		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
 625			return t;
 626
 627	return NULL;
 628}
 629
 630static struct cpufreq_governor *get_governor(const char *str_governor)
 
 
 
 
 631{
 632	struct cpufreq_governor *t;
 633
 634	mutex_lock(&cpufreq_governor_mutex);
 635	t = find_governor(str_governor);
 636	if (!t)
 637		goto unlock;
 638
 639	if (!try_module_get(t->owner))
 640		t = NULL;
 641
 642unlock:
 643	mutex_unlock(&cpufreq_governor_mutex);
 644
 645	return t;
 646}
 647
 648static unsigned int cpufreq_parse_policy(char *str_governor)
 649{
 650	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
 651		return CPUFREQ_POLICY_PERFORMANCE;
 652
 653	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
 654		return CPUFREQ_POLICY_POWERSAVE;
 
 655
 656	return CPUFREQ_POLICY_UNKNOWN;
 657}
 
 658
 659/**
 660 * cpufreq_parse_governor - parse a governor string only for has_target()
 661 * @str_governor: Governor name.
 662 */
 663static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
 664{
 665	struct cpufreq_governor *t;
 666
 667	t = get_governor(str_governor);
 668	if (t)
 669		return t;
 670
 671	if (request_module("cpufreq_%s", str_governor))
 672		return NULL;
 673
 674	return get_governor(str_governor);
 675}
 676
 677/*
 678 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 679 * print out cpufreq information
 680 *
 681 * Write out information from cpufreq_driver->policy[cpu]; object must be
 682 * "unsigned int".
 683 */
 684
 685#define show_one(file_name, object)			\
 686static ssize_t show_##file_name				\
 687(struct cpufreq_policy *policy, char *buf)		\
 688{							\
 689	return sprintf(buf, "%u\n", policy->object);	\
 690}
 691
 692show_one(cpuinfo_min_freq, cpuinfo.min_freq);
 693show_one(cpuinfo_max_freq, cpuinfo.max_freq);
 694show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
 695show_one(scaling_min_freq, min);
 696show_one(scaling_max_freq, max);
 
 697
 698__weak unsigned int arch_freq_get_on_cpu(int cpu)
 699{
 700	return 0;
 701}
 702
 703static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
 704{
 705	ssize_t ret;
 706	unsigned int freq;
 707
 708	freq = arch_freq_get_on_cpu(policy->cpu);
 709	if (freq)
 710		ret = sprintf(buf, "%u\n", freq);
 711	else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
 712		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
 713	else
 714		ret = sprintf(buf, "%u\n", policy->cur);
 715	return ret;
 716}
 717
 718/*
 719 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 720 */
 721#define store_one(file_name, object)			\
 722static ssize_t store_##file_name					\
 723(struct cpufreq_policy *policy, const char *buf, size_t count)		\
 724{									\
 725	unsigned long val;						\
 726	int ret;							\
 
 727									\
 728	ret = sscanf(buf, "%lu", &val);					\
 
 
 
 
 729	if (ret != 1)							\
 730		return -EINVAL;						\
 731									\
 732	ret = freq_qos_update_request(policy->object##_freq_req, val);\
 733	return ret >= 0 ? count : ret;					\
 
 
 734}
 735
 736store_one(scaling_min_freq, min);
 737store_one(scaling_max_freq, max);
 738
 739/*
 740 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 741 */
 742static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
 743					char *buf)
 744{
 745	unsigned int cur_freq = __cpufreq_get(policy);
 746
 747	if (cur_freq)
 748		return sprintf(buf, "%u\n", cur_freq);
 749
 750	return sprintf(buf, "<unknown>\n");
 751}
 752
 753/*
 754 * show_scaling_governor - show the current policy for the specified CPU
 755 */
 756static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
 757{
 758	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
 759		return sprintf(buf, "powersave\n");
 760	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
 761		return sprintf(buf, "performance\n");
 762	else if (policy->governor)
 763		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
 764				policy->governor->name);
 765	return -EINVAL;
 766}
 767
 768/*
 769 * store_scaling_governor - store policy for the specified CPU
 770 */
 771static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
 772					const char *buf, size_t count)
 773{
 774	char str_governor[16];
 775	int ret;
 
 
 
 
 
 
 776
 777	ret = sscanf(buf, "%15s", str_governor);
 778	if (ret != 1)
 779		return -EINVAL;
 780
 781	if (cpufreq_driver->setpolicy) {
 782		unsigned int new_pol;
 
 783
 784		new_pol = cpufreq_parse_policy(str_governor);
 785		if (!new_pol)
 786			return -EINVAL;
 787
 788		ret = cpufreq_set_policy(policy, NULL, new_pol);
 789	} else {
 790		struct cpufreq_governor *new_gov;
 791
 792		new_gov = cpufreq_parse_governor(str_governor);
 793		if (!new_gov)
 794			return -EINVAL;
 795
 796		ret = cpufreq_set_policy(policy, new_gov,
 797					 CPUFREQ_POLICY_UNKNOWN);
 798
 799		module_put(new_gov->owner);
 800	}
 801
 802	return ret ? ret : count;
 803}
 804
 805/*
 806 * show_scaling_driver - show the cpufreq driver currently loaded
 807 */
 808static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
 809{
 810	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
 811}
 812
 813/*
 814 * show_scaling_available_governors - show the available CPUfreq governors
 815 */
 816static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
 817						char *buf)
 818{
 819	ssize_t i = 0;
 820	struct cpufreq_governor *t;
 821
 822	if (!has_target()) {
 823		i += sprintf(buf, "performance powersave");
 824		goto out;
 825	}
 826
 827	mutex_lock(&cpufreq_governor_mutex);
 828	for_each_governor(t) {
 829		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
 830		    - (CPUFREQ_NAME_LEN + 2)))
 831			break;
 832		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
 833	}
 834	mutex_unlock(&cpufreq_governor_mutex);
 835out:
 836	i += sprintf(&buf[i], "\n");
 837	return i;
 838}
 839
 840ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
 841{
 842	ssize_t i = 0;
 843	unsigned int cpu;
 844
 845	for_each_cpu(cpu, mask) {
 846		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u ", cpu);
 
 
 847		if (i >= (PAGE_SIZE - 5))
 848			break;
 849	}
 850
 851	/* Remove the extra space at the end */
 852	i--;
 853
 854	i += sprintf(&buf[i], "\n");
 855	return i;
 856}
 857EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
 858
 859/*
 860 * show_related_cpus - show the CPUs affected by each transition even if
 861 * hw coordination is in use
 862 */
 863static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
 864{
 865	return cpufreq_show_cpus(policy->related_cpus, buf);
 866}
 867
 868/*
 869 * show_affected_cpus - show the CPUs affected by each transition
 870 */
 871static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
 872{
 873	return cpufreq_show_cpus(policy->cpus, buf);
 874}
 875
 876static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
 877					const char *buf, size_t count)
 878{
 879	unsigned int freq = 0;
 880	unsigned int ret;
 881
 882	if (!policy->governor || !policy->governor->store_setspeed)
 883		return -EINVAL;
 884
 885	ret = sscanf(buf, "%u", &freq);
 886	if (ret != 1)
 887		return -EINVAL;
 888
 889	policy->governor->store_setspeed(policy, freq);
 890
 891	return count;
 892}
 893
 894static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
 895{
 896	if (!policy->governor || !policy->governor->show_setspeed)
 897		return sprintf(buf, "<unsupported>\n");
 898
 899	return policy->governor->show_setspeed(policy, buf);
 900}
 901
 902/*
 903 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 904 */
 905static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
 906{
 907	unsigned int limit;
 908	int ret;
 909	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
 910	if (!ret)
 911		return sprintf(buf, "%u\n", limit);
 
 
 912	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
 913}
 914
 915cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
 916cpufreq_freq_attr_ro(cpuinfo_min_freq);
 917cpufreq_freq_attr_ro(cpuinfo_max_freq);
 918cpufreq_freq_attr_ro(cpuinfo_transition_latency);
 919cpufreq_freq_attr_ro(scaling_available_governors);
 920cpufreq_freq_attr_ro(scaling_driver);
 921cpufreq_freq_attr_ro(scaling_cur_freq);
 922cpufreq_freq_attr_ro(bios_limit);
 923cpufreq_freq_attr_ro(related_cpus);
 924cpufreq_freq_attr_ro(affected_cpus);
 925cpufreq_freq_attr_rw(scaling_min_freq);
 926cpufreq_freq_attr_rw(scaling_max_freq);
 927cpufreq_freq_attr_rw(scaling_governor);
 928cpufreq_freq_attr_rw(scaling_setspeed);
 929
 930static struct attribute *cpufreq_attrs[] = {
 931	&cpuinfo_min_freq.attr,
 932	&cpuinfo_max_freq.attr,
 933	&cpuinfo_transition_latency.attr,
 934	&scaling_min_freq.attr,
 935	&scaling_max_freq.attr,
 936	&affected_cpus.attr,
 937	&related_cpus.attr,
 938	&scaling_governor.attr,
 939	&scaling_driver.attr,
 940	&scaling_available_governors.attr,
 941	&scaling_setspeed.attr,
 942	NULL
 943};
 944ATTRIBUTE_GROUPS(cpufreq);
 945
 946#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
 947#define to_attr(a) container_of(a, struct freq_attr, attr)
 948
 949static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 950{
 951	struct cpufreq_policy *policy = to_policy(kobj);
 952	struct freq_attr *fattr = to_attr(attr);
 953	ssize_t ret = -EBUSY;
 954
 955	if (!fattr->show)
 956		return -EIO;
 957
 958	down_read(&policy->rwsem);
 959	if (likely(!policy_is_inactive(policy)))
 
 960		ret = fattr->show(policy, buf);
 
 
 
 961	up_read(&policy->rwsem);
 
 962
 963	return ret;
 964}
 965
 966static ssize_t store(struct kobject *kobj, struct attribute *attr,
 967		     const char *buf, size_t count)
 968{
 969	struct cpufreq_policy *policy = to_policy(kobj);
 970	struct freq_attr *fattr = to_attr(attr);
 971	ssize_t ret = -EBUSY;
 
 
 972
 973	if (!fattr->store)
 974		return -EIO;
 
 
 
 975
 976	down_write(&policy->rwsem);
 977	if (likely(!policy_is_inactive(policy)))
 
 978		ret = fattr->store(policy, buf, count);
 
 
 
 979	up_write(&policy->rwsem);
 980
 
 
 
 
 981	return ret;
 982}
 983
 984static void cpufreq_sysfs_release(struct kobject *kobj)
 985{
 986	struct cpufreq_policy *policy = to_policy(kobj);
 987	pr_debug("last reference is dropped\n");
 988	complete(&policy->kobj_unregister);
 989}
 990
 991static const struct sysfs_ops sysfs_ops = {
 992	.show	= show,
 993	.store	= store,
 994};
 995
 996static struct kobj_type ktype_cpufreq = {
 997	.sysfs_ops	= &sysfs_ops,
 998	.default_groups	= cpufreq_groups,
 999	.release	= cpufreq_sysfs_release,
1000};
1001
1002static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
1003				struct device *dev)
1004{
1005	if (unlikely(!dev))
1006		return;
1007
1008	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1009		return;
 
1010
1011	dev_dbg(dev, "%s: Adding symlink\n", __func__);
1012	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
1013		dev_err(dev, "cpufreq symlink creation failed\n");
 
1014}
 
1015
1016static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
1017				   struct device *dev)
1018{
1019	dev_dbg(dev, "%s: Removing symlink\n", __func__);
1020	sysfs_remove_link(&dev->kobj, "cpufreq");
1021	cpumask_clear_cpu(cpu, policy->real_cpus);
1022}
1023
1024static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
 
1025{
1026	struct freq_attr **drv_attr;
1027	int ret = 0;
1028
 
 
 
 
 
 
1029	/* set up files for this cpu device */
1030	drv_attr = cpufreq_driver->attr;
1031	while (drv_attr && *drv_attr) {
1032		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1033		if (ret)
1034			return ret;
1035		drv_attr++;
1036	}
1037	if (cpufreq_driver->get) {
1038		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1039		if (ret)
1040			return ret;
 
 
 
 
 
1041	}
1042
1043	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1044	if (ret)
1045		return ret;
1046
1047	if (cpufreq_driver->bios_limit) {
1048		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1049		if (ret)
1050			return ret;
1051	}
1052
1053	return 0;
1054}
1055
1056static int cpufreq_init_policy(struct cpufreq_policy *policy)
1057{
1058	struct cpufreq_governor *gov = NULL;
1059	unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
1060	int ret;
 
 
1061
1062	if (has_target()) {
1063		/* Update policy governor to the one used before hotplug. */
1064		gov = get_governor(policy->last_governor);
1065		if (gov) {
1066			pr_debug("Restoring governor %s for cpu %d\n",
1067				 gov->name, policy->cpu);
1068		} else {
1069			gov = get_governor(default_governor);
1070		}
1071
1072		if (!gov) {
1073			gov = cpufreq_default_governor();
1074			__module_get(gov->owner);
1075		}
1076
1077	} else {
 
 
1078
1079		/* Use the default policy if there is no last_policy. */
1080		if (policy->last_policy) {
1081			pol = policy->last_policy;
1082		} else {
1083			pol = cpufreq_parse_policy(default_governor);
1084			/*
1085			 * In case the default governor is neither "performance"
1086			 * nor "powersave", fall back to the initial policy
1087			 * value set by the driver.
1088			 */
1089			if (pol == CPUFREQ_POLICY_UNKNOWN)
1090				pol = policy->policy;
1091		}
1092		if (pol != CPUFREQ_POLICY_PERFORMANCE &&
1093		    pol != CPUFREQ_POLICY_POWERSAVE)
1094			return -ENODATA;
1095	}
1096
1097	ret = cpufreq_set_policy(policy, gov, pol);
1098	if (gov)
1099		module_put(gov->owner);
1100
1101	return ret;
1102}
1103
1104static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 
 
1105{
1106	int ret = 0;
 
1107
1108	/* Has this CPU been taken care of already? */
1109	if (cpumask_test_cpu(cpu, policy->cpus))
1110		return 0;
 
 
 
 
1111
1112	down_write(&policy->rwsem);
1113	if (has_target())
1114		cpufreq_stop_governor(policy);
1115
1116	cpumask_set_cpu(cpu, policy->cpus);
 
 
1117
1118	if (has_target()) {
1119		ret = cpufreq_start_governor(policy);
1120		if (ret)
1121			pr_err("%s: Failed to start governor\n", __func__);
1122	}
1123	up_write(&policy->rwsem);
1124	return ret;
1125}
1126
1127void refresh_frequency_limits(struct cpufreq_policy *policy)
1128{
1129	if (!policy_is_inactive(policy)) {
1130		pr_debug("updating policy for CPU %u\n", policy->cpu);
1131
1132		cpufreq_set_policy(policy, policy->governor, policy->policy);
 
 
 
1133	}
1134}
1135EXPORT_SYMBOL(refresh_frequency_limits);
1136
1137static void handle_update(struct work_struct *work)
1138{
1139	struct cpufreq_policy *policy =
1140		container_of(work, struct cpufreq_policy, update);
1141
1142	pr_debug("handle_update for cpu %u called\n", policy->cpu);
1143	down_write(&policy->rwsem);
1144	refresh_frequency_limits(policy);
1145	up_write(&policy->rwsem);
1146}
 
1147
1148static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
1149				void *data)
1150{
1151	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
 
1152
1153	schedule_work(&policy->update);
1154	return 0;
1155}
1156
1157static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
1158				void *data)
1159{
1160	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
1161
1162	schedule_work(&policy->update);
1163	return 0;
1164}
1165
1166static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1167{
1168	struct kobject *kobj;
1169	struct completion *cmp;
1170
1171	down_write(&policy->rwsem);
1172	cpufreq_stats_free_table(policy);
1173	kobj = &policy->kobj;
1174	cmp = &policy->kobj_unregister;
1175	up_write(&policy->rwsem);
1176	kobject_put(kobj);
1177
1178	/*
1179	 * We need to make sure that the underlying kobj is
1180	 * actually not referenced anymore by anybody before we
1181	 * proceed with unloading.
1182	 */
1183	pr_debug("waiting for dropping of refcount\n");
1184	wait_for_completion(cmp);
1185	pr_debug("wait complete\n");
1186}
1187
1188static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1189{
1190	struct cpufreq_policy *policy;
1191	struct device *dev = get_cpu_device(cpu);
1192	int ret;
1193
1194	if (!dev)
1195		return NULL;
1196
1197	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1198	if (!policy)
1199		return NULL;
1200
1201	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1202		goto err_free_policy;
1203
1204	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1205		goto err_free_cpumask;
1206
1207	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1208		goto err_free_rcpumask;
1209
1210	init_completion(&policy->kobj_unregister);
1211	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1212				   cpufreq_global_kobject, "policy%u", cpu);
1213	if (ret) {
1214		dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
1215		/*
1216		 * The entire policy object will be freed below, but the extra
1217		 * memory allocated for the kobject name needs to be freed by
1218		 * releasing the kobject.
1219		 */
1220		kobject_put(&policy->kobj);
1221		goto err_free_real_cpus;
1222	}
1223
1224	freq_constraints_init(&policy->constraints);
1225
1226	policy->nb_min.notifier_call = cpufreq_notifier_min;
1227	policy->nb_max.notifier_call = cpufreq_notifier_max;
1228
1229	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
1230				    &policy->nb_min);
1231	if (ret) {
1232		dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
1233			ret, cpumask_pr_args(policy->cpus));
1234		goto err_kobj_remove;
1235	}
1236
1237	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
1238				    &policy->nb_max);
1239	if (ret) {
1240		dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
1241			ret, cpumask_pr_args(policy->cpus));
1242		goto err_min_qos_notifier;
1243	}
1244
1245	INIT_LIST_HEAD(&policy->policy_list);
1246	init_rwsem(&policy->rwsem);
1247	spin_lock_init(&policy->transition_lock);
1248	init_waitqueue_head(&policy->transition_wait);
1249	INIT_WORK(&policy->update, handle_update);
1250
1251	policy->cpu = cpu;
1252	return policy;
1253
1254err_min_qos_notifier:
1255	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1256				 &policy->nb_min);
1257err_kobj_remove:
1258	cpufreq_policy_put_kobj(policy);
1259err_free_real_cpus:
1260	free_cpumask_var(policy->real_cpus);
1261err_free_rcpumask:
1262	free_cpumask_var(policy->related_cpus);
1263err_free_cpumask:
1264	free_cpumask_var(policy->cpus);
1265err_free_policy:
1266	kfree(policy);
1267
1268	return NULL;
1269}
1270
1271static void cpufreq_policy_free(struct cpufreq_policy *policy)
1272{
1273	unsigned long flags;
1274	int cpu;
1275
1276	/*
1277	 * The callers must ensure the policy is inactive by now, to avoid any
1278	 * races with show()/store() callbacks.
 
1279	 */
1280	if (unlikely(!policy_is_inactive(policy)))
1281		pr_warn("%s: Freeing active policy\n", __func__);
 
 
1282
1283	/* Remove policy from list */
1284	write_lock_irqsave(&cpufreq_driver_lock, flags);
1285	list_del(&policy->policy_list);
1286
1287	for_each_cpu(cpu, policy->related_cpus)
1288		per_cpu(cpufreq_cpu_data, cpu) = NULL;
1289	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
1290
1291	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
1292				 &policy->nb_max);
1293	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1294				 &policy->nb_min);
1295
1296	/* Cancel any pending policy->update work before freeing the policy. */
1297	cancel_work_sync(&policy->update);
1298
1299	if (policy->max_freq_req) {
1300		/*
1301		 * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
1302		 * notification, since CPUFREQ_CREATE_POLICY notification was
1303		 * sent after adding max_freq_req earlier.
1304		 */
1305		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1306					     CPUFREQ_REMOVE_POLICY, policy);
1307		freq_qos_remove_request(policy->max_freq_req);
1308	}
1309
1310	freq_qos_remove_request(policy->min_freq_req);
1311	kfree(policy->min_freq_req);
1312
1313	cpufreq_policy_put_kobj(policy);
1314	free_cpumask_var(policy->real_cpus);
1315	free_cpumask_var(policy->related_cpus);
1316	free_cpumask_var(policy->cpus);
1317	kfree(policy);
1318}
1319
1320static int cpufreq_online(unsigned int cpu)
1321{
 
 
1322	struct cpufreq_policy *policy;
1323	bool new_policy;
1324	unsigned long flags;
1325	unsigned int j;
1326	int ret;
 
 
1327
1328	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
 
1329
1330	/* Check if this CPU already has a policy to manage it */
1331	policy = per_cpu(cpufreq_cpu_data, cpu);
1332	if (policy) {
1333		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1334		if (!policy_is_inactive(policy))
1335			return cpufreq_add_policy_cpu(policy, cpu);
1336
1337		/* This is the only online CPU for the policy.  Start over. */
1338		new_policy = false;
1339		down_write(&policy->rwsem);
1340		policy->cpu = cpu;
1341		policy->governor = NULL;
1342	} else {
1343		new_policy = true;
1344		policy = cpufreq_policy_alloc(cpu);
1345		if (!policy)
1346			return -ENOMEM;
1347		down_write(&policy->rwsem);
1348	}
 
1349
1350	if (!new_policy && cpufreq_driver->online) {
1351		/* Recover policy->cpus using related_cpus */
1352		cpumask_copy(policy->cpus, policy->related_cpus);
1353
1354		ret = cpufreq_driver->online(policy);
1355		if (ret) {
1356			pr_debug("%s: %d: initialization failed\n", __func__,
1357				 __LINE__);
1358			goto out_exit_policy;
1359		}
1360	} else {
1361		cpumask_copy(policy->cpus, cpumask_of(cpu));
1362
1363		/*
1364		 * Call the driver. From then on the cpufreq driver must be able
1365		 * to accept all calls to ->verify and ->setpolicy for this CPU.
1366		 */
1367		ret = cpufreq_driver->init(policy);
1368		if (ret) {
1369			pr_debug("%s: %d: initialization failed\n", __func__,
1370				 __LINE__);
1371			goto out_free_policy;
1372		}
1373
1374		/*
1375		 * The initialization has succeeded and the policy is online.
1376		 * If there is a problem with its frequency table, take it
1377		 * offline and drop it.
1378		 */
1379		ret = cpufreq_table_validate_and_sort(policy);
1380		if (ret)
1381			goto out_offline_policy;
1382
1383		/* related_cpus should at least include policy->cpus. */
1384		cpumask_copy(policy->related_cpus, policy->cpus);
1385	}
1386
1387	/*
1388	 * The affected CPUs must always be the ones that are online. We aren't
1389	 * managing offline CPUs here.
1390	 */
1391	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1392
1393	if (new_policy) {
1394		for_each_cpu(j, policy->related_cpus) {
1395			per_cpu(cpufreq_cpu_data, j) = policy;
1396			add_cpu_dev_symlink(policy, j, get_cpu_device(j));
1397		}
1398
1399		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
1400					       GFP_KERNEL);
1401		if (!policy->min_freq_req) {
1402			ret = -ENOMEM;
1403			goto out_destroy_policy;
1404		}
1405
1406		ret = freq_qos_add_request(&policy->constraints,
1407					   policy->min_freq_req, FREQ_QOS_MIN,
1408					   FREQ_QOS_MIN_DEFAULT_VALUE);
1409		if (ret < 0) {
1410			/*
1411			 * So we don't call freq_qos_remove_request() for an
1412			 * uninitialized request.
1413			 */
1414			kfree(policy->min_freq_req);
1415			policy->min_freq_req = NULL;
1416			goto out_destroy_policy;
1417		}
1418
1419		/*
1420		 * This must be initialized right here to avoid calling
1421		 * freq_qos_remove_request() on uninitialized request in case
1422		 * of errors.
1423		 */
1424		policy->max_freq_req = policy->min_freq_req + 1;
1425
1426		ret = freq_qos_add_request(&policy->constraints,
1427					   policy->max_freq_req, FREQ_QOS_MAX,
1428					   FREQ_QOS_MAX_DEFAULT_VALUE);
1429		if (ret < 0) {
1430			policy->max_freq_req = NULL;
1431			goto out_destroy_policy;
1432		}
1433
1434		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1435				CPUFREQ_CREATE_POLICY, policy);
1436	}
1437
1438	if (cpufreq_driver->get && has_target()) {
1439		policy->cur = cpufreq_driver->get(policy->cpu);
1440		if (!policy->cur) {
1441			ret = -EIO;
1442			pr_err("%s: ->get() failed\n", __func__);
1443			goto out_destroy_policy;
1444		}
1445	}
1446
1447	/*
1448	 * Sometimes boot loaders set the CPU frequency to a value outside of
1449	 * the frequency table known to the cpufreq core. In such cases the CPU
1450	 * might be unstable if it has to run at that frequency for a long time,
1451	 * so it is better to set it to a frequency that is listed in the
1452	 * frequency table. Running at an unlisted frequency also makes the
1453	 * cpufreq stats inconsistent, as cpufreq-stats would fail to register
1454	 * because the current frequency of the CPU isn't found in the table.
1455	 *
1456	 * Because we don't want this change to affect the boot process badly,
1457	 * we go for the next frequency which is >= policy->cur ('cur' must be
1458	 * set by now, otherwise we would end up setting the frequency to the
1459	 * lowest entry of the table, as 'cur' is initialized to zero).
1460	 *
1461	 * We pass the target frequency as "policy->cur - 1", otherwise
1462	 * __cpufreq_driver_target() would simply return early, as policy->cur
1463	 * would be equal to the target frequency.
1464	 */
1465	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1466	    && has_target()) {
1467		unsigned int old_freq = policy->cur;
1468
1469		/* Are we running at unknown frequency ? */
1470		ret = cpufreq_frequency_table_get_index(policy, old_freq);
1471		if (ret == -EINVAL) {
1472			ret = __cpufreq_driver_target(policy, old_freq - 1,
1473						      CPUFREQ_RELATION_L);
1474
1475			/*
1476			 * Reaching here after boot in a few seconds may not
1477			 * mean that system will remain stable at "unknown"
1478			 * frequency for longer duration. Hence, a BUG_ON().
1479			 */
1480			BUG_ON(ret);
1481			pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
1482				__func__, policy->cpu, old_freq, policy->cur);
1483		}
1484	}
1485
1486	if (new_policy) {
1487		ret = cpufreq_add_dev_interface(policy);
1488		if (ret)
1489			goto out_destroy_policy;
1490
1491		cpufreq_stats_create_table(policy);
 
 
1492
1493		write_lock_irqsave(&cpufreq_driver_lock, flags);
1494		list_add(&policy->policy_list, &cpufreq_policy_list);
1495		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1496
1497		/*
1498		 * Register with the energy model before
1499		 * sched_cpufreq_governor_change() is called, which will result
1500		 * in rebuilding of the sched domains, which should only be done
1501		 * once the energy model is properly initialized for the policy
1502		 * first.
1503		 *
1504		 * Also, this should be called before the policy is registered
1505		 * with cooling framework.
1506		 */
1507		if (cpufreq_driver->register_em)
1508			cpufreq_driver->register_em(policy);
1509	}
1510
1511	ret = cpufreq_init_policy(policy);
1512	if (ret) {
1513		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1514		       __func__, cpu, ret);
1515		goto out_destroy_policy;
1516	}
1517
1518	up_write(&policy->rwsem);
1519
1520	kobject_uevent(&policy->kobj, KOBJ_ADD);
1521
1522	/* Let the driver finish any remaining setup after the policy is ready */
1523	if (cpufreq_driver->ready)
1524		cpufreq_driver->ready(policy);
1525
1526	if (cpufreq_thermal_control_enabled(cpufreq_driver))
1527		policy->cdev = of_cpufreq_cooling_register(policy);
1528
1529	pr_debug("initialization complete\n");
1530
1531	return 0;
1532
1533out_destroy_policy:
1534	for_each_cpu(j, policy->real_cpus)
1535		remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
1536
1537out_offline_policy:
1538	if (cpufreq_driver->offline)
1539		cpufreq_driver->offline(policy);
1540
1541out_exit_policy:
1542	if (cpufreq_driver->exit)
1543		cpufreq_driver->exit(policy);
1544
1545out_free_policy:
1546	cpumask_clear(policy->cpus);
1547	up_write(&policy->rwsem);
1548
1549	cpufreq_policy_free(policy);
1550	return ret;
1551}
1552
1553/**
1554 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1555 * @dev: CPU device.
1556 * @sif: Subsystem interface structure pointer (not used)
1557 */
1558static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1559{
1560	struct cpufreq_policy *policy;
1561	unsigned cpu = dev->id;
1562	int ret;
1563
1564	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1565
1566	if (cpu_online(cpu)) {
1567		ret = cpufreq_online(cpu);
1568		if (ret)
1569			return ret;
1570	}
1571
1572	/* Create sysfs link on CPU registration */
1573	policy = per_cpu(cpufreq_cpu_data, cpu);
1574	if (policy)
1575		add_cpu_dev_symlink(policy, cpu, dev);
1576
1577	return 0;
1578}
1579
1580static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
 
1581{
1582	int ret;
1583
1584	if (has_target())
1585		cpufreq_stop_governor(policy);
1586
1587	cpumask_clear_cpu(cpu, policy->cpus);
1588
1589	if (!policy_is_inactive(policy)) {
1590		/* Nominate a new CPU if necessary. */
1591		if (cpu == policy->cpu)
1592			policy->cpu = cpumask_any(policy->cpus);
1593
1594		/* Start the governor again for the active policy. */
1595		if (has_target()) {
1596			ret = cpufreq_start_governor(policy);
1597			if (ret)
1598				pr_err("%s: Failed to start governor\n", __func__);
1599		}
1600
1601		return;
 
 
1602	}
1603
1604	if (has_target())
1605		strncpy(policy->last_governor, policy->governor->name,
1606			CPUFREQ_NAME_LEN);
1607	else
1608		policy->last_policy = policy->policy;
1609
1610	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
1611		cpufreq_cooling_unregister(policy->cdev);
1612		policy->cdev = NULL;
1613	}
1614
1615	if (has_target())
1616		cpufreq_exit_governor(policy);
 
1617
1618	/*
1619	 * Perform the ->offline() during light-weight tear-down, as
1620	 * that allows fast recovery when the CPU comes back.
1621	 */
1622	if (cpufreq_driver->offline) {
1623		cpufreq_driver->offline(policy);
1624	} else if (cpufreq_driver->exit) {
1625		cpufreq_driver->exit(policy);
1626		policy->freq_table = NULL;
1627	}
 
 
1628}
1629
1630static int cpufreq_offline(unsigned int cpu)
 
1631{
1632	struct cpufreq_policy *policy;
1633
1634	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
 
 
1635
1636	policy = cpufreq_cpu_get_raw(cpu);
1637	if (!policy) {
1638		pr_debug("%s: No cpu_data found\n", __func__);
1639		return 0;
1640	}
1641
1642	down_write(&policy->rwsem);
 
1643
1644	__cpufreq_offline(cpu, policy);
 
 
1645
1646	up_write(&policy->rwsem);
1647	return 0;
1648}
1649
1650/*
1651 * cpufreq_remove_dev - remove a CPU device
1652 *
1653 * Removes the cpufreq interface for a CPU device.
1654 */
1655static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1656{
1657	unsigned int cpu = dev->id;
1658	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1659
1660	if (!policy)
1661		return;
1662
1663	down_write(&policy->rwsem);
1664
1665	if (cpu_online(cpu))
1666		__cpufreq_offline(cpu, policy);
1667
1668	remove_cpu_dev_symlink(policy, cpu, dev);
 
1669
1670	if (!cpumask_empty(policy->real_cpus)) {
1671		up_write(&policy->rwsem);
1672		return;
1673	}
1674
1675	/* We did light-weight exit earlier, do full tear down now */
1676	if (cpufreq_driver->offline)
1677		cpufreq_driver->exit(policy);
1678
1679	up_write(&policy->rwsem);
1680
1681	cpufreq_policy_free(policy);
1682}
1683
1684/**
1685 * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
1686 * @policy: Policy managing CPUs.
1687 * @new_freq: New CPU frequency.
 
 
1688 *
1689 * Adjust to the current frequency first and clean up later by either calling
1690 * cpufreq_update_policy(), or scheduling handle_update().
1691 */
1692static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1693				unsigned int new_freq)
1694{
 
1695	struct cpufreq_freqs freqs;
 
1696
1697	pr_debug("Warning: CPU frequency out of sync: cpufreq core thinks it is %u kHz, hardware reports %u kHz\n",
1698		 policy->cur, new_freq);
1699
1700	freqs.old = policy->cur;
1701	freqs.new = new_freq;
1702
1703	cpufreq_freq_transition_begin(policy, &freqs);
1704	cpufreq_freq_transition_end(policy, &freqs, 0);
1705}
1706
1707static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
1708{
1709	unsigned int new_freq;
1710
1711	new_freq = cpufreq_driver->get(policy->cpu);
1712	if (!new_freq)
1713		return 0;
1714
1715	/*
1716	 * If fast frequency switching is used with the given policy, the check
1717	 * against policy->cur is pointless, so skip it in that case.
1718	 */
1719	if (policy->fast_switch_enabled || !has_target())
1720		return new_freq;
1721
1722	if (policy->cur != new_freq) {
1723		/*
1724		 * For some platforms, the frequency returned by hardware may be
1725		 * slightly different from what is provided in the frequency
1726		 * table, for example hardware may return 499 MHz instead of 500
1727		 * MHz. In such cases it is better to avoid getting into
1728		 * unnecessary frequency updates.
1729		 */
1730		if (abs(policy->cur - new_freq) < HZ_PER_MHZ)
1731			return policy->cur;
1732
1733		cpufreq_out_of_sync(policy, new_freq);
1734		if (update)
1735			schedule_work(&policy->update);
1736	}
1737
1738	return new_freq;
1739}
1740
1741/**
1742 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1743 * @cpu: CPU number
1744 *
1745 * This is the last known frequency, without actually querying the driver.
1746 * The return value is the same as what is shown in scaling_cur_freq in sysfs.
1747 */
1748unsigned int cpufreq_quick_get(unsigned int cpu)
1749{
1750	struct cpufreq_policy *policy;
1751	unsigned int ret_freq = 0;
1752	unsigned long flags;
1753
1754	read_lock_irqsave(&cpufreq_driver_lock, flags);
1755
1756	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1757		ret_freq = cpufreq_driver->get(cpu);
1758		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1759		return ret_freq;
1760	}
1761
1762	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1763
1764	policy = cpufreq_cpu_get(cpu);
1765	if (policy) {
1766		ret_freq = policy->cur;
1767		cpufreq_cpu_put(policy);
1768	}
1769
1770	return ret_freq;
1771}
1772EXPORT_SYMBOL(cpufreq_quick_get);
1773
1774/**
1775 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1776 * @cpu: CPU number
1777 *
1778 * Just return the max possible frequency for a given CPU.
1779 */
1780unsigned int cpufreq_quick_get_max(unsigned int cpu)
1781{
1782	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1783	unsigned int ret_freq = 0;
1784
1785	if (policy) {
1786		ret_freq = policy->max;
1787		cpufreq_cpu_put(policy);
1788	}
1789
1790	return ret_freq;
1791}
1792EXPORT_SYMBOL(cpufreq_quick_get_max);
1793
1794/**
1795 * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
1796 * @cpu: CPU number
1797 *
1798 * The default return value is the max_freq field of cpuinfo.
1799 */
1800__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
1801{
1802	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1803	unsigned int ret_freq = 0;
1804
1805	if (policy) {
1806		ret_freq = policy->cpuinfo.max_freq;
1807		cpufreq_cpu_put(policy);
1808	}
1809
1810	return ret_freq;
1811}
1812EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
1813
1814static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1815{
1816	if (unlikely(policy_is_inactive(policy)))
1817		return 0;
1818
1819	return cpufreq_verify_current_freq(policy, true);
1820}
1821
1822/**
1823 * cpufreq_get - get the current CPU frequency (in kHz)
1824 * @cpu: CPU number
1825 *
1826 * Get the current frequency of the CPU as provided by the driver.
1827 */
1828unsigned int cpufreq_get(unsigned int cpu)
1829{
1830	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1831	unsigned int ret_freq = 0;
1832
1833	if (policy) {
1834		down_read(&policy->rwsem);
1835		if (cpufreq_driver->get)
1836			ret_freq = __cpufreq_get(policy);
1837		up_read(&policy->rwsem);
1838
1839		cpufreq_cpu_put(policy);
1840	}
1841
1842	return ret_freq;
1843}
1844EXPORT_SYMBOL(cpufreq_get);
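/*
 * Editorial usage sketch (not part of this file; names are hypothetical):
 * reading the frequency of CPU 0 from other kernel code.  cpufreq_get()
 * takes the policy rwsem and may query the hardware via the driver's ->get()
 * callback, while cpufreq_quick_get() only reports the cached policy value.
 */
static void example_report_cpu0_freq(void)
{
	unsigned int hw_khz = cpufreq_get(0);		/* 0 if no policy */
	unsigned int cached_khz = cpufreq_quick_get(0);

	pr_info("cpu0: driver reports %u kHz, cached %u kHz\n",
		hw_khz, cached_khz);
}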
1845
1846static struct subsys_interface cpufreq_interface = {
1847	.name		= "cpufreq",
1848	.subsys		= &cpu_subsys,
1849	.add_dev	= cpufreq_add_dev,
1850	.remove_dev	= cpufreq_remove_dev,
1851};
1852
1853/*
1854 * In case the platform wants a specific frequency to be configured
1855 * during suspend.
1856 */
1857int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1858{
1859	int ret;
1860
1861	if (!policy->suspend_freq) {
1862		pr_debug("%s: suspend_freq not defined\n", __func__);
1863		return 0;
1864	}
1865
1866	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1867			policy->suspend_freq);
1868
1869	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1870			CPUFREQ_RELATION_H);
1871	if (ret)
1872		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1873				__func__, policy->suspend_freq, ret);
1874
1875	return ret;
1876}
1877EXPORT_SYMBOL(cpufreq_generic_suspend);
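/*
 * Editorial sketch (assumption, not part of this file): a driver that wants
 * one fixed frequency across suspend can set policy->suspend_freq from its
 * ->init() callback and point ->suspend at cpufreq_generic_suspend().  The
 * "example_*" names below are hypothetical.
 */
static int example_drv_init_suspend(struct cpufreq_policy *policy)
{
	/* ... normal ->init() work: frequency table, latency, etc. ... */
	policy->suspend_freq = policy->cpuinfo.max_freq;
	return 0;
}

static struct cpufreq_driver example_suspend_driver = {
	.name		= "example-suspend",
	.init		= example_drv_init_suspend,
	.suspend	= cpufreq_generic_suspend,
	/* ->verify and ->target_index omitted in this sketch */
};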
1878
1879/**
1880 * cpufreq_suspend() - Suspend CPUFreq governors.
1881 *
1882 * Called during system-wide suspend/hibernate cycles to suspend governors,
1883 * as some platforms can't change the frequency after this point in the
1884 * suspend cycle, because some of the devices (e.g. i2c, regulators) used
1885 * for changing the frequency are suspended shortly afterwards.
1886 */
1887void cpufreq_suspend(void)
1888{
1889	struct cpufreq_policy *policy;
1890
1891	if (!cpufreq_driver)
1892		return;
1893
1894	if (!has_target() && !cpufreq_driver->suspend)
1895		goto suspend;
1896
1897	pr_debug("%s: Suspending Governors\n", __func__);
1898
1899	for_each_active_policy(policy) {
1900		if (has_target()) {
1901			down_write(&policy->rwsem);
1902			cpufreq_stop_governor(policy);
1903			up_write(&policy->rwsem);
1904		}
1905
1906		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1907			pr_err("%s: Failed to suspend driver: %s\n", __func__,
1908				cpufreq_driver->name);
1909	}
1910
1911suspend:
1912	cpufreq_suspended = true;
1913}
1914
1915/**
1916 * cpufreq_resume() - Resume CPUFreq governors.
1917 *
1918 * Called during system-wide suspend/hibernate cycles to resume governors
1919 * that were suspended by cpufreq_suspend().
1920 */
1921void cpufreq_resume(void)
1922{
1923	struct cpufreq_policy *policy;
1924	int ret;
1925
1926	if (!cpufreq_driver)
1927		return;
1928
1929	if (unlikely(!cpufreq_suspended))
1930		return;
1931
 
 
1932	cpufreq_suspended = false;
1933
1934	if (!has_target() && !cpufreq_driver->resume)
1935		return;
1936
1937	pr_debug("%s: Resuming Governors\n", __func__);
1938
1939	for_each_active_policy(policy) {
1940		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1941			pr_err("%s: Failed to resume driver: %p\n", __func__,
1942				policy);
1943		} else if (has_target()) {
1944			down_write(&policy->rwsem);
1945			ret = cpufreq_start_governor(policy);
1946			up_write(&policy->rwsem);
1947
1948			if (ret)
1949				pr_err("%s: Failed to start governor for policy: %p\n",
1950				       __func__, policy);
1951		}
1952	}
1953}
1954
1955/**
1956 * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
1957 * @flags: Flags to test against the current cpufreq driver's flags.
1958 *
1959 * Assumes that the driver is there, so callers must ensure that this is the
1960 * case.
1961 */
1962bool cpufreq_driver_test_flags(u16 flags)
1963{
1964	return !!(cpufreq_driver->flags & flags);
1965}
1966
1967/**
1968 * cpufreq_get_current_driver - Return the current driver's name.
1969 *
1970 * Return the name string of the currently registered cpufreq driver or NULL if
1971 * none.
1972 */
1973const char *cpufreq_get_current_driver(void)
1974{
1975	if (cpufreq_driver)
1976		return cpufreq_driver->name;
1977
1978	return NULL;
1979}
1980EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1981
1982/**
1983 * cpufreq_get_driver_data - Return current driver data.
1984 *
1985 * Return the private data of the currently registered cpufreq driver, or NULL
1986 * if no cpufreq driver has been registered.
1987 */
1988void *cpufreq_get_driver_data(void)
1989{
1990	if (cpufreq_driver)
1991		return cpufreq_driver->driver_data;
1992
1993	return NULL;
1994}
1995EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1996
1997/*********************************************************************
1998 *                     NOTIFIER LISTS INTERFACE                      *
1999 *********************************************************************/
2000
2001/**
2002 * cpufreq_register_notifier - Register a notifier with cpufreq.
2003 * @nb: notifier function to register.
2004 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2005 *
2006 * Add a notifier to one of two lists: either a list of notifiers that run on
2007 * clock rate changes (once before and once after every transition), or a list
2008 * of notifiers that run on cpufreq policy changes.
 
2009 *
2010 * This function may sleep and it has the same return values as
2011 * blocking_notifier_chain_register().
2012 */
2013int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
2014{
2015	int ret;
2016
2017	if (cpufreq_disabled())
2018		return -EINVAL;
2019
 
 
2020	switch (list) {
2021	case CPUFREQ_TRANSITION_NOTIFIER:
2022		mutex_lock(&cpufreq_fast_switch_lock);
2023
2024		if (cpufreq_fast_switch_count > 0) {
2025			mutex_unlock(&cpufreq_fast_switch_lock);
2026			return -EBUSY;
2027		}
2028		ret = srcu_notifier_chain_register(
2029				&cpufreq_transition_notifier_list, nb);
2030		if (!ret)
2031			cpufreq_fast_switch_count--;
2032
2033		mutex_unlock(&cpufreq_fast_switch_lock);
2034		break;
2035	case CPUFREQ_POLICY_NOTIFIER:
2036		ret = blocking_notifier_chain_register(
2037				&cpufreq_policy_notifier_list, nb);
2038		break;
2039	default:
2040		ret = -EINVAL;
2041	}
2042
2043	return ret;
2044}
2045EXPORT_SYMBOL(cpufreq_register_notifier);
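/*
 * Editorial usage sketch (hypothetical names, not part of this file): a
 * transition notifier is called once before (CPUFREQ_PRECHANGE) and once
 * after (CPUFREQ_POSTCHANGE) every frequency change, with a
 * struct cpufreq_freqs pointer as @data.  Registration fails with -EBUSY
 * while fast frequency switching is in use (see cpufreq_fast_switch_count).
 */
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (event == CPUFREQ_POSTCHANGE)
		pr_debug("frequency changed: %u -> %u kHz\n",
			 freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_cb,
};

/* e.g. from module init:
 *	cpufreq_register_notifier(&example_transition_nb,
 *				  CPUFREQ_TRANSITION_NOTIFIER);
 */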
2046
2047/**
2048 * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
2049 * @nb: notifier block to be unregistered.
2050 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2051 *
2052 * Remove a notifier from one of the cpufreq notifier lists.
2053 *
2054 * This function may sleep and it has the same return values as
2055 * blocking_notifier_chain_unregister().
2056 */
2057int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
2058{
2059	int ret;
2060
2061	if (cpufreq_disabled())
2062		return -EINVAL;
2063
2064	switch (list) {
2065	case CPUFREQ_TRANSITION_NOTIFIER:
2066		mutex_lock(&cpufreq_fast_switch_lock);
2067
2068		ret = srcu_notifier_chain_unregister(
2069				&cpufreq_transition_notifier_list, nb);
2070		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
2071			cpufreq_fast_switch_count++;
2072
2073		mutex_unlock(&cpufreq_fast_switch_lock);
2074		break;
2075	case CPUFREQ_POLICY_NOTIFIER:
2076		ret = blocking_notifier_chain_unregister(
2077				&cpufreq_policy_notifier_list, nb);
2078		break;
2079	default:
2080		ret = -EINVAL;
2081	}
2082
2083	return ret;
2084}
2085EXPORT_SYMBOL(cpufreq_unregister_notifier);
2086
2087
2088/*********************************************************************
2089 *                              GOVERNORS                            *
2090 *********************************************************************/
2091
2092/**
2093 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
2094 * @policy: cpufreq policy to switch the frequency for.
2095 * @target_freq: New frequency to set (may be approximate).
2096 *
2097 * Carry out a fast frequency switch without sleeping.
2098 *
2099 * The driver's ->fast_switch() callback invoked by this function must be
2100 * suitable for being called from within RCU-sched read-side critical sections
2101 * and it is expected to select the minimum available frequency greater than or
2102 * equal to @target_freq (CPUFREQ_RELATION_L).
2103 *
2104 * This function must not be called if policy->fast_switch_enabled is unset.
2105 *
2106 * Governors calling this function must guarantee that it will never be invoked
2107 * twice in parallel for the same policy and that it will never be called in
2108 * parallel with either ->target() or ->target_index() for the same policy.
2109 *
2110 * Returns the actual frequency set for the CPU.
2111 *
2112 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
2113 * error condition, the hardware configuration must be preserved.
2114 */
2115unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
2116					unsigned int target_freq)
2117{
2118	unsigned int freq;
2119	int cpu;
2120
2121	target_freq = clamp_val(target_freq, policy->min, policy->max);
2122	freq = cpufreq_driver->fast_switch(policy, target_freq);
2123
2124	if (!freq)
2125		return 0;
2126
2127	policy->cur = freq;
2128	arch_set_freq_scale(policy->related_cpus, freq,
2129			    policy->cpuinfo.max_freq);
2130	cpufreq_stats_record_transition(policy, freq);
2131
2132	if (trace_cpu_frequency_enabled()) {
2133		for_each_cpu(cpu, policy->cpus)
2134			trace_cpu_frequency(freq, cpu);
2135	}
2136
2137	return freq;
2138}
2139EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
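/*
 * Editorial sketch (hypothetical helper, not part of this file): how a
 * schedutil-style governor might use the fast-switch path from its update
 * hook -- no sleeping and no policy->rwsem, guarded by fast_switch_enabled.
 */
static void example_fast_adjust(struct cpufreq_policy *policy,
				unsigned int next_freq)
{
	if (policy->fast_switch_enabled)
		cpufreq_driver_fast_switch(policy, next_freq);
}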
2140
2141/**
2142 * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
2143 * @cpu: Target CPU.
2144 * @min_perf: Minimum (required) performance level (units of @capacity).
2145 * @target_perf: Target (desired) performance level (units of @capacity).
2146 * @capacity: Capacity of the target CPU.
2147 *
2148 * Carry out a fast performance level switch of @cpu without sleeping.
2149 *
2150 * The driver's ->adjust_perf() callback invoked by this function must be
2151 * suitable for being called from within RCU-sched read-side critical sections
2152 * and it is expected to select a suitable performance level equal to or above
2153 * @min_perf and preferably equal to or below @target_perf.
2154 *
2155 * This function must not be called if policy->fast_switch_enabled is unset.
2156 *
2157 * Governors calling this function must guarantee that it will never be invoked
2158 * twice in parallel for the same CPU and that it will never be called in
2159 * parallel with either ->target() or ->target_index() or ->fast_switch() for
2160 * the same CPU.
2161 */
2162void cpufreq_driver_adjust_perf(unsigned int cpu,
2163				 unsigned long min_perf,
2164				 unsigned long target_perf,
2165				 unsigned long capacity)
2166{
2167	cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
2168}
2169
2170/**
2171 * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
2172 *
2173 * Return 'true' if the ->adjust_perf callback is present for the
2174 * current driver or 'false' otherwise.
2175 */
2176bool cpufreq_driver_has_adjust_perf(void)
2177{
2178	return !!cpufreq_driver->adjust_perf;
2179}
2180
2181/* Must set freqs->new to intermediate frequency */
2182static int __target_intermediate(struct cpufreq_policy *policy,
2183				 struct cpufreq_freqs *freqs, int index)
2184{
2185	int ret;
2186
2187	freqs->new = cpufreq_driver->get_intermediate(policy, index);
2188
2189	/* We don't need to switch to intermediate freq */
2190	if (!freqs->new)
2191		return 0;
2192
2193	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
2194		 __func__, policy->cpu, freqs->old, freqs->new);
2195
2196	cpufreq_freq_transition_begin(policy, freqs);
2197	ret = cpufreq_driver->target_intermediate(policy, index);
2198	cpufreq_freq_transition_end(policy, freqs, ret);
2199
2200	if (ret)
2201		pr_err("%s: Failed to change to intermediate frequency: %d\n",
2202		       __func__, ret);
2203
2204	return ret;
2205}
2206
2207static int __target_index(struct cpufreq_policy *policy, int index)
2208{
2209	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
2210	unsigned int restore_freq, intermediate_freq = 0;
2211	unsigned int newfreq = policy->freq_table[index].frequency;
2212	int retval = -EINVAL;
2213	bool notify;
2214
2215	if (newfreq == policy->cur)
2216		return 0;
2217
2218	/* Save last value to restore later on errors */
2219	restore_freq = policy->cur;
2220
2221	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
2222	if (notify) {
2223		/* Handle switching to intermediate frequency */
2224		if (cpufreq_driver->get_intermediate) {
2225			retval = __target_intermediate(policy, &freqs, index);
2226			if (retval)
2227				return retval;
2228
2229			intermediate_freq = freqs.new;
2230			/* Set old freq to intermediate */
2231			if (intermediate_freq)
2232				freqs.old = freqs.new;
2233		}
2234
2235		freqs.new = newfreq;
2236		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
2237			 __func__, policy->cpu, freqs.old, freqs.new);
2238
2239		cpufreq_freq_transition_begin(policy, &freqs);
2240	}
2241
2242	retval = cpufreq_driver->target_index(policy, index);
2243	if (retval)
2244		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
2245		       retval);
2246
2247	if (notify) {
2248		cpufreq_freq_transition_end(policy, &freqs, retval);
2249
2250		/*
2251		 * Failed after setting to intermediate freq? Driver should have
2252		 * reverted back to initial frequency and so should we. Check
2253		 * here for intermediate_freq instead of get_intermediate, in
2254		 * case we haven't switched to intermediate freq at all.
2255		 */
2256		if (unlikely(retval && intermediate_freq)) {
2257			freqs.old = intermediate_freq;
2258			freqs.new = restore_freq;
2259			cpufreq_freq_transition_begin(policy, &freqs);
2260			cpufreq_freq_transition_end(policy, &freqs, 0);
2261		}
2262	}
2263
2264	return retval;
2265}
2266
2267int __cpufreq_driver_target(struct cpufreq_policy *policy,
2268			    unsigned int target_freq,
2269			    unsigned int relation)
2270{
 
2271	unsigned int old_target_freq = target_freq;
2272
2273	if (cpufreq_disabled())
2274		return -ENODEV;
2275
2276	target_freq = __resolve_freq(policy, target_freq, relation);
2277
2278	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2279		 policy->cpu, target_freq, relation, old_target_freq);
2280
2281	/*
2282	 * This might look like a redundant call, as we are checking it again
2283	 * after finding the index. But it is left intentionally for cases where
2284	 * exactly the same frequency is requested again, so that we can save a
2285	 * few function calls.
2286	 */
2287	if (target_freq == policy->cur &&
2288	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
2289		return 0;
2290
2291	if (cpufreq_driver->target) {
2292		/*
2293		 * If the driver hasn't set up a single inefficient frequency,
2294		 * it's unlikely it knows how to decode CPUFREQ_RELATION_E.
2295		 */
2296		if (!policy->efficiencies_available)
2297			relation &= ~CPUFREQ_RELATION_E;
2298
2299		return cpufreq_driver->target(policy, target_freq, relation);
 
2300	}
2301
2302	if (!cpufreq_driver->target_index)
2303		return -EINVAL;
2304
2305	return __target_index(policy, policy->cached_resolved_idx);
2306}
2307EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2308
2309int cpufreq_driver_target(struct cpufreq_policy *policy,
2310			  unsigned int target_freq,
2311			  unsigned int relation)
2312{
2313	int ret;
2314
2315	down_write(&policy->rwsem);
2316
2317	ret = __cpufreq_driver_target(policy, target_freq, relation);
2318
2319	up_write(&policy->rwsem);
2320
2321	return ret;
2322}
2323EXPORT_SYMBOL_GPL(cpufreq_driver_target);
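/*
 * Editorial usage sketch (hypothetical, not part of this file): requesting
 * the lowest available frequency >= 1 GHz for a policy.  cpufreq_driver_target()
 * takes policy->rwsem itself; callers already holding it (e.g. governor
 * callbacks) use __cpufreq_driver_target() instead.
 */
static int example_request_1ghz(struct cpufreq_policy *policy)
{
	return cpufreq_driver_target(policy, 1000000, CPUFREQ_RELATION_L);
}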
2324
2325__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2326{
2327	return NULL;
2328}
2329
2330static int cpufreq_init_governor(struct cpufreq_policy *policy)
 
2331{
2332	int ret;
2333
2334	/* Don't start any governor operations if we are entering suspend */
2335	if (cpufreq_suspended)
2336		return 0;
2337	/*
2338	 * The governor might not have been initialized here if an ACPI _PPC
2339	 * change notification happened, so check for it.
2340	 */
2341	if (!policy->governor)
2342		return -EINVAL;
2343
2344	/* Platform doesn't want dynamic frequency switching ? */
2345	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
2346	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2347		struct cpufreq_governor *gov = cpufreq_fallback_governor();
2348
2349		if (gov) {
2350			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
2351				policy->governor->name, gov->name);
2352			policy->governor = gov;
2353		} else {
2354			return -EINVAL;
2355		}
2356	}
2357
2358	if (!try_module_get(policy->governor->owner))
2359		return -EINVAL;
 
2360
2361	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
 
2362
2363	if (policy->governor->init) {
2364		ret = policy->governor->init(policy);
2365		if (ret) {
2366			module_put(policy->governor->owner);
2367			return ret;
2368		}
2369	}
2370
2371	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
2372
2373	return 0;
2374}
2375
2376static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2377{
2378	if (cpufreq_suspended || !policy->governor)
2379		return;
2380
2381	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2382
2383	if (policy->governor->exit)
2384		policy->governor->exit(policy);
2385
2386	module_put(policy->governor->owner);
2387}
2388
2389int cpufreq_start_governor(struct cpufreq_policy *policy)
2390{
2391	int ret;
2392
2393	if (cpufreq_suspended)
2394		return 0;
2395
2396	if (!policy->governor)
2397		return -EINVAL;
2398
2399	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2400
2401	if (cpufreq_driver->get)
2402		cpufreq_verify_current_freq(policy, false);
2403
2404	if (policy->governor->start) {
2405		ret = policy->governor->start(policy);
2406		if (ret)
2407			return ret;
2408	}
2409
2410	if (policy->governor->limits)
2411		policy->governor->limits(policy);
 
2412
2413	return 0;
2414}
2415
2416void cpufreq_stop_governor(struct cpufreq_policy *policy)
2417{
2418	if (cpufreq_suspended || !policy->governor)
2419		return;
2420
2421	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2422
2423	if (policy->governor->stop)
2424		policy->governor->stop(policy);
2425}
2426
2427static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2428{
2429	if (cpufreq_suspended || !policy->governor)
2430		return;
2431
2432	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2433
2434	if (policy->governor->limits)
2435		policy->governor->limits(policy);
2436}
2437
2438int cpufreq_register_governor(struct cpufreq_governor *governor)
2439{
2440	int err;
2441
2442	if (!governor)
2443		return -EINVAL;
2444
2445	if (cpufreq_disabled())
2446		return -ENODEV;
2447
2448	mutex_lock(&cpufreq_governor_mutex);
2449
 
2450	err = -EBUSY;
2451	if (!find_governor(governor->name)) {
2452		err = 0;
2453		list_add(&governor->governor_list, &cpufreq_governor_list);
2454	}
2455
2456	mutex_unlock(&cpufreq_governor_mutex);
2457	return err;
2458}
2459EXPORT_SYMBOL_GPL(cpufreq_register_governor);
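/*
 * Editorial sketch (hypothetical governor, not part of this file): roughly
 * what the built-in "performance" governor amounts to -- a ->limits()
 * callback that pins the policy to its maximum, registered at module init.
 * ->limits() runs with policy->rwsem held, hence __cpufreq_driver_target().
 */
static void example_gov_limits(struct cpufreq_policy *policy)
{
	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}

static struct cpufreq_governor example_governor = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.limits	= example_gov_limits,
};

static int __init example_gov_register(void)
{
	return cpufreq_register_governor(&example_governor);
}
module_init(example_gov_register);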
2460
2461void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2462{
2463	struct cpufreq_policy *policy;
2464	unsigned long flags;
2465
2466	if (!governor)
2467		return;
2468
2469	if (cpufreq_disabled())
2470		return;
2471
2472	/* clear last_governor for all inactive policies */
2473	read_lock_irqsave(&cpufreq_driver_lock, flags);
2474	for_each_inactive_policy(policy) {
2475		if (!strcmp(policy->last_governor, governor->name)) {
2476			policy->governor = NULL;
2477			strcpy(policy->last_governor, "\0");
2478		}
2479	}
2480	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2481
2482	mutex_lock(&cpufreq_governor_mutex);
2483	list_del(&governor->governor_list);
2484	mutex_unlock(&cpufreq_governor_mutex);
 
2485}
2486EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2487
2488
2489/*********************************************************************
2490 *                          POLICY INTERFACE                         *
2491 *********************************************************************/
2492
2493/**
2494 * cpufreq_get_policy - get the current cpufreq_policy
2495 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2496 *	is written
2497 * @cpu: CPU to find the policy for
2498 *
2499 * Reads the current cpufreq policy.
2500 */
2501int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2502{
2503	struct cpufreq_policy *cpu_policy;
2504	if (!policy)
2505		return -EINVAL;
2506
2507	cpu_policy = cpufreq_cpu_get(cpu);
2508	if (!cpu_policy)
2509		return -EINVAL;
2510
2511	memcpy(policy, cpu_policy, sizeof(*policy));
2512
2513	cpufreq_cpu_put(cpu_policy);
2514	return 0;
2515}
2516EXPORT_SYMBOL(cpufreq_get_policy);
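/*
 * Editorial usage sketch (hypothetical, not part of this file): taking a
 * snapshot of the policy for a CPU.  The copy is consistent at the time of
 * the call but is not kept up to date afterwards.
 */
static void example_dump_limits(unsigned int cpu)
{
	struct cpufreq_policy snapshot;

	if (!cpufreq_get_policy(&snapshot, cpu))
		pr_info("cpu%u: %u - %u kHz\n", cpu, snapshot.min, snapshot.max);
}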
2517
2518/**
2519 * cpufreq_set_policy - Modify cpufreq policy parameters.
2520 * @policy: Policy object to modify.
2521 * @new_gov: Policy governor pointer.
2522 * @new_pol: Policy value (for drivers with built-in governors).
2523 *
2524 * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
2525 * limits to be set for the policy, update @policy with the verified limits
2526 * values and either invoke the driver's ->setpolicy() callback (if present) or
2527 * carry out a governor update for @policy.  That is, run the current governor's
2528 * ->limits() callback (if @new_gov points to the same object as the one in
2529 * @policy) or replace the governor for @policy with @new_gov.
2530 *
2531 * The cpuinfo part of @policy is not updated by this function.
2532 */
2533static int cpufreq_set_policy(struct cpufreq_policy *policy,
2534			      struct cpufreq_governor *new_gov,
2535			      unsigned int new_pol)
2536{
2537	struct cpufreq_policy_data new_data;
2538	struct cpufreq_governor *old_gov;
2539	int ret;
2540
2541	memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2542	new_data.freq_table = policy->freq_table;
2543	new_data.cpu = policy->cpu;
2544	/*
2545	 * The PM QoS framework collects all the requests from users and provides
2546	 * us with the final aggregated value here.
2547	 */
2548	new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2549	new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2550
2551	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2552		 new_data.cpu, new_data.min, new_data.max);
2553
2554	/*
2555	 * Verify that the CPU speed can be set within these limits and make sure
2556	 * that min <= max.
2557	 */
2558	ret = cpufreq_driver->verify(&new_data);
2559	if (ret)
2560		return ret;
2561
2562	/*
2563	 * Resolve policy min/max to available frequencies. This ensures that the
2564	 * resolved frequencies neither overshoot the requested maximum nor
2565	 * undershoot the requested minimum.
2566	 */
2567	policy->min = new_data.min;
2568	policy->max = new_data.max;
2569	policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
2570	policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
2571	trace_cpu_frequency_limits(policy);
2572
2573	policy->cached_target_freq = UINT_MAX;
 
2574
2575	pr_debug("new min and max freqs are %u - %u kHz\n",
2576		 policy->min, policy->max);
2577
2578	if (cpufreq_driver->setpolicy) {
2579		policy->policy = new_pol;
2580		pr_debug("setting range\n");
2581		return cpufreq_driver->setpolicy(policy);
2582	}
2583
2584	if (new_gov == policy->governor) {
2585		pr_debug("governor limits update\n");
2586		cpufreq_governor_limits(policy);
2587		return 0;
2588	}
2589
2590	pr_debug("governor switch\n");
2591
2592	/* save old, working values */
2593	old_gov = policy->governor;
2594	/* end old governor */
2595	if (old_gov) {
2596		cpufreq_stop_governor(policy);
2597		cpufreq_exit_governor(policy);
 
 
2598	}
2599
2600	/* start new governor */
2601	policy->governor = new_gov;
2602	ret = cpufreq_init_governor(policy);
2603	if (!ret) {
2604		ret = cpufreq_start_governor(policy);
2605		if (!ret) {
2606			pr_debug("governor change\n");
2607			sched_cpufreq_governor_change(policy, old_gov);
2608			return 0;
2609		}
2610		cpufreq_exit_governor(policy);
2611	}
2612
2613	/* new governor failed, so re-start old one */
2614	pr_debug("starting governor %s failed\n", policy->governor->name);
2615	if (old_gov) {
2616		policy->governor = old_gov;
2617		if (cpufreq_init_governor(policy))
2618			policy->governor = NULL;
2619		else
2620			cpufreq_start_governor(policy);
2621	}
2622
2623	return ret;
2624}
2625
2626/**
2627 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2628 * @cpu: CPU to re-evaluate the policy for.
2629 *
2630 * Update the current frequency for the cpufreq policy of @cpu and use
2631 * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
2632 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2633 * for the policy in question, among other things.
2634 */
2635void cpufreq_update_policy(unsigned int cpu)
2636{
2637	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
2638
2639	if (!policy)
2640		return;
2641
2642	/*
2643	 * The BIOS might change the frequency behind our back, so ask the driver
2644	 * for the current frequency and notify the governors about a change.
2645	 */
2646	if (cpufreq_driver->get && has_target() &&
2647	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2648		goto unlock;
2649
2650	refresh_frequency_limits(policy);
2651
2652unlock:
2653	cpufreq_cpu_release(policy);
 
2654}
2655EXPORT_SYMBOL(cpufreq_update_policy);
2656
2657/**
2658 * cpufreq_update_limits - Update policy limits for a given CPU.
2659 * @cpu: CPU to update the policy limits for.
2660 *
2661 * Invoke the driver's ->update_limits callback if present or call
2662 * cpufreq_update_policy() for @cpu.
2663 */
2664void cpufreq_update_limits(unsigned int cpu)
2665{
2666	if (cpufreq_driver->update_limits)
2667		cpufreq_driver->update_limits(cpu);
2668	else
2669		cpufreq_update_policy(cpu);
2670}
2671EXPORT_SYMBOL_GPL(cpufreq_update_limits);
2672
2673/*********************************************************************
2674 *               BOOST						     *
2675 *********************************************************************/
2676static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
2677{
2678	int ret;
 
 
2679
2680	if (!policy->freq_table)
2681		return -ENXIO;
2682
2683	ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
2684	if (ret) {
2685		pr_err("%s: Policy frequency update failed\n", __func__);
2686		return ret;
2687	}
2688
2689	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
2690	if (ret < 0)
2691		return ret;
2692
2693	return 0;
2694}
2695
2696int cpufreq_boost_trigger_state(int state)
2697{
2698	struct cpufreq_policy *policy;
2699	unsigned long flags;
2700	int ret = 0;
2701
2702	if (cpufreq_driver->boost_enabled == state)
2703		return 0;
2704
2705	write_lock_irqsave(&cpufreq_driver_lock, flags);
2706	cpufreq_driver->boost_enabled = state;
2707	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2708
2709	cpus_read_lock();
2710	for_each_active_policy(policy) {
2711		ret = cpufreq_driver->set_boost(policy, state);
2712		if (ret)
2713			goto err_reset_state;
2714	}
2715	cpus_read_unlock();
2716
2717	return 0;
2718
2719err_reset_state:
2720	cpus_read_unlock();
2721
2722	write_lock_irqsave(&cpufreq_driver_lock, flags);
2723	cpufreq_driver->boost_enabled = !state;
2724	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2725
2726	pr_err("%s: Cannot %s BOOST\n",
2727	       __func__, state ? "enable" : "disable");
2728
2729	return ret;
2730}
2731
2732static bool cpufreq_boost_supported(void)
2733{
2734	return cpufreq_driver->set_boost;
2735}
2736
2737static int create_boost_sysfs_file(void)
2738{
2739	int ret;
2740
2741	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2742	if (ret)
2743		pr_err("%s: cannot register global BOOST sysfs file\n",
2744		       __func__);
2745
2746	return ret;
2747}
2748
2749static void remove_boost_sysfs_file(void)
2750{
2751	if (cpufreq_boost_supported())
2752		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2753}
2754
2755int cpufreq_enable_boost_support(void)
2756{
2757	if (!cpufreq_driver)
2758		return -EINVAL;
2759
2760	if (cpufreq_boost_supported())
2761		return 0;
2762
2763	cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2764
2765	/* This will get removed on driver unregister */
2766	return create_boost_sysfs_file();
2767}
2768EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
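/*
 * Editorial sketch (hypothetical, not part of this file): a driver whose
 * frequency table contains boost entries can opt into the software boost
 * path from its ->init() callback; the core then installs
 * cpufreq_boost_set_sw() as ->set_boost and exposes the global "boost"
 * sysfs attribute.
 */
static int example_init_with_boost(struct cpufreq_policy *policy)
{
	/* ... populate policy->freq_table, possibly with boost entries ... */

	if (policy_has_boost_freq(policy))
		return cpufreq_enable_boost_support();

	return 0;
}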
2769
2770int cpufreq_boost_enabled(void)
2771{
2772	return cpufreq_driver->boost_enabled;
2773}
2774EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2775
2776/*********************************************************************
2777 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2778 *********************************************************************/
2779static enum cpuhp_state hp_online;
2780
2781static int cpuhp_cpufreq_online(unsigned int cpu)
2782{
2783	cpufreq_online(cpu);
2784
2785	return 0;
2786}
2787
2788static int cpuhp_cpufreq_offline(unsigned int cpu)
2789{
2790	cpufreq_offline(cpu);
2791
2792	return 0;
2793}
2794
2795/**
2796 * cpufreq_register_driver - register a CPU Frequency driver
2797 * @driver_data: A struct cpufreq_driver containing the values
2798 * submitted by the CPU Frequency driver.
2799 *
2800 * Registers a CPU frequency driver with the cpufreq core. Returns zero on
2801 * success, or -EEXIST when another driver got here first (and isn't
2802 * unregistered in the meantime).
2803 *
2804 */
2805int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2806{
2807	unsigned long flags;
2808	int ret;
2809
2810	if (cpufreq_disabled())
2811		return -ENODEV;
2812
2813	/*
2814	 * The cpufreq core depends heavily on the availability of the CPU device
2815	 * structures; make sure they are available before proceeding further.
2816	 */
2817	if (!get_cpu_device(0))
2818		return -EPROBE_DEFER;
2819
2820	if (!driver_data || !driver_data->verify || !driver_data->init ||
2821	    !(driver_data->setpolicy || driver_data->target_index ||
2822		    driver_data->target) ||
2823	     (driver_data->setpolicy && (driver_data->target_index ||
2824		    driver_data->target)) ||
2825	     (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2826	     (!driver_data->online != !driver_data->offline))
2827		return -EINVAL;
2828
2829	pr_debug("trying to register driver %s\n", driver_data->name);
2830
2831	/* Protect against concurrent CPU online/offline. */
2832	cpus_read_lock();
2833
2834	write_lock_irqsave(&cpufreq_driver_lock, flags);
2835	if (cpufreq_driver) {
2836		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2837		ret = -EEXIST;
2838		goto out;
2839	}
2840	cpufreq_driver = driver_data;
2841	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2842
2843	/*
2844	 * Mark support for the scheduler's frequency invariance engine for
2845	 * drivers that implement target(), target_index() or fast_switch().
2846	 */
2847	if (!cpufreq_driver->setpolicy) {
2848		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
2849		pr_debug("supports frequency invariance");
2850	}
2851
2852	if (driver_data->setpolicy)
2853		driver_data->flags |= CPUFREQ_CONST_LOOPS;
2854
2855	if (cpufreq_boost_supported()) {
2856		ret = create_boost_sysfs_file();
2857		if (ret)
2858			goto err_null_driver;
 
2859	}
2860
2861	ret = subsys_interface_register(&cpufreq_interface);
2862	if (ret)
2863		goto err_boost_unreg;
2864
2865	if (unlikely(list_empty(&cpufreq_policy_list))) {
2866		/* if all ->init() calls failed, unregister */
2867		ret = -ENODEV;
2868		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2869			 driver_data->name);
2870		goto err_if_unreg;
 
2871	}
2872
2873	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
2874						   "cpufreq:online",
2875						   cpuhp_cpufreq_online,
2876						   cpuhp_cpufreq_offline);
2877	if (ret < 0)
2878		goto err_if_unreg;
2879	hp_online = ret;
2880	ret = 0;
2881
2882	pr_debug("driver %s up and running\n", driver_data->name);
2883	goto out;
2884
 
2885err_if_unreg:
2886	subsys_interface_unregister(&cpufreq_interface);
2887err_boost_unreg:
2888	remove_boost_sysfs_file();
 
2889err_null_driver:
2890	write_lock_irqsave(&cpufreq_driver_lock, flags);
2891	cpufreq_driver = NULL;
2892	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2893out:
2894	cpus_read_unlock();
2895	return ret;
2896}
2897EXPORT_SYMBOL_GPL(cpufreq_register_driver);
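/*
 * Editorial sketch (hypothetical driver, not part of this file): the minimal
 * set of callbacks that passes the sanity checks above -- ->init, ->verify
 * and exactly one of ->setpolicy or ->target/->target_index.  Frequencies
 * and names are made up.
 */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 500000 },		/* kHz */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	policy->freq_table = example_freq_table;
	policy->cpuinfo.transition_latency = 100 * 1000;	/* ns, made up */
	return 0;
}

static int example_cpufreq_target(struct cpufreq_policy *policy,
				  unsigned int index)
{
	/* program the hardware to example_freq_table[index].frequency here */
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.init		= example_cpufreq_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_cpufreq_target,
};

/* module init would then call cpufreq_register_driver(&example_cpufreq_driver). */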
2898
2899/*
2900 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2901 *
2902 * Unregister the current CPUFreq driver. Only call this if you have
2903 * the right to do so, i.e. if you have succeeded in initialising before!
2904 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2905 * currently not initialised.
2906 */
2907int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2908{
2909	unsigned long flags;
2910
2911	if (!cpufreq_driver || (driver != cpufreq_driver))
2912		return -EINVAL;
2913
2914	pr_debug("unregistering driver %s\n", driver->name);
2915
2916	/* Protect against concurrent cpu hotplug */
2917	cpus_read_lock();
2918	subsys_interface_unregister(&cpufreq_interface);
2919	remove_boost_sysfs_file();
2920	static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
2921	cpuhp_remove_state_nocalls_cpuslocked(hp_online);
 
2922
 
2923	write_lock_irqsave(&cpufreq_driver_lock, flags);
2924
2925	cpufreq_driver = NULL;
2926
2927	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2928	cpus_read_unlock();
2929
2930	return 0;
2931}
2932EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2933
2934static int __init cpufreq_core_init(void)
2935{
2936	struct cpufreq_governor *gov = cpufreq_default_governor();
2937
2938	if (cpufreq_disabled())
2939		return -ENODEV;
2940
2941	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2942	BUG_ON(!cpufreq_global_kobject);
2943
2944	if (!strlen(default_governor))
2945		strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
2946
2947	return 0;
2948}
2949module_param(off, int, 0444);
2950module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
2951core_initcall(cpufreq_core_init);