v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * processor_throttling.c - Throttling submodule of the ACPI processor driver
   4 *
   5 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
   6 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
   7 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
   8 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
   9 *  			- Added processor hotplug support
  10 */
  11
  12#include <linux/kernel.h>
  13#include <linux/module.h>
  14#include <linux/slab.h>
  15#include <linux/init.h>
  16#include <linux/sched.h>
  17#include <linux/cpufreq.h>
  18#include <linux/acpi.h>
  19#include <acpi/processor.h>
  20#include <asm/io.h>
  21#include <linux/uaccess.h>
  22
  23#define PREFIX "ACPI: "
  24
  25#define ACPI_PROCESSOR_CLASS            "processor"
  26#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
  27ACPI_MODULE_NAME("processor_throttling");
  28
  29/* ignore_tpc:
  30 *  0 -> acpi processor driver doesn't ignore _TPC values
  31 *  1 -> acpi processor driver ignores _TPC values
  32 */
  33static int ignore_tpc;
  34module_param(ignore_tpc, int, 0644);
  35MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");
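/*
 * Note: the 0644 permissions also expose ignore_tpc as a root-writable module
 * parameter in sysfs, so it can be flipped at runtime as well as being set on
 * the kernel command line.
 */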
  36
  37struct throttling_tstate {
  38	unsigned int cpu;		/* cpu nr */
  39	int target_state;		/* target T-state */
  40};
  41
  42struct acpi_processor_throttling_arg {
  43	struct acpi_processor *pr;
  44	int target_state;
  45	bool force;
  46};
  47
  48#define THROTTLING_PRECHANGE       (1)
  49#define THROTTLING_POSTCHANGE      (2)
  50
  51static int acpi_processor_get_throttling(struct acpi_processor *pr);
  52static int __acpi_processor_set_throttling(struct acpi_processor *pr,
  53					   int state, bool force, bool direct);
  54
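/*
 * Walk the per-CPU _TSD data and build each CPU's shared_cpu_map for T-state
 * coordination.  CPUs that share a _TSD domain must agree on num_processors
 * and coord_type; on any inconsistency every CPU falls back to a single-CPU
 * domain with SW_ALL coordination (see the error path at the end).
 */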
  55static int acpi_processor_update_tsd_coord(void)
  56{
  57	int count, count_target;
  58	int retval = 0;
  59	unsigned int i, j;
  60	cpumask_var_t covered_cpus;
  61	struct acpi_processor *pr, *match_pr;
  62	struct acpi_tsd_package *pdomain, *match_pdomain;
  63	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
  64
  65	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
  66		return -ENOMEM;
  67
  68	/*
   69	 * Now that we have _TSD data from all CPUs, let's set up T-state
  70	 * coordination between all CPUs.
  71	 */
  72	for_each_possible_cpu(i) {
  73		pr = per_cpu(processors, i);
  74		if (!pr)
  75			continue;
  76
  77		/* Basic validity check for domain info */
  78		pthrottling = &(pr->throttling);
  79
  80		/*
   81		 * If the _TSD package for one CPU is invalid, the coordination
   82		 * among all CPUs is treated as invalid.
   83		 * This may be overly strict.
  84		 */
  85		if (!pthrottling->tsd_valid_flag) {
  86			retval = -EINVAL;
  87			break;
  88		}
  89	}
  90	if (retval)
  91		goto err_ret;
  92
  93	for_each_possible_cpu(i) {
  94		pr = per_cpu(processors, i);
  95		if (!pr)
  96			continue;
  97
  98		if (cpumask_test_cpu(i, covered_cpus))
  99			continue;
 100		pthrottling = &pr->throttling;
 101
 102		pdomain = &(pthrottling->domain_info);
 103		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
 104		cpumask_set_cpu(i, covered_cpus);
 105		/*
  106		 * If the number of processors in the TSD domain is 1, it is
 107		 * unnecessary to parse the coordination for this CPU.
 108		 */
 109		if (pdomain->num_processors <= 1)
 110			continue;
 111
 112		/* Validate the Domain info */
 113		count_target = pdomain->num_processors;
 114		count = 1;
 115
 116		for_each_possible_cpu(j) {
 117			if (i == j)
 118				continue;
 119
 120			match_pr = per_cpu(processors, j);
 121			if (!match_pr)
 122				continue;
 123
 124			match_pthrottling = &(match_pr->throttling);
 125			match_pdomain = &(match_pthrottling->domain_info);
 126			if (match_pdomain->domain != pdomain->domain)
 127				continue;
 128
 129			/* Here i and j are in the same domain.
 130			 * If two TSD packages have the same domain, they
  131			 * should have the same num_processors and
 132			 * coordination type. Otherwise it will be regarded
 133			 * as illegal.
 134			 */
 135			if (match_pdomain->num_processors != count_target) {
 136				retval = -EINVAL;
 137				goto err_ret;
 138			}
 139
 140			if (pdomain->coord_type != match_pdomain->coord_type) {
 141				retval = -EINVAL;
 142				goto err_ret;
 143			}
 144
 145			cpumask_set_cpu(j, covered_cpus);
 146			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
 147			count++;
 148		}
 149		for_each_possible_cpu(j) {
 150			if (i == j)
 151				continue;
 152
 153			match_pr = per_cpu(processors, j);
 154			if (!match_pr)
 155				continue;
 156
 157			match_pthrottling = &(match_pr->throttling);
 158			match_pdomain = &(match_pthrottling->domain_info);
 159			if (match_pdomain->domain != pdomain->domain)
 160				continue;
 161
 162			/*
  163			 * If some CPUs have the same domain, they
 164			 * will have the same shared_cpu_map.
 165			 */
 166			cpumask_copy(match_pthrottling->shared_cpu_map,
 167				     pthrottling->shared_cpu_map);
 168		}
 169	}
 170
 171err_ret:
 172	free_cpumask_var(covered_cpus);
 173
 174	for_each_possible_cpu(i) {
 175		pr = per_cpu(processors, i);
 176		if (!pr)
 177			continue;
 178
 179		/*
 180		 * Assume no coordination on any error parsing domain info.
 181		 * The coordination type will be forced as SW_ALL.
 182		 */
 183		if (retval) {
 184			pthrottling = &(pr->throttling);
 185			cpumask_clear(pthrottling->shared_cpu_map);
 186			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
 187			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
 188		}
 189	}
 190
 191	return retval;
 192}
 193
 194/*
 195 * Update the T-state coordination after the _TSD
  196 * data for all CPUs is obtained.
 197 */
 198void acpi_processor_throttling_init(void)
 199{
 200	if (acpi_processor_update_tsd_coord()) {
 201		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 202			"Assume no T-state coordination\n"));
 203	}
 204
 205	return;
 206}
 207
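/*
 * Called for every CPU in the shared_cpu_map around a T-state change.
 * THROTTLING_PRECHANGE raises the requested state as needed to satisfy the
 * thermal, user and _TPC limits; THROTTLING_POSTCHANGE simply records the
 * state that was actually programmed.
 */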
 208static int acpi_processor_throttling_notifier(unsigned long event, void *data)
 209{
 210	struct throttling_tstate *p_tstate = data;
 211	struct acpi_processor *pr;
  212	unsigned int cpu;
 213	int target_state;
 214	struct acpi_processor_limit *p_limit;
 215	struct acpi_processor_throttling *p_throttling;
 216
 217	cpu = p_tstate->cpu;
 218	pr = per_cpu(processors, cpu);
 219	if (!pr) {
 220		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
 221		return 0;
 222	}
 223	if (!pr->flags.throttling) {
 224		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
 225				"unsupported on CPU %d\n", cpu));
 226		return 0;
 227	}
 228	target_state = p_tstate->target_state;
 229	p_throttling = &(pr->throttling);
 230	switch (event) {
 231	case THROTTLING_PRECHANGE:
 232		/*
  233		 * The prechange event is used to choose a proper T-state
  234		 * that meets the thermal, user and _TPC limits.
 235		 */
 236		p_limit = &pr->limit;
 237		if (p_limit->thermal.tx > target_state)
 238			target_state = p_limit->thermal.tx;
 239		if (p_limit->user.tx > target_state)
 240			target_state = p_limit->user.tx;
 241		if (pr->throttling_platform_limit > target_state)
 242			target_state = pr->throttling_platform_limit;
 243		if (target_state >= p_throttling->state_count) {
 244			printk(KERN_WARNING
  245				"Exceeded the limit of T-state\n");
 246			target_state = p_throttling->state_count - 1;
 247		}
 248		p_tstate->target_state = target_state;
 249		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:"
 250				"target T-state of CPU %d is T%d\n",
 251				cpu, target_state));
 252		break;
 253	case THROTTLING_POSTCHANGE:
 254		/*
 255		 * Postchange event is only used to update the
 256		 * T-state flag of acpi_processor_throttling.
 257		 */
 258		p_throttling->state = target_state;
 259		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:"
 260				"CPU %d is switched to T%d\n",
 261				cpu, target_state));
 262		break;
 263	default:
 264		printk(KERN_WARNING
 265			"Unsupported Throttling notifier event\n");
 266		break;
 267	}
 268
 269	return 0;
 270}
 271
 272/*
 273 * _TPC - Throttling Present Capabilities
 274 */
 275static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
 276{
 277	acpi_status status = 0;
 278	unsigned long long tpc = 0;
 279
 280	if (!pr)
 281		return -EINVAL;
 282
 283	if (ignore_tpc)
 284		goto end;
 285
 286	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
 287	if (ACPI_FAILURE(status)) {
 288		if (status != AE_NOT_FOUND) {
 289			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
 290		}
 291		return -ENODEV;
 292	}
 293
 294end:
 295	pr->throttling_platform_limit = (int)tpc;
 296	return 0;
 297}
 298
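/*
 * Called when the platform limit may have changed: re-read _TPC and, if the
 * current T-state no longer satisfies it (or can now be relaxed), pick a new
 * target that also honours the thermal and user limits.
 */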
 299int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
 300{
 301	int result = 0;
 302	int throttling_limit;
 303	int current_state;
 304	struct acpi_processor_limit *limit;
 305	int target_state;
 306
 307	if (ignore_tpc)
 308		return 0;
 309
 310	result = acpi_processor_get_platform_limit(pr);
 311	if (result) {
 312		/* Throttling Limit is unsupported */
 313		return result;
 314	}
 315
 316	throttling_limit = pr->throttling_platform_limit;
 317	if (throttling_limit >= pr->throttling.state_count) {
  318		/* Incorrect Throttling Limit */
 319		return -EINVAL;
 320	}
 321
 322	current_state = pr->throttling.state;
 323	if (current_state > throttling_limit) {
 324		/*
  325		 * The current state already meets the requirement of
  326		 * the _TPC limit, but it is reasonable for OSPM to change
  327		 * T-states from high to low for better performance,
  328		 * provided the thermal and user limit conditions
  329		 * are still taken into account.
 330		 */
 331		limit = &pr->limit;
 332		target_state = throttling_limit;
 333		if (limit->thermal.tx > target_state)
 334			target_state = limit->thermal.tx;
 335		if (limit->user.tx > target_state)
 336			target_state = limit->user.tx;
 337	} else if (current_state == throttling_limit) {
 338		/*
 339		 * Unnecessary to change the throttling state
 340		 */
 341		return 0;
 342	} else {
 343		/*
  344		 * If the current state is lower than the _TPC limit, it
  345		 * will be forced to switch to the throttling state defined
  346		 * by throttling_platform_limit.
  347		 * Because the previous state already meets the thermal and
  348		 * user limit conditions, it is unnecessary to check them again.
 349		 */
 350		target_state = throttling_limit;
 351	}
 352	return acpi_processor_set_throttling(pr, target_state, false);
 353}
 354
 355/*
 356 * This function is used to reevaluate whether the T-state is valid
 357 * after one CPU is onlined/offlined.
  358 * Note that it won't reevaluate the following properties of
  359 * the T-state:
  360 *	1. Control method
  361 *	2. The number of supported T-states
  362 *	3. TSD domain
 363 */
 364void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
 365					bool is_dead)
 366{
 367	int result = 0;
 368
 369	if (is_dead) {
 370		/* When one CPU is offline, the T-state throttling
 371		 * will be invalidated.
 372		 */
 373		pr->flags.throttling = 0;
 374		return;
 375	}
  376	/* The following rechecks whether the T-state is valid for
  377	 * the online CPU.
 378	 */
 379	if (!pr->throttling.state_count) {
  380		/* If the number of T-states is invalid, throttling
  381		 * is invalidated.
 382		 */
 383		pr->flags.throttling = 0;
 384		return;
 385	}
 386	pr->flags.throttling = 1;
 387
 388	/* Disable throttling (if enabled).  We'll let subsequent
  389	 * policy (e.g. thermal) decide to lower performance if it
 390	 * so chooses, but for now we'll crank up the speed.
 391	 */
 392
 393	result = acpi_processor_get_throttling(pr);
 394	if (result)
 395		goto end;
 396
 397	if (pr->throttling.state) {
 398		result = acpi_processor_set_throttling(pr, 0, false);
 399		if (result)
 400			goto end;
 401	}
 402
 403end:
 404	if (result)
 405		pr->flags.throttling = 0;
 406}
 407/*
 408 * _PTC - Processor Throttling Control (and status) register location
 409 */
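/*
 * _PTC returns a two-element package: element 0 describes the control
 * register and element 1 the status register, each as a buffer holding an
 * acpi_ptc_register.  Both registers must fit within 32 bits
 * (bit_offset + bit_width <= 32), which is checked below.
 */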
 410static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
 411{
 412	int result = 0;
 413	acpi_status status = 0;
 414	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 415	union acpi_object *ptc = NULL;
 416	union acpi_object obj = { 0 };
 417	struct acpi_processor_throttling *throttling;
 418
 419	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
 420	if (ACPI_FAILURE(status)) {
 421		if (status != AE_NOT_FOUND) {
 422			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
 423		}
 424		return -ENODEV;
 425	}
 426
 427	ptc = (union acpi_object *)buffer.pointer;
 428	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
 429	    || (ptc->package.count != 2)) {
 430		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
 431		result = -EFAULT;
 432		goto end;
 433	}
 434
 435	/*
 436	 * control_register
 437	 */
 438
 439	obj = ptc->package.elements[0];
 440
 441	if ((obj.type != ACPI_TYPE_BUFFER)
 442	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
 443	    || (obj.buffer.pointer == NULL)) {
 444		printk(KERN_ERR PREFIX
 445		       "Invalid _PTC data (control_register)\n");
 446		result = -EFAULT;
 447		goto end;
 448	}
 449	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
 450	       sizeof(struct acpi_ptc_register));
 451
 452	/*
 453	 * status_register
 454	 */
 455
 456	obj = ptc->package.elements[1];
 457
 458	if ((obj.type != ACPI_TYPE_BUFFER)
 459	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
 460	    || (obj.buffer.pointer == NULL)) {
 461		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
 462		result = -EFAULT;
 463		goto end;
 464	}
 465
 466	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
 467	       sizeof(struct acpi_ptc_register));
 468
 469	throttling = &pr->throttling;
 470
 471	if ((throttling->control_register.bit_width +
 472		throttling->control_register.bit_offset) > 32) {
 473		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
 474		result = -EFAULT;
 475		goto end;
 476	}
 477
 478	if ((throttling->status_register.bit_width +
 479		throttling->status_register.bit_offset) > 32) {
 480		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
 481		result = -EFAULT;
 482		goto end;
 483	}
 484
 485      end:
 486	kfree(buffer.pointer);
 487
 488	return result;
 489}
 490
 491/*
 492 * _TSS - Throttling Supported States
 493 */
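/*
 * Each _TSS entry is a package of five integers (hence the "NNNNN" format
 * below): the frequency as a percentage of maximum, power, transition
 * latency, and the control/status values used to program and verify the
 * T-state.
 */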
 494static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
 495{
 496	int result = 0;
 497	acpi_status status = AE_OK;
 498	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 499	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
 500	struct acpi_buffer state = { 0, NULL };
 501	union acpi_object *tss = NULL;
 502	int i;
 503
 504	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
 505	if (ACPI_FAILURE(status)) {
 506		if (status != AE_NOT_FOUND) {
 507			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
 508		}
 509		return -ENODEV;
 510	}
 511
 512	tss = buffer.pointer;
 513	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
 514		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
 515		result = -EFAULT;
 516		goto end;
 517	}
 518
 519	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
 520			  tss->package.count));
 521
 522	pr->throttling.state_count = tss->package.count;
 523	pr->throttling.states_tss =
 524	    kmalloc_array(tss->package.count,
 525			  sizeof(struct acpi_processor_tx_tss),
 526			  GFP_KERNEL);
 527	if (!pr->throttling.states_tss) {
 528		result = -ENOMEM;
 529		goto end;
 530	}
 531
 532	for (i = 0; i < pr->throttling.state_count; i++) {
 533
 534		struct acpi_processor_tx_tss *tx =
 535		    (struct acpi_processor_tx_tss *)&(pr->throttling.
 536						      states_tss[i]);
 537
 538		state.length = sizeof(struct acpi_processor_tx_tss);
 539		state.pointer = tx;
 540
 541		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
 542
 543		status = acpi_extract_package(&(tss->package.elements[i]),
 544					      &format, &state);
 545		if (ACPI_FAILURE(status)) {
 546			ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
 547			result = -EFAULT;
 548			kfree(pr->throttling.states_tss);
 549			goto end;
 550		}
 551
 552		if (!tx->freqpercentage) {
 553			printk(KERN_ERR PREFIX
 554			       "Invalid _TSS data: freq is zero\n");
 555			result = -EFAULT;
 556			kfree(pr->throttling.states_tss);
 557			goto end;
 558		}
 559	}
 560
 561      end:
 562	kfree(buffer.pointer);
 563
 564	return result;
 565}
 566
 567/*
 568 * _TSD - T-State Dependencies
 569 */
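/*
 * _TSD returns a single package describing the dependency domain:
 * {num_entries, revision, domain, coord_type, num_processors}.  Only the
 * SW_ALL, SW_ANY and HW_ALL coordination types are accepted below; anything
 * else invalidates the package.
 */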
 570static int acpi_processor_get_tsd(struct acpi_processor *pr)
 571{
 572	int result = 0;
 573	acpi_status status = AE_OK;
 574	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 575	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
 576	struct acpi_buffer state = { 0, NULL };
 577	union acpi_object *tsd = NULL;
 578	struct acpi_tsd_package *pdomain;
 579	struct acpi_processor_throttling *pthrottling;
 580
 581	pthrottling = &pr->throttling;
 582	pthrottling->tsd_valid_flag = 0;
 583
 584	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
 585	if (ACPI_FAILURE(status)) {
 586		if (status != AE_NOT_FOUND) {
 587			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
 588		}
 589		return -ENODEV;
 590	}
 591
 592	tsd = buffer.pointer;
 593	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
 594		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
 595		result = -EFAULT;
 596		goto end;
 597	}
 598
 599	if (tsd->package.count != 1) {
 600		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
 601		result = -EFAULT;
 602		goto end;
 603	}
 604
 605	pdomain = &(pr->throttling.domain_info);
 606
 607	state.length = sizeof(struct acpi_tsd_package);
 608	state.pointer = pdomain;
 609
 610	status = acpi_extract_package(&(tsd->package.elements[0]),
 611				      &format, &state);
 612	if (ACPI_FAILURE(status)) {
 613		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
 614		result = -EFAULT;
 615		goto end;
 616	}
 617
 618	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
 619		printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
 620		result = -EFAULT;
 621		goto end;
 622	}
 623
 624	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
 625		printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
 626		result = -EFAULT;
 627		goto end;
 628	}
 629
 630	pthrottling = &pr->throttling;
 631	pthrottling->tsd_valid_flag = 1;
 632	pthrottling->shared_type = pdomain->coord_type;
 633	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
 634	/*
  635	 * If the coordination type is not defined in the ACPI spec,
  636	 * the tsd_valid_flag will be cleared and the coordination type
  637	 * will be forced to DOMAIN_COORD_TYPE_SW_ALL.
 638	 */
 639	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
 640		pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
 641		pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
 642		pthrottling->tsd_valid_flag = 0;
 643		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
 644	}
 645
 646      end:
 647	kfree(buffer.pointer);
 648	return result;
 649}
 650
 651/* --------------------------------------------------------------------------
 652                              Throttling Control
 653   -------------------------------------------------------------------------- */
 654static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
 655{
 656	int state = 0;
 657	u32 value = 0;
 658	u32 duty_mask = 0;
 659	u32 duty_value = 0;
 660
 661	if (!pr)
 662		return -EINVAL;
 663
 664	if (!pr->flags.throttling)
 665		return -ENODEV;
 666
 667	/*
 668	 * We don't care about error returns - we just try to mark
 669	 * these reserved so that nobody else is confused into thinking
  670	 * that this region might be unused.
 671	 *
 672	 * (In particular, allocating the IO range for Cardbus)
 673	 */
 674	request_region(pr->throttling.address, 6, "ACPI CPU throttle");
 675
 676	pr->throttling.state = 0;
 677
 678	duty_mask = pr->throttling.state_count - 1;
 679
 680	duty_mask <<= pr->throttling.duty_offset;
 681
 682	local_irq_disable();
 683
 684	value = inl(pr->throttling.address);
 685
 686	/*
 687	 * Compute the current throttling state when throttling is enabled
 688	 * (bit 4 is on).
 689	 */
 690	if (value & 0x10) {
 691		duty_value = value & duty_mask;
 692		duty_value >>= pr->throttling.duty_offset;
 693
 694		if (duty_value)
 695			state = pr->throttling.state_count - duty_value;
 696	}
 697
 698	pr->throttling.state = state;
 699
 700	local_irq_enable();
 701
 702	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 703			  "Throttling state is T%d (%d%% throttling applied)\n",
 704			  state, pr->throttling.states[state].performance));
 705
 706	return 0;
 707}
 708
 709#ifdef CONFIG_X86
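/*
 * For the FixedHardware address space on Intel CPUs with the ACPI feature
 * flag, throttling is backed by the IA32_THERM_CONTROL MSR (software
 * controlled clock modulation); other vendors are not handled here.
 */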
 710static int acpi_throttling_rdmsr(u64 *value)
 711{
 712	u64 msr_high, msr_low;
 713	u64 msr = 0;
 714	int ret = -1;
 715
 716	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
 717		!this_cpu_has(X86_FEATURE_ACPI)) {
 718		printk(KERN_ERR PREFIX
  719			"HARDWARE addr space, NOT supported yet\n");
 720	} else {
 721		msr_low = 0;
 722		msr_high = 0;
 723		rdmsr_safe(MSR_IA32_THERM_CONTROL,
  724			(u32 *)&msr_low, (u32 *)&msr_high);
 725		msr = (msr_high << 32) | msr_low;
 726		*value = (u64) msr;
 727		ret = 0;
 728	}
 729	return ret;
 730}
 731
 732static int acpi_throttling_wrmsr(u64 value)
 733{
 734	int ret = -1;
 735	u64 msr;
 736
 737	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
 738		!this_cpu_has(X86_FEATURE_ACPI)) {
 739		printk(KERN_ERR PREFIX
  740			"HARDWARE addr space, NOT supported yet\n");
 741	} else {
 742		msr = value;
 743		wrmsr_safe(MSR_IA32_THERM_CONTROL,
 744			msr & 0xffffffff, msr >> 32);
 745		ret = 0;
 746	}
 747	return ret;
 748}
 749#else
 750static int acpi_throttling_rdmsr(u64 *value)
 751{
 752	printk(KERN_ERR PREFIX
  753		"HARDWARE addr space, NOT supported yet\n");
 754	return -1;
 755}
 756
 757static int acpi_throttling_wrmsr(u64 value)
 758{
 759	printk(KERN_ERR PREFIX
  760		"HARDWARE addr space, NOT supported yet\n");
 761	return -1;
 762}
 763#endif
 764
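/*
 * Read the current throttling control value, either from the SystemIO status
 * register described by _PTC or, for FixedHardware, via the MSR helpers
 * above.  Only bit_width bits starting at bit_offset are returned.
 */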
 765static int acpi_read_throttling_status(struct acpi_processor *pr,
 766					u64 *value)
 767{
 768	u32 bit_width, bit_offset;
 769	u32 ptc_value;
 770	u64 ptc_mask;
 771	struct acpi_processor_throttling *throttling;
 772	int ret = -1;
 773
 774	throttling = &pr->throttling;
 775	switch (throttling->status_register.space_id) {
 776	case ACPI_ADR_SPACE_SYSTEM_IO:
 777		bit_width = throttling->status_register.bit_width;
 778		bit_offset = throttling->status_register.bit_offset;
 779
 780		acpi_os_read_port((acpi_io_address) throttling->status_register.
 781				  address, &ptc_value,
 782				  (u32) (bit_width + bit_offset));
 783		ptc_mask = (1 << bit_width) - 1;
 784		*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
 785		ret = 0;
 786		break;
 787	case ACPI_ADR_SPACE_FIXED_HARDWARE:
 788		ret = acpi_throttling_rdmsr(value);
 789		break;
 790	default:
 791		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
 792		       (u32) (throttling->status_register.space_id));
 793	}
 794	return ret;
 795}
 796
 797static int acpi_write_throttling_state(struct acpi_processor *pr,
 798				u64 value)
 799{
 800	u32 bit_width, bit_offset;
 801	u64 ptc_value;
 802	u64 ptc_mask;
 803	struct acpi_processor_throttling *throttling;
 804	int ret = -1;
 805
 806	throttling = &pr->throttling;
 807	switch (throttling->control_register.space_id) {
 808	case ACPI_ADR_SPACE_SYSTEM_IO:
 809		bit_width = throttling->control_register.bit_width;
 810		bit_offset = throttling->control_register.bit_offset;
 811		ptc_mask = (1 << bit_width) - 1;
 812		ptc_value = value & ptc_mask;
 813
 814		acpi_os_write_port((acpi_io_address) throttling->
 815					control_register.address,
 816					(u32) (ptc_value << bit_offset),
 817					(u32) (bit_width + bit_offset));
 818		ret = 0;
 819		break;
 820	case ACPI_ADR_SPACE_FIXED_HARDWARE:
 821		ret = acpi_throttling_wrmsr(value);
 822		break;
 823	default:
 824		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
 825		       (u32) (throttling->control_register.space_id));
 826	}
 827	return ret;
 828}
 829
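/*
 * The two helpers below translate between _TSS "control" values and T-state
 * indices: acpi_get_throttling_state() maps a raw value back to a state
 * number (or -1), acpi_get_throttling_value() performs the reverse lookup.
 */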
 830static int acpi_get_throttling_state(struct acpi_processor *pr,
 831				u64 value)
 832{
 833	int i;
 834
 835	for (i = 0; i < pr->throttling.state_count; i++) {
 836		struct acpi_processor_tx_tss *tx =
 837		    (struct acpi_processor_tx_tss *)&(pr->throttling.
 838						      states_tss[i]);
 839		if (tx->control == value)
 840			return i;
 841	}
 842	return -1;
 843}
 844
 845static int acpi_get_throttling_value(struct acpi_processor *pr,
 846			int state, u64 *value)
 847{
 848	int ret = -1;
 849
 850	if (state >= 0 && state <= pr->throttling.state_count) {
 851		struct acpi_processor_tx_tss *tx =
 852		    (struct acpi_processor_tx_tss *)&(pr->throttling.
 853						      states_tss[state]);
 854		*value = tx->control;
 855		ret = 0;
 856	}
 857	return ret;
 858}
 859
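/*
 * Query the current T-state through the _PTC status register.  If the value
 * read back does not match any _TSS entry, the state is unknown, so force a
 * switch to T0 to bring the processor into a well-defined state.
 */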
 860static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 861{
 862	int state = 0;
 863	int ret;
 864	u64 value;
 865
 866	if (!pr)
 867		return -EINVAL;
 868
 869	if (!pr->flags.throttling)
 870		return -ENODEV;
 871
 872	pr->throttling.state = 0;
 873
 874	value = 0;
 875	ret = acpi_read_throttling_status(pr, &value);
 876	if (ret >= 0) {
 877		state = acpi_get_throttling_state(pr, value);
 878		if (state == -1) {
 879			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 880				"Invalid throttling state, reset\n"));
 881			state = 0;
 882			ret = __acpi_processor_set_throttling(pr, state, true,
 883							      true);
 884			if (ret)
 885				return ret;
 886		}
 887		pr->throttling.state = state;
 888	}
 889
 890	return 0;
 891}
 892
 893static long __acpi_processor_get_throttling(void *data)
 894{
 895	struct acpi_processor *pr = data;
 896
 897	return pr->throttling.acpi_processor_get_throttling(pr);
 898}
 899
 900static int acpi_processor_get_throttling(struct acpi_processor *pr)
 901{
 902	if (!pr)
 903		return -EINVAL;
 904
 905	if (!pr->flags.throttling)
 906		return -ENODEV;
 907
 908	/*
 909	 * This is either called from the CPU hotplug callback of
 910	 * processor_driver or via the ACPI probe function. In the latter
 911	 * case the CPU is not guaranteed to be online. Both call sites are
 912	 * protected against CPU hotplug.
 913	 */
 914	if (!cpu_online(pr->id))
 915		return -ENODEV;
 916
 917	return call_on_cpu(pr->id, __acpi_processor_get_throttling, pr, false);
 918}
 919
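/*
 * Derive the FADT-based T-state table: state_count = 2^duty_width, and each
 * successive state reduces performance and power linearly in steps of
 * (1000 / state_count), expressed in tenths of a percent.
 */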
 920static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
 921{
 922	int i, step;
 923
 924	if (!pr->throttling.address) {
 925		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
 926		return -EINVAL;
 927	} else if (!pr->throttling.duty_width) {
 928		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
 929		return -EINVAL;
 930	}
 931	/* TBD: Support duty_cycle values that span bit 4. */
 932	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
 933		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
 934		return -EINVAL;
 935	}
 936
 937	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
 938
 939	/*
 940	 * Compute state values. Note that throttling displays a linear power
 941	 * performance relationship (at 50% performance the CPU will consume
 942	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
 943	 */
 944
 945	step = (1000 / pr->throttling.state_count);
 946
 947	for (i = 0; i < pr->throttling.state_count; i++) {
 948		pr->throttling.states[i].performance = 1000 - step * i;
 949		pr->throttling.states[i].power = 1000 - step * i;
 950	}
 951	return 0;
 952}
 953
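/*
 * Program a FADT duty-cycle throttling state: clear the enable bit (bit 4),
 * write the new duty value, then re-enable throttling.  A target state of 0
 * simply leaves throttling disabled.
 */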
 954static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
 955					      int state, bool force)
 956{
 957	u32 value = 0;
 958	u32 duty_mask = 0;
 959	u32 duty_value = 0;
 960
 961	if (!pr)
 962		return -EINVAL;
 963
 964	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
 965		return -EINVAL;
 966
 967	if (!pr->flags.throttling)
 968		return -ENODEV;
 969
 970	if (!force && (state == pr->throttling.state))
 971		return 0;
 972
 973	if (state < pr->throttling_platform_limit)
 974		return -EPERM;
 975	/*
 976	 * Calculate the duty_value and duty_mask.
 977	 */
 978	if (state) {
 979		duty_value = pr->throttling.state_count - state;
 980
 981		duty_value <<= pr->throttling.duty_offset;
 982
 983		/* Used to clear all duty_value bits */
 984		duty_mask = pr->throttling.state_count - 1;
 985
 986		duty_mask <<= acpi_gbl_FADT.duty_offset;
 987		duty_mask = ~duty_mask;
 988	}
 989
 990	local_irq_disable();
 991
 992	/*
 993	 * Disable throttling by writing a 0 to bit 4.  Note that we must
  994	 * turn it off before changing the duty_value.
 995	 */
 996	value = inl(pr->throttling.address);
 997	if (value & 0x10) {
 998		value &= 0xFFFFFFEF;
 999		outl(value, pr->throttling.address);
1000	}
1001
1002	/*
1003	 * Write the new duty_value and then enable throttling.  Note
1004	 * that a state value of 0 leaves throttling disabled.
1005	 */
1006	if (state) {
1007		value &= duty_mask;
1008		value |= duty_value;
1009		outl(value, pr->throttling.address);
1010
1011		value |= 0x00000010;
1012		outl(value, pr->throttling.address);
1013	}
1014
1015	pr->throttling.state = state;
1016
1017	local_irq_enable();
1018
1019	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1020			  "Throttling state set to T%d (%d%%)\n", state,
1021			  (pr->throttling.states[state].performance ? pr->
1022			   throttling.states[state].performance / 10 : 0)));
1023
1024	return 0;
1025}
1026
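/*
 * Program a _TSS-described state: look up the control value for the target
 * T-state and write it through the _PTC control register (or MSR).
 */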
1027static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
1028					     int state, bool force)
1029{
1030	int ret;
1031	u64 value;
1032
1033	if (!pr)
1034		return -EINVAL;
1035
1036	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
1037		return -EINVAL;
1038
1039	if (!pr->flags.throttling)
1040		return -ENODEV;
1041
1042	if (!force && (state == pr->throttling.state))
1043		return 0;
1044
1045	if (state < pr->throttling_platform_limit)
1046		return -EPERM;
1047
1048	value = 0;
1049	ret = acpi_get_throttling_value(pr, state, &value);
1050	if (ret >= 0) {
1051		acpi_write_throttling_state(pr, value);
1052		pr->throttling.state = state;
1053	}
1054
1055	return 0;
1056}
1057
1058static long acpi_processor_throttling_fn(void *data)
1059{
1060	struct acpi_processor_throttling_arg *arg = data;
1061	struct acpi_processor *pr = arg->pr;
1062
1063	return pr->throttling.acpi_processor_set_throttling(pr,
1064			arg->target_state, arg->force);
1065}
1066
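/*
 * Core T-state transition: run the PRECHANGE notifier for every online CPU in
 * the domain, program the new state (on one CPU for SW_ANY, or on every
 * online CPU in shared_cpu_map for SW_ALL/HW_ALL) via call_on_cpu(), then run
 * the POSTCHANGE notifier to record the result.
 */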
1067static int __acpi_processor_set_throttling(struct acpi_processor *pr,
1068					   int state, bool force, bool direct)
1069{
1070	int ret = 0;
1071	unsigned int i;
1072	struct acpi_processor *match_pr;
1073	struct acpi_processor_throttling *p_throttling;
1074	struct acpi_processor_throttling_arg arg;
1075	struct throttling_tstate t_state;
1076
1077	if (!pr)
1078		return -EINVAL;
1079
1080	if (!pr->flags.throttling)
1081		return -ENODEV;
1082
1083	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
1084		return -EINVAL;
1085
1086	if (cpu_is_offline(pr->id)) {
1087		/*
 1088		 * The CPU pointed to by pr->id is offline. There is no need
 1089		 * to change the throttling state any more.
1090		 */
1091		return -ENODEV;
1092	}
1093
1094	t_state.target_state = state;
1095	p_throttling = &(pr->throttling);
1096
1097	/*
1098	 * The throttling notifier will be called for every
 1099	 * affected CPU in order to get a proper T-state.
1100	 * The notifier event is THROTTLING_PRECHANGE.
1101	 */
1102	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
1103		t_state.cpu = i;
1104		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
1105							&t_state);
1106	}
1107	/*
 1108	 * acpi_processor_set_throttling will be called to switch the
 1109	 * T-state. If the coordination type is SW_ALL or HW_ALL, it is
 1110	 * necessary to call it for every affected CPU. Otherwise
 1111	 * it can be called only for the CPU pointed to by pr.
1112	 */
1113	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
1114		arg.pr = pr;
1115		arg.target_state = state;
1116		arg.force = force;
1117		ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
1118				  direct);
1119	} else {
1120		/*
1121		 * When the T-state coordination is SW_ALL or HW_ALL,
 1122		 * it is necessary to set the T-state for every affected
 1123		 * CPU.
1124		 */
1125		for_each_cpu_and(i, cpu_online_mask,
1126		    p_throttling->shared_cpu_map) {
1127			match_pr = per_cpu(processors, i);
1128			/*
1129			 * If the pointer is invalid, we will report the
1130			 * error message and continue.
1131			 */
1132			if (!match_pr) {
1133				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1134					"Invalid Pointer for CPU %d\n", i));
1135				continue;
1136			}
1137			/*
1138			 * If the throttling control is unsupported on CPU i,
1139			 * we will report the error message and continue.
1140			 */
1141			if (!match_pr->flags.throttling) {
1142				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1143					"Throttling Control is unsupported "
1144					"on CPU %d\n", i));
1145				continue;
1146			}
1147
1148			arg.pr = match_pr;
1149			arg.target_state = state;
1150			arg.force = force;
1151			ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
1152					  &arg, direct);
1153		}
1154	}
1155	/*
 1156	 * After set_throttling is called, the
 1157	 * throttling notifier is called for every
 1158	 * affected CPU to update the T-state.
 1159	 * The notifier event is THROTTLING_POSTCHANGE.
1160	 */
1161	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
1162		t_state.cpu = i;
1163		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
1164							&t_state);
1165	}
1166
1167	return ret;
1168}
1169
1170int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
1171				  bool force)
1172{
1173	return __acpi_processor_set_throttling(pr, state, force, false);
1174}
1175
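/*
 * Top-level setup: prefer the _PTC/_TSS/_TPC interface and fall back to FADT
 * duty-cycle throttling if any of them is missing; parse _TSD for
 * coordination, then disable any throttling the firmware left enabled.
 */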
1176int acpi_processor_get_throttling_info(struct acpi_processor *pr)
1177{
1178	int result = 0;
1179	struct acpi_processor_throttling *pthrottling;
1180
1181	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1182			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
1183			  pr->throttling.address,
1184			  pr->throttling.duty_offset,
1185			  pr->throttling.duty_width));
1186
1187	/*
1188	 * Evaluate _PTC, _TSS and _TPC
1189	 * They must all be present or none of them can be used.
1190	 */
1191	if (acpi_processor_get_throttling_control(pr) ||
1192		acpi_processor_get_throttling_states(pr) ||
1193		acpi_processor_get_platform_limit(pr))
1194	{
1195		pr->throttling.acpi_processor_get_throttling =
1196		    &acpi_processor_get_throttling_fadt;
1197		pr->throttling.acpi_processor_set_throttling =
1198		    &acpi_processor_set_throttling_fadt;
1199		if (acpi_processor_get_fadt_info(pr))
1200			return 0;
1201	} else {
1202		pr->throttling.acpi_processor_get_throttling =
1203		    &acpi_processor_get_throttling_ptc;
1204		pr->throttling.acpi_processor_set_throttling =
1205		    &acpi_processor_set_throttling_ptc;
1206	}
1207
1208	/*
 1209 * If the _TSD package for one CPU can't be parsed successfully, it means
 1210 * that this CPU will have no coordination with other CPUs.
1211	 */
1212	if (acpi_processor_get_tsd(pr)) {
1213		pthrottling = &pr->throttling;
1214		pthrottling->tsd_valid_flag = 0;
1215		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
1216		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
1217	}
1218
1219	/*
1220	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
1221	 * This shouldn't be an issue as few (if any) mobile systems ever
1222	 * used this part.
1223	 */
1224	if (errata.piix4.throttle) {
1225		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1226				  "Throttling not supported on PIIX4 A- or B-step\n"));
1227		return 0;
1228	}
1229
1230	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
1231			  pr->throttling.state_count));
1232
1233	pr->flags.throttling = 1;
1234
1235	/*
1236	 * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
1237	 * thermal) decide to lower performance if it so chooses, but for now
1238	 * we'll crank up the speed.
1239	 */
1240
1241	result = acpi_processor_get_throttling(pr);
1242	if (result)
1243		goto end;
1244
1245	if (pr->throttling.state) {
1246		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1247				  "Disabling throttling (was T%d)\n",
1248				  pr->throttling.state));
1249		result = acpi_processor_set_throttling(pr, 0, false);
1250		if (result)
1251			goto end;
1252	}
1253
1254      end:
1255	if (result)
1256		pr->flags.throttling = 0;
1257
1258	return result;
1259}
1260
v3.15
 
   1/*
   2 * processor_throttling.c - Throttling submodule of the ACPI processor driver
   3 *
   4 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
   5 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
   6 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
   7 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
   8 *  			- Added processor hotplug support
   9 *
  10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  11 *
  12 *  This program is free software; you can redistribute it and/or modify
  13 *  it under the terms of the GNU General Public License as published by
  14 *  the Free Software Foundation; either version 2 of the License, or (at
  15 *  your option) any later version.
  16 *
  17 *  This program is distributed in the hope that it will be useful, but
  18 *  WITHOUT ANY WARRANTY; without even the implied warranty of
  19 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  20 *  General Public License for more details.
  21 *
  22 *  You should have received a copy of the GNU General Public License along
  23 *  with this program; if not, write to the Free Software Foundation, Inc.,
  24 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
  25 *
  26 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  27 */
  28
  29#include <linux/kernel.h>
  30#include <linux/module.h>
  31#include <linux/slab.h>
  32#include <linux/init.h>
  33#include <linux/sched.h>
  34#include <linux/cpufreq.h>
  35#include <linux/acpi.h>
  36#include <acpi/processor.h>
  37#include <asm/io.h>
  38#include <asm/uaccess.h>
  39
  40#define PREFIX "ACPI: "
  41
  42#define ACPI_PROCESSOR_CLASS            "processor"
  43#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
  44ACPI_MODULE_NAME("processor_throttling");
  45
  46/* ignore_tpc:
  47 *  0 -> acpi processor driver doesn't ignore _TPC values
  48 *  1 -> acpi processor driver ignores _TPC values
  49 */
  50static int ignore_tpc;
  51module_param(ignore_tpc, int, 0644);
  52MODULE_PARM_DESC(ignore_tpc, "Disable broken BIOS _TPC throttling support");
  53
  54struct throttling_tstate {
  55	unsigned int cpu;		/* cpu nr */
  56	int target_state;		/* target T-state */
  57};
  58
  59struct acpi_processor_throttling_arg {
  60	struct acpi_processor *pr;
  61	int target_state;
  62	bool force;
  63};
  64
  65#define THROTTLING_PRECHANGE       (1)
  66#define THROTTLING_POSTCHANGE      (2)
  67
  68static int acpi_processor_get_throttling(struct acpi_processor *pr);
  69int acpi_processor_set_throttling(struct acpi_processor *pr,
  70						int state, bool force);
  71
  72static int acpi_processor_update_tsd_coord(void)
  73{
  74	int count, count_target;
  75	int retval = 0;
  76	unsigned int i, j;
  77	cpumask_var_t covered_cpus;
  78	struct acpi_processor *pr, *match_pr;
  79	struct acpi_tsd_package *pdomain, *match_pdomain;
  80	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
  81
  82	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
  83		return -ENOMEM;
  84
  85	/*
  86	 * Now that we have _TSD data from all CPUs, lets setup T-state
  87	 * coordination between all CPUs.
  88	 */
  89	for_each_possible_cpu(i) {
  90		pr = per_cpu(processors, i);
  91		if (!pr)
  92			continue;
  93
  94		/* Basic validity check for domain info */
  95		pthrottling = &(pr->throttling);
  96
  97		/*
  98		 * If tsd package for one cpu is invalid, the coordination
  99		 * among all CPUs is thought as invalid.
 100		 * Maybe it is ugly.
 101		 */
 102		if (!pthrottling->tsd_valid_flag) {
 103			retval = -EINVAL;
 104			break;
 105		}
 106	}
 107	if (retval)
 108		goto err_ret;
 109
 110	for_each_possible_cpu(i) {
 111		pr = per_cpu(processors, i);
 112		if (!pr)
 113			continue;
 114
 115		if (cpumask_test_cpu(i, covered_cpus))
 116			continue;
 117		pthrottling = &pr->throttling;
 118
 119		pdomain = &(pthrottling->domain_info);
 120		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
 121		cpumask_set_cpu(i, covered_cpus);
 122		/*
 123		 * If the number of processor in the TSD domain is 1, it is
 124		 * unnecessary to parse the coordination for this CPU.
 125		 */
 126		if (pdomain->num_processors <= 1)
 127			continue;
 128
 129		/* Validate the Domain info */
 130		count_target = pdomain->num_processors;
 131		count = 1;
 132
 133		for_each_possible_cpu(j) {
 134			if (i == j)
 135				continue;
 136
 137			match_pr = per_cpu(processors, j);
 138			if (!match_pr)
 139				continue;
 140
 141			match_pthrottling = &(match_pr->throttling);
 142			match_pdomain = &(match_pthrottling->domain_info);
 143			if (match_pdomain->domain != pdomain->domain)
 144				continue;
 145
 146			/* Here i and j are in the same domain.
 147			 * If two TSD packages have the same domain, they
 148			 * should have the same num_porcessors and
 149			 * coordination type. Otherwise it will be regarded
 150			 * as illegal.
 151			 */
 152			if (match_pdomain->num_processors != count_target) {
 153				retval = -EINVAL;
 154				goto err_ret;
 155			}
 156
 157			if (pdomain->coord_type != match_pdomain->coord_type) {
 158				retval = -EINVAL;
 159				goto err_ret;
 160			}
 161
 162			cpumask_set_cpu(j, covered_cpus);
 163			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
 164			count++;
 165		}
 166		for_each_possible_cpu(j) {
 167			if (i == j)
 168				continue;
 169
 170			match_pr = per_cpu(processors, j);
 171			if (!match_pr)
 172				continue;
 173
 174			match_pthrottling = &(match_pr->throttling);
 175			match_pdomain = &(match_pthrottling->domain_info);
 176			if (match_pdomain->domain != pdomain->domain)
 177				continue;
 178
 179			/*
 180			 * If some CPUS have the same domain, they
 181			 * will have the same shared_cpu_map.
 182			 */
 183			cpumask_copy(match_pthrottling->shared_cpu_map,
 184				     pthrottling->shared_cpu_map);
 185		}
 186	}
 187
 188err_ret:
 189	free_cpumask_var(covered_cpus);
 190
 191	for_each_possible_cpu(i) {
 192		pr = per_cpu(processors, i);
 193		if (!pr)
 194			continue;
 195
 196		/*
 197		 * Assume no coordination on any error parsing domain info.
 198		 * The coordination type will be forced as SW_ALL.
 199		 */
 200		if (retval) {
 201			pthrottling = &(pr->throttling);
 202			cpumask_clear(pthrottling->shared_cpu_map);
 203			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
 204			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
 205		}
 206	}
 207
 208	return retval;
 209}
 210
 211/*
 212 * Update the T-state coordination after the _TSD
 213 * data for all cpus is obtained.
 214 */
 215void acpi_processor_throttling_init(void)
 216{
 217	if (acpi_processor_update_tsd_coord()) {
 218		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 219			"Assume no T-state coordination\n"));
 220	}
 221
 222	return;
 223}
 224
 225static int acpi_processor_throttling_notifier(unsigned long event, void *data)
 226{
 227	struct throttling_tstate *p_tstate = data;
 228	struct acpi_processor *pr;
 229	unsigned int cpu ;
 230	int target_state;
 231	struct acpi_processor_limit *p_limit;
 232	struct acpi_processor_throttling *p_throttling;
 233
 234	cpu = p_tstate->cpu;
 235	pr = per_cpu(processors, cpu);
 236	if (!pr) {
 237		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
 238		return 0;
 239	}
 240	if (!pr->flags.throttling) {
 241		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
 242				"unsupported on CPU %d\n", cpu));
 243		return 0;
 244	}
 245	target_state = p_tstate->target_state;
 246	p_throttling = &(pr->throttling);
 247	switch (event) {
 248	case THROTTLING_PRECHANGE:
 249		/*
 250		 * Prechange event is used to choose one proper t-state,
 251		 * which meets the limits of thermal, user and _TPC.
 252		 */
 253		p_limit = &pr->limit;
 254		if (p_limit->thermal.tx > target_state)
 255			target_state = p_limit->thermal.tx;
 256		if (p_limit->user.tx > target_state)
 257			target_state = p_limit->user.tx;
 258		if (pr->throttling_platform_limit > target_state)
 259			target_state = pr->throttling_platform_limit;
 260		if (target_state >= p_throttling->state_count) {
 261			printk(KERN_WARNING
 262				"Exceed the limit of T-state \n");
 263			target_state = p_throttling->state_count - 1;
 264		}
 265		p_tstate->target_state = target_state;
 266		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:"
 267				"target T-state of CPU %d is T%d\n",
 268				cpu, target_state));
 269		break;
 270	case THROTTLING_POSTCHANGE:
 271		/*
 272		 * Postchange event is only used to update the
 273		 * T-state flag of acpi_processor_throttling.
 274		 */
 275		p_throttling->state = target_state;
 276		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:"
 277				"CPU %d is switched to T%d\n",
 278				cpu, target_state));
 279		break;
 280	default:
 281		printk(KERN_WARNING
 282			"Unsupported Throttling notifier event\n");
 283		break;
 284	}
 285
 286	return 0;
 287}
 288
 289/*
 290 * _TPC - Throttling Present Capabilities
 291 */
 292static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
 293{
 294	acpi_status status = 0;
 295	unsigned long long tpc = 0;
 296
 297	if (!pr)
 298		return -EINVAL;
 299
 300	if (ignore_tpc)
 301		goto end;
 302
 303	status = acpi_evaluate_integer(pr->handle, "_TPC", NULL, &tpc);
 304	if (ACPI_FAILURE(status)) {
 305		if (status != AE_NOT_FOUND) {
 306			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TPC"));
 307		}
 308		return -ENODEV;
 309	}
 310
 311end:
 312	pr->throttling_platform_limit = (int)tpc;
 313	return 0;
 314}
 315
 316int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
 317{
 318	int result = 0;
 319	int throttling_limit;
 320	int current_state;
 321	struct acpi_processor_limit *limit;
 322	int target_state;
 323
 324	if (ignore_tpc)
 325		return 0;
 326
 327	result = acpi_processor_get_platform_limit(pr);
 328	if (result) {
 329		/* Throttling Limit is unsupported */
 330		return result;
 331	}
 332
 333	throttling_limit = pr->throttling_platform_limit;
 334	if (throttling_limit >= pr->throttling.state_count) {
 335		/* Uncorrect Throttling Limit */
 336		return -EINVAL;
 337	}
 338
 339	current_state = pr->throttling.state;
 340	if (current_state > throttling_limit) {
 341		/*
 342		 * The current state can meet the requirement of
 343		 * _TPC limit. But it is reasonable that OSPM changes
 344		 * t-states from high to low for better performance.
 345		 * Of course the limit condition of thermal
 346		 * and user should be considered.
 347		 */
 348		limit = &pr->limit;
 349		target_state = throttling_limit;
 350		if (limit->thermal.tx > target_state)
 351			target_state = limit->thermal.tx;
 352		if (limit->user.tx > target_state)
 353			target_state = limit->user.tx;
 354	} else if (current_state == throttling_limit) {
 355		/*
 356		 * Unnecessary to change the throttling state
 357		 */
 358		return 0;
 359	} else {
 360		/*
 361		 * If the current state is lower than the limit of _TPC, it
 362		 * will be forced to switch to the throttling state defined
 363		 * by throttling_platfor_limit.
 364		 * Because the previous state meets with the limit condition
 365		 * of thermal and user, it is unnecessary to check it again.
 366		 */
 367		target_state = throttling_limit;
 368	}
 369	return acpi_processor_set_throttling(pr, target_state, false);
 370}
 371
 372/*
 373 * This function is used to reevaluate whether the T-state is valid
 374 * after one CPU is onlined/offlined.
 375 * It is noted that it won't reevaluate the following properties for
 376 * the T-state.
 377 *	1. Control method.
 378 *	2. the number of supported T-state
 379 *	3. TSD domain
 380 */
 381void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
 382					unsigned long action)
 383{
 384	int result = 0;
 385
 386	if (action == CPU_DEAD) {
 387		/* When one CPU is offline, the T-state throttling
 388		 * will be invalidated.
 389		 */
 390		pr->flags.throttling = 0;
 391		return;
 392	}
 393	/* the following is to recheck whether the T-state is valid for
 394	 * the online CPU
 395	 */
 396	if (!pr->throttling.state_count) {
 397		/* If the number of T-state is invalid, it is
 398		 * invalidated.
 399		 */
 400		pr->flags.throttling = 0;
 401		return;
 402	}
 403	pr->flags.throttling = 1;
 404
 405	/* Disable throttling (if enabled).  We'll let subsequent
 406	 * policy (e.g.thermal) decide to lower performance if it
 407	 * so chooses, but for now we'll crank up the speed.
 408	 */
 409
 410	result = acpi_processor_get_throttling(pr);
 411	if (result)
 412		goto end;
 413
 414	if (pr->throttling.state) {
 415		result = acpi_processor_set_throttling(pr, 0, false);
 416		if (result)
 417			goto end;
 418	}
 419
 420end:
 421	if (result)
 422		pr->flags.throttling = 0;
 423}
 424/*
 425 * _PTC - Processor Throttling Control (and status) register location
 426 */
 427static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
 428{
 429	int result = 0;
 430	acpi_status status = 0;
 431	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 432	union acpi_object *ptc = NULL;
 433	union acpi_object obj = { 0 };
 434	struct acpi_processor_throttling *throttling;
 435
 436	status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
 437	if (ACPI_FAILURE(status)) {
 438		if (status != AE_NOT_FOUND) {
 439			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTC"));
 440		}
 441		return -ENODEV;
 442	}
 443
 444	ptc = (union acpi_object *)buffer.pointer;
 445	if (!ptc || (ptc->type != ACPI_TYPE_PACKAGE)
 446	    || (ptc->package.count != 2)) {
 447		printk(KERN_ERR PREFIX "Invalid _PTC data\n");
 448		result = -EFAULT;
 449		goto end;
 450	}
 451
 452	/*
 453	 * control_register
 454	 */
 455
 456	obj = ptc->package.elements[0];
 457
 458	if ((obj.type != ACPI_TYPE_BUFFER)
 459	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
 460	    || (obj.buffer.pointer == NULL)) {
 461		printk(KERN_ERR PREFIX
 462		       "Invalid _PTC data (control_register)\n");
 463		result = -EFAULT;
 464		goto end;
 465	}
 466	memcpy(&pr->throttling.control_register, obj.buffer.pointer,
 467	       sizeof(struct acpi_ptc_register));
 468
 469	/*
 470	 * status_register
 471	 */
 472
 473	obj = ptc->package.elements[1];
 474
 475	if ((obj.type != ACPI_TYPE_BUFFER)
 476	    || (obj.buffer.length < sizeof(struct acpi_ptc_register))
 477	    || (obj.buffer.pointer == NULL)) {
 478		printk(KERN_ERR PREFIX "Invalid _PTC data (status_register)\n");
 479		result = -EFAULT;
 480		goto end;
 481	}
 482
 483	memcpy(&pr->throttling.status_register, obj.buffer.pointer,
 484	       sizeof(struct acpi_ptc_register));
 485
 486	throttling = &pr->throttling;
 487
 488	if ((throttling->control_register.bit_width +
 489		throttling->control_register.bit_offset) > 32) {
 490		printk(KERN_ERR PREFIX "Invalid _PTC control register\n");
 491		result = -EFAULT;
 492		goto end;
 493	}
 494
 495	if ((throttling->status_register.bit_width +
 496		throttling->status_register.bit_offset) > 32) {
 497		printk(KERN_ERR PREFIX "Invalid _PTC status register\n");
 498		result = -EFAULT;
 499		goto end;
 500	}
 501
 502      end:
 503	kfree(buffer.pointer);
 504
 505	return result;
 506}
 507
 508/*
 509 * _TSS - Throttling Supported States
 510 */
 511static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
 512{
 513	int result = 0;
 514	acpi_status status = AE_OK;
 515	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 516	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
 517	struct acpi_buffer state = { 0, NULL };
 518	union acpi_object *tss = NULL;
 519	int i;
 520
 521	status = acpi_evaluate_object(pr->handle, "_TSS", NULL, &buffer);
 522	if (ACPI_FAILURE(status)) {
 523		if (status != AE_NOT_FOUND) {
 524			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSS"));
 525		}
 526		return -ENODEV;
 527	}
 528
 529	tss = buffer.pointer;
 530	if (!tss || (tss->type != ACPI_TYPE_PACKAGE)) {
 531		printk(KERN_ERR PREFIX "Invalid _TSS data\n");
 532		result = -EFAULT;
 533		goto end;
 534	}
 535
 536	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
 537			  tss->package.count));
 538
 539	pr->throttling.state_count = tss->package.count;
 540	pr->throttling.states_tss =
 541	    kmalloc(sizeof(struct acpi_processor_tx_tss) * tss->package.count,
 542		    GFP_KERNEL);
 
 543	if (!pr->throttling.states_tss) {
 544		result = -ENOMEM;
 545		goto end;
 546	}
 547
 548	for (i = 0; i < pr->throttling.state_count; i++) {
 549
 550		struct acpi_processor_tx_tss *tx =
 551		    (struct acpi_processor_tx_tss *)&(pr->throttling.
 552						      states_tss[i]);
 553
 554		state.length = sizeof(struct acpi_processor_tx_tss);
 555		state.pointer = tx;
 556
 557		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));
 558
 559		status = acpi_extract_package(&(tss->package.elements[i]),
 560					      &format, &state);
 561		if (ACPI_FAILURE(status)) {
 562			ACPI_EXCEPTION((AE_INFO, status, "Invalid _TSS data"));
 563			result = -EFAULT;
 564			kfree(pr->throttling.states_tss);
 565			goto end;
 566		}
 567
 568		if (!tx->freqpercentage) {
 569			printk(KERN_ERR PREFIX
 570			       "Invalid _TSS data: freq is zero\n");
 571			result = -EFAULT;
 572			kfree(pr->throttling.states_tss);
 573			goto end;
 574		}
 575	}
 576
 577      end:
 578	kfree(buffer.pointer);
 579
 580	return result;
 581}
 582
 583/*
 584 * _TSD - T-State Dependencies
 585 */
 586static int acpi_processor_get_tsd(struct acpi_processor *pr)
 587{
 588	int result = 0;
 589	acpi_status status = AE_OK;
 590	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 591	struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
 592	struct acpi_buffer state = { 0, NULL };
 593	union acpi_object *tsd = NULL;
 594	struct acpi_tsd_package *pdomain;
 595	struct acpi_processor_throttling *pthrottling;
 596
 597	pthrottling = &pr->throttling;
 598	pthrottling->tsd_valid_flag = 0;
 599
 600	status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
 601	if (ACPI_FAILURE(status)) {
 602		if (status != AE_NOT_FOUND) {
 603			ACPI_EXCEPTION((AE_INFO, status, "Evaluating _TSD"));
 604		}
 605		return -ENODEV;
 606	}
 607
 608	tsd = buffer.pointer;
 609	if (!tsd || (tsd->type != ACPI_TYPE_PACKAGE)) {
 610		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
 611		result = -EFAULT;
 612		goto end;
 613	}
 614
 615	if (tsd->package.count != 1) {
 616		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
 617		result = -EFAULT;
 618		goto end;
 619	}
 620
 621	pdomain = &(pr->throttling.domain_info);
 622
 623	state.length = sizeof(struct acpi_tsd_package);
 624	state.pointer = pdomain;
 625
 626	status = acpi_extract_package(&(tsd->package.elements[0]),
 627				      &format, &state);
 628	if (ACPI_FAILURE(status)) {
 629		printk(KERN_ERR PREFIX "Invalid _TSD data\n");
 630		result = -EFAULT;
 631		goto end;
 632	}
 633
 634	if (pdomain->num_entries != ACPI_TSD_REV0_ENTRIES) {
 635		printk(KERN_ERR PREFIX "Unknown _TSD:num_entries\n");
 636		result = -EFAULT;
 637		goto end;
 638	}
 639
 640	if (pdomain->revision != ACPI_TSD_REV0_REVISION) {
 641		printk(KERN_ERR PREFIX "Unknown _TSD:revision\n");
 642		result = -EFAULT;
 643		goto end;
 644	}
 645
 646	pthrottling = &pr->throttling;
 647	pthrottling->tsd_valid_flag = 1;
 648	pthrottling->shared_type = pdomain->coord_type;
 649	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
 650	/*
 651	 * If the coordination type is not defined in ACPI spec,
 652	 * the tsd_valid_flag will be clear and coordination type
 653	 * will be forecd as DOMAIN_COORD_TYPE_SW_ALL.
 654	 */
 655	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
 656		pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
 657		pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
 658		pthrottling->tsd_valid_flag = 0;
 659		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
 660	}
 661
 662      end:
 663	kfree(buffer.pointer);
 664	return result;
 665}
 666
 667/* --------------------------------------------------------------------------
 668                              Throttling Control
 669   -------------------------------------------------------------------------- */
 670static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
 671{
 672	int state = 0;
 673	u32 value = 0;
 674	u32 duty_mask = 0;
 675	u32 duty_value = 0;
 676
 677	if (!pr)
 678		return -EINVAL;
 679
 680	if (!pr->flags.throttling)
 681		return -ENODEV;
 682
 683	pr->throttling.state = 0;
 684
 685	duty_mask = pr->throttling.state_count - 1;
 686
 687	duty_mask <<= pr->throttling.duty_offset;
 688
 689	local_irq_disable();
 690
 691	value = inl(pr->throttling.address);
 692
 693	/*
 694	 * Compute the current throttling state when throttling is enabled
 695	 * (bit 4 is on).
 696	 */
 697	if (value & 0x10) {
 698		duty_value = value & duty_mask;
 699		duty_value >>= pr->throttling.duty_offset;
 700
 701		if (duty_value)
 702			state = pr->throttling.state_count - duty_value;
 703	}
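	/*
	 * Illustrative example (hypothetical values): with duty_width = 3
	 * the FADT exposes state_count = 8 T-states.  A duty_value of 6
	 * read back from the throttling register therefore decodes to
	 * state T2 (8 - 6), i.e. 75% of nominal performance per the table
	 * built in acpi_processor_get_fadt_info().
	 */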
 704
 705	pr->throttling.state = state;
 706
 707	local_irq_enable();
 708
 709	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 710			  "Throttling state is T%d (%d%% throttling applied)\n",
 711			  state, pr->throttling.states[state].performance));
 712
 713	return 0;
 714}
 715
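/*
 * For a FIXED_HARDWARE _PTC status/control register the _TSS control value
 * is exchanged directly with MSR_IA32_THERM_CONTROL (the clock-modulation
 * MSR); the helpers below simply move the raw 64-bit value.  As the
 * vendor/feature checks show, this path is only taken on Intel CPUs that
 * advertise the ACPI feature flag.
 */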
 716#ifdef CONFIG_X86
 717static int acpi_throttling_rdmsr(u64 *value)
 718{
 719	u64 msr_high, msr_low;
 720	u64 msr = 0;
 721	int ret = -1;
 722
 723	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
 724		!this_cpu_has(X86_FEATURE_ACPI)) {
 725		printk(KERN_ERR PREFIX
  726			"HARDWARE addr space, not supported yet\n");
 727	} else {
 728		msr_low = 0;
 729		msr_high = 0;
 730		rdmsr_safe(MSR_IA32_THERM_CONTROL,
 731			(u32 *)&msr_low , (u32 *) &msr_high);
 732		msr = (msr_high << 32) | msr_low;
 733		*value = (u64) msr;
 734		ret = 0;
 735	}
 736	return ret;
 737}
 738
 739static int acpi_throttling_wrmsr(u64 value)
 740{
 741	int ret = -1;
 742	u64 msr;
 743
 744	if ((this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_INTEL) ||
 745		!this_cpu_has(X86_FEATURE_ACPI)) {
 746		printk(KERN_ERR PREFIX
  747			"HARDWARE addr space, not supported yet\n");
 748	} else {
 749		msr = value;
 750		wrmsr_safe(MSR_IA32_THERM_CONTROL,
 751			msr & 0xffffffff, msr >> 32);
 752		ret = 0;
 753	}
 754	return ret;
 755}
 756#else
 757static int acpi_throttling_rdmsr(u64 *value)
 758{
 759	printk(KERN_ERR PREFIX
  760		"HARDWARE addr space, not supported yet\n");
 761	return -1;
 762}
 763
 764static int acpi_throttling_wrmsr(u64 value)
 765{
 766	printk(KERN_ERR PREFIX
  767		"HARDWARE addr space, not supported yet\n");
 768	return -1;
 769}
 770#endif
 771
 772static int acpi_read_throttling_status(struct acpi_processor *pr,
 773					u64 *value)
 774{
 775	u32 bit_width, bit_offset;
 776	u32 ptc_value;
 777	u64 ptc_mask;
 778	struct acpi_processor_throttling *throttling;
 779	int ret = -1;
 780
 781	throttling = &pr->throttling;
 782	switch (throttling->status_register.space_id) {
 783	case ACPI_ADR_SPACE_SYSTEM_IO:
 784		bit_width = throttling->status_register.bit_width;
 785		bit_offset = throttling->status_register.bit_offset;
 786
 787		acpi_os_read_port((acpi_io_address) throttling->status_register.
 788				  address, &ptc_value,
 789				  (u32) (bit_width + bit_offset));
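		/*
		 * Worked example (hypothetical register layout): with
		 * bit_offset = 1 and bit_width = 3, a raw port value of 0x0E
		 * yields ((0x0E >> 1) & 0x7) == 7 as the _TSS control value.
		 */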
 790		ptc_mask = (1 << bit_width) - 1;
 791		*value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
 792		ret = 0;
 793		break;
 794	case ACPI_ADR_SPACE_FIXED_HARDWARE:
 795		ret = acpi_throttling_rdmsr(value);
 796		break;
 797	default:
 798		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
 799		       (u32) (throttling->status_register.space_id));
 800	}
 801	return ret;
 802}
 803
 804static int acpi_write_throttling_state(struct acpi_processor *pr,
 805				u64 value)
 806{
 807	u32 bit_width, bit_offset;
 808	u64 ptc_value;
 809	u64 ptc_mask;
 810	struct acpi_processor_throttling *throttling;
 811	int ret = -1;
 812
 813	throttling = &pr->throttling;
 814	switch (throttling->control_register.space_id) {
 815	case ACPI_ADR_SPACE_SYSTEM_IO:
 816		bit_width = throttling->control_register.bit_width;
 817		bit_offset = throttling->control_register.bit_offset;
 818		ptc_mask = (1 << bit_width) - 1;
 819		ptc_value = value & ptc_mask;
 820
 821		acpi_os_write_port((acpi_io_address) throttling->
 822					control_register.address,
 823					(u32) (ptc_value << bit_offset),
 824					(u32) (bit_width + bit_offset));
 825		ret = 0;
 826		break;
 827	case ACPI_ADR_SPACE_FIXED_HARDWARE:
 828		ret = acpi_throttling_wrmsr(value);
 829		break;
 830	default:
 831		printk(KERN_ERR PREFIX "Unknown addr space %d\n",
 832		       (u32) (throttling->control_register.space_id));
 833	}
 834	return ret;
 835}
 836
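/*
 * The two helpers below translate between _TSS "control" values and T-state
 * indices: acpi_get_throttling_state() scans the _TSS table for a matching
 * control value, while acpi_get_throttling_value() simply indexes the table.
 */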
 837static int acpi_get_throttling_state(struct acpi_processor *pr,
 838				u64 value)
 839{
 840	int i;
 841
 842	for (i = 0; i < pr->throttling.state_count; i++) {
 843		struct acpi_processor_tx_tss *tx =
 844		    (struct acpi_processor_tx_tss *)&(pr->throttling.
 845						      states_tss[i]);
 846		if (tx->control == value)
 847			return i;
 848	}
 849	return -1;
 850}
 851
 852static int acpi_get_throttling_value(struct acpi_processor *pr,
 853			int state, u64 *value)
 854{
 855	int ret = -1;
 856
  857	if (state >= 0 && state < pr->throttling.state_count) {
 858		struct acpi_processor_tx_tss *tx =
 859		    (struct acpi_processor_tx_tss *)&(pr->throttling.
 860						      states_tss[state]);
 861		*value = tx->control;
 862		ret = 0;
 863	}
 864	return ret;
 865}
 866
 867static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 868{
 869	int state = 0;
 870	int ret;
 871	u64 value;
 872
 873	if (!pr)
 874		return -EINVAL;
 875
 876	if (!pr->flags.throttling)
 877		return -ENODEV;
 878
 879	pr->throttling.state = 0;
 880
 881	value = 0;
 882	ret = acpi_read_throttling_status(pr, &value);
 883	if (ret >= 0) {
 884		state = acpi_get_throttling_state(pr, value);
 885		if (state == -1) {
 886			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 887				"Invalid throttling state, reset\n"));
 888			state = 0;
 889			ret = acpi_processor_set_throttling(pr, state, true);
 890			if (ret)
 891				return ret;
 892		}
 893		pr->throttling.state = state;
 894	}
 895
 896	return 0;
 897}
 898
 899static int acpi_processor_get_throttling(struct acpi_processor *pr)
 900{
 901	cpumask_var_t saved_mask;
 902	int ret;
 903
 904	if (!pr)
 905		return -EINVAL;
 906
 907	if (!pr->flags.throttling)
 908		return -ENODEV;
 909
 910	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
 911		return -ENOMEM;
 912
 913	/*
  914	 * Migrate the task to the CPU pointed to by pr.
 915	 */
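	/*
	 * In particular the FIXED_HARDWARE (MSR) path reads a per-CPU MSR,
	 * which must happen on the target CPU; hence the temporary affinity
	 * change here and the restore afterwards.
	 */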
 916	cpumask_copy(saved_mask, &current->cpus_allowed);
 917	/* FIXME: use work_on_cpu() */
 918	if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
 919		/* Can't migrate to the target pr->id CPU. Exit */
 920		free_cpumask_var(saved_mask);
 921		return -ENODEV;
 922	}
 923	ret = pr->throttling.acpi_processor_get_throttling(pr);
 924	/* restore the previous state */
 925	set_cpus_allowed_ptr(current, saved_mask);
 926	free_cpumask_var(saved_mask);
 927
 928	return ret;
 929}
 930
 931static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
 932{
 933	int i, step;
 934
 935	if (!pr->throttling.address) {
 936		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling register\n"));
 937		return -EINVAL;
 938	} else if (!pr->throttling.duty_width) {
 939		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No throttling states\n"));
 940		return -EINVAL;
 941	}
 942	/* TBD: Support duty_cycle values that span bit 4. */
 943	else if ((pr->throttling.duty_offset + pr->throttling.duty_width) > 4) {
 944		printk(KERN_WARNING PREFIX "duty_cycle spans bit 4\n");
 945		return -EINVAL;
 946	}
 947
 948	pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
 949
 950	/*
 951	 * Compute state values. Note that throttling displays a linear power
 952	 * performance relationship (at 50% performance the CPU will consume
 953	 * 50% power).  Values are in 1/10th of a percent to preserve accuracy.
 954	 */
 955
 956	step = (1000 / pr->throttling.state_count);
 957
 958	for (i = 0; i < pr->throttling.state_count; i++) {
 959		pr->throttling.states[i].performance = 1000 - step * i;
 960		pr->throttling.states[i].power = 1000 - step * i;
 961	}
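	/*
	 * Example (hypothetical FADT values): duty_width = 2 gives
	 * state_count = 4 and step = 250, so the table becomes
	 * T0 = 100.0%, T1 = 75.0%, T2 = 50.0%, T3 = 25.0% performance.
	 */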
 962	return 0;
 963}
 964
 965static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
 966					      int state, bool force)
 967{
 968	u32 value = 0;
 969	u32 duty_mask = 0;
 970	u32 duty_value = 0;
 971
 972	if (!pr)
 973		return -EINVAL;
 974
 975	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
 976		return -EINVAL;
 977
 978	if (!pr->flags.throttling)
 979		return -ENODEV;
 980
 981	if (!force && (state == pr->throttling.state))
 982		return 0;
 983
 984	if (state < pr->throttling_platform_limit)
 985		return -EPERM;
 986	/*
 987	 * Calculate the duty_value and duty_mask.
 988	 */
 989	if (state) {
 990		duty_value = pr->throttling.state_count - state;
 991
 992		duty_value <<= pr->throttling.duty_offset;
 993
 994		/* Used to clear all duty_value bits */
 995		duty_mask = pr->throttling.state_count - 1;
 996
 997		duty_mask <<= acpi_gbl_FADT.duty_offset;
 998		duty_mask = ~duty_mask;
 999	}
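	/*
	 * Worked example (hypothetical values): state_count = 8,
	 * duty_offset = 1 and a requested state of T3 give
	 * duty_value = (8 - 3) << 1 = 0b1010 and duty_mask = ~(7 << 1),
	 * i.e. bits 3:1 are cleared and then set to the new duty cycle.
	 */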
1000
1001	local_irq_disable();
1002
1003	/*
 1004	 * Disable throttling by writing a 0 to bit 4.  Note that it must
 1005	 * be turned off before the duty_value can be changed.
1006	 */
1007	value = inl(pr->throttling.address);
1008	if (value & 0x10) {
1009		value &= 0xFFFFFFEF;
1010		outl(value, pr->throttling.address);
1011	}
1012
1013	/*
1014	 * Write the new duty_value and then enable throttling.  Note
1015	 * that a state value of 0 leaves throttling disabled.
1016	 */
1017	if (state) {
1018		value &= duty_mask;
1019		value |= duty_value;
1020		outl(value, pr->throttling.address);
1021
1022		value |= 0x00000010;
1023		outl(value, pr->throttling.address);
1024	}
1025
1026	pr->throttling.state = state;
1027
1028	local_irq_enable();
1029
1030	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1031			  "Throttling state set to T%d (%d%%)\n", state,
1032			  (pr->throttling.states[state].performance ? pr->
1033			   throttling.states[state].performance / 10 : 0)));
1034
1035	return 0;
1036}
1037
1038static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
1039					     int state, bool force)
1040{
1041	int ret;
1042	u64 value;
1043
1044	if (!pr)
1045		return -EINVAL;
1046
1047	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
1048		return -EINVAL;
1049
1050	if (!pr->flags.throttling)
1051		return -ENODEV;
1052
1053	if (!force && (state == pr->throttling.state))
1054		return 0;
1055
1056	if (state < pr->throttling_platform_limit)
1057		return -EPERM;
1058
1059	value = 0;
1060	ret = acpi_get_throttling_value(pr, state, &value);
1061	if (ret >= 0) {
1062		acpi_write_throttling_state(pr, value);
1063		pr->throttling.state = state;
1064	}
1065
1066	return 0;
1067}
1068
1069static long acpi_processor_throttling_fn(void *data)
1070{
1071	struct acpi_processor_throttling_arg *arg = data;
1072	struct acpi_processor *pr = arg->pr;
1073
1074	return pr->throttling.acpi_processor_set_throttling(pr,
1075			arg->target_state, arg->force);
1076}
1077
1078int acpi_processor_set_throttling(struct acpi_processor *pr,
1079						int state, bool force)
1080{
1081	int ret = 0;
1082	unsigned int i;
1083	struct acpi_processor *match_pr;
1084	struct acpi_processor_throttling *p_throttling;
1085	struct acpi_processor_throttling_arg arg;
1086	struct throttling_tstate t_state;
1087
1088	if (!pr)
1089		return -EINVAL;
1090
1091	if (!pr->flags.throttling)
1092		return -ENODEV;
1093
1094	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
1095		return -EINVAL;
1096
1097	if (cpu_is_offline(pr->id)) {
1098		/*
 1099		 * The CPU pointed to by pr->id is offline, so there is no need
 1100		 * to change the throttling state.
1101		 */
1102		return -ENODEV;
1103	}
1104
1105	t_state.target_state = state;
1106	p_throttling = &(pr->throttling);
1107
1108	/*
1109	 * The throttling notifier will be called for every
1110	 * affected cpu in order to get one proper T-state.
1111	 * The notifier event is THROTTLING_PRECHANGE.
1112	 */
1113	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
1114		t_state.cpu = i;
1115		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
1116							&t_state);
1117	}
1118	/*
 1119	 * The per-CPU set_throttling handler is invoked below to switch
 1120	 * the T-state. If the coordination type is SW_ALL or HW_ALL, it
 1121	 * must be called on every affected cpu. Otherwise it only needs
 1122	 * to be called on the cpu pointed to by pr.
1123	 */
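	/*
	 * With SW_ANY coordination a write by any CPU in the domain is
	 * sufficient for the whole domain, so only pr->id is programmed.
	 * Under SW_ALL (and, conservatively, HW_ALL) every online CPU in
	 * shared_cpu_map is programmed individually.
	 */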
1124	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
1125		arg.pr = pr;
1126		arg.target_state = state;
1127		arg.force = force;
1128		ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
1129	} else {
1130		/*
1131		 * When the T-state coordination is SW_ALL or HW_ALL,
 1132		 * it is necessary to set the T-state on every affected
 1133		 * cpu.
1134		 */
1135		for_each_cpu_and(i, cpu_online_mask,
1136		    p_throttling->shared_cpu_map) {
1137			match_pr = per_cpu(processors, i);
1138			/*
1139			 * If the pointer is invalid, we will report the
1140			 * error message and continue.
1141			 */
1142			if (!match_pr) {
1143				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1144					"Invalid Pointer for CPU %d\n", i));
1145				continue;
1146			}
1147			/*
1148			 * If the throttling control is unsupported on CPU i,
1149			 * we will report the error message and continue.
1150			 */
1151			if (!match_pr->flags.throttling) {
1152				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1153					"Throttling Control is unsupported "
1154					"on CPU %d\n", i));
1155				continue;
1156			}
1157
1158			arg.pr = match_pr;
1159			arg.target_state = state;
1160			arg.force = force;
1161			ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
1162				&arg);
1163		}
1164	}
1165	/*
1166	 * After the set_throttling is called, the
1167	 * throttling notifier is called for every
1168	 * affected cpu to update the T-states.
1169	 * The notifier event is THROTTLING_POSTCHANGE
1170	 */
1171	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
1172		t_state.cpu = i;
1173		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
1174							&t_state);
1175	}
1176
1177	return ret;
1178}
1179
1180int acpi_processor_get_throttling_info(struct acpi_processor *pr)
1181{
1182	int result = 0;
1183	struct acpi_processor_throttling *pthrottling;
1184
1185	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1186			  "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
1187			  pr->throttling.address,
1188			  pr->throttling.duty_offset,
1189			  pr->throttling.duty_width));
1190
1191	/*
1192	 * Evaluate _PTC, _TSS and _TPC
1193	 * They must all be present or none of them can be used.
1194	 */
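	/*
	 * If any of them is missing the driver falls back to the legacy
	 * FADT duty-cycle throttling interface configured below.
	 */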
1195	if (acpi_processor_get_throttling_control(pr) ||
1196		acpi_processor_get_throttling_states(pr) ||
1197		acpi_processor_get_platform_limit(pr))
1198	{
1199		pr->throttling.acpi_processor_get_throttling =
1200		    &acpi_processor_get_throttling_fadt;
1201		pr->throttling.acpi_processor_set_throttling =
1202		    &acpi_processor_set_throttling_fadt;
1203		if (acpi_processor_get_fadt_info(pr))
1204			return 0;
1205	} else {
1206		pr->throttling.acpi_processor_get_throttling =
1207		    &acpi_processor_get_throttling_ptc;
1208		pr->throttling.acpi_processor_set_throttling =
1209		    &acpi_processor_set_throttling_ptc;
1210	}
1211
1212	/*
1213	 * If TSD package for one CPU can't be parsed successfully, it means
1214	 * that this CPU will have no coordination with other CPUs.
1215	 */
1216	if (acpi_processor_get_tsd(pr)) {
1217		pthrottling = &pr->throttling;
1218		pthrottling->tsd_valid_flag = 0;
1219		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
1220		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
1221	}
1222
1223	/*
1224	 * PIIX4 Errata: We don't support throttling on the original PIIX4.
1225	 * This shouldn't be an issue as few (if any) mobile systems ever
1226	 * used this part.
1227	 */
1228	if (errata.piix4.throttle) {
1229		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1230				  "Throttling not supported on PIIX4 A- or B-step\n"));
1231		return 0;
1232	}
1233
1234	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d throttling states\n",
1235			  pr->throttling.state_count));
1236
1237	pr->flags.throttling = 1;
1238
1239	/*
1240	 * Disable throttling (if enabled).  We'll let subsequent policy (e.g.
1241	 * thermal) decide to lower performance if it so chooses, but for now
1242	 * we'll crank up the speed.
1243	 */
1244
1245	result = acpi_processor_get_throttling(pr);
1246	if (result)
1247		goto end;
1248
1249	if (pr->throttling.state) {
1250		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1251				  "Disabling throttling (was T%d)\n",
1252				  pr->throttling.state));
1253		result = acpi_processor_set_throttling(pr, 0, false);
1254		if (result)
1255			goto end;
1256	}
1257
1258      end:
1259	if (result)
1260		pr->flags.throttling = 0;
1261
1262	return result;
1263}
1264