arch/s390/oprofile/hwsampler.c

v4.6
   1/*
   2 * Copyright IBM Corp. 2010
   3 * Author: Heinz Graalfs <graalfs@de.ibm.com>
   4 */
   5
   6#include <linux/kernel_stat.h>
   7#include <linux/kernel.h>
   8#include <linux/module.h>
   9#include <linux/smp.h>
  10#include <linux/errno.h>
  11#include <linux/workqueue.h>
  12#include <linux/interrupt.h>
  13#include <linux/notifier.h>
  14#include <linux/cpu.h>
  15#include <linux/semaphore.h>
  16#include <linux/oom.h>
  17#include <linux/oprofile.h>
  18
  19#include <asm/facility.h>
  20#include <asm/cpu_mf.h>
  21#include <asm/irq.h>
  22
  23#include "hwsampler.h"
  24#include "op_counter.h"
  25
  26#define MAX_NUM_SDB 511
  27#define MIN_NUM_SDB 1
  28
  29DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
  30
  31struct hws_execute_parms {
  32	void *buffer;
  33	signed int rc;
  34};
  35
  36DEFINE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
  37EXPORT_PER_CPU_SYMBOL(sampler_cpu_buffer);
  38
  39static DEFINE_MUTEX(hws_sem);
  40static DEFINE_MUTEX(hws_sem_oom);
  41
  42static unsigned char hws_flush_all;
  43static unsigned int hws_oom;
  44static unsigned int hws_alert;
  45static struct workqueue_struct *hws_wq;
  46
  47static unsigned int hws_state;
  48enum {
  49	HWS_INIT = 1,
  50	HWS_DEALLOCATED,
  51	HWS_STOPPED,
  52	HWS_STARTED,
  53	HWS_STOPPING };
  54
  55/* set to 1 if called by kernel during memory allocation */
  56static unsigned char oom_killer_was_active;
   57/* number of SDBTs and SDBs as requested via the allocate API */
  58static unsigned long num_sdbt = 100;
  59static unsigned long num_sdb = 511;
  60/* sampling interval (machine cycles) */
  61static unsigned long interval;
  62
  63static unsigned long min_sampler_rate;
  64static unsigned long max_sampler_rate;
  65
  66static void execute_qsi(void *parms)
  67{
  68	struct hws_execute_parms *ep = parms;
  69
  70	ep->rc = qsi(ep->buffer);
  71}
  72
  73static void execute_ssctl(void *parms)
  74{
  75	struct hws_execute_parms *ep = parms;
  76
  77	ep->rc = lsctl(ep->buffer);
  78}
  79
  80static int smp_ctl_ssctl_stop(int cpu)
  81{
  82	int rc;
  83	struct hws_execute_parms ep;
  84	struct hws_cpu_buffer *cb;
  85
  86	cb = &per_cpu(sampler_cpu_buffer, cpu);
  87
  88	cb->ssctl.es = 0;
  89	cb->ssctl.cs = 0;
  90
  91	ep.buffer = &cb->ssctl;
  92	smp_call_function_single(cpu, execute_ssctl, &ep, 1);
  93	rc = ep.rc;
  94	if (rc) {
  95		printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
  96		dump_stack();
  97	}
  98
  99	ep.buffer = &cb->qsi;
 100	smp_call_function_single(cpu, execute_qsi, &ep, 1);
 101
 102	if (cb->qsi.es || cb->qsi.cs) {
 103		printk(KERN_EMERG "CPUMF sampling did not stop properly.\n");
 104		dump_stack();
 105	}
 106
 107	return rc;
 108}
 109
 110static int smp_ctl_ssctl_deactivate(int cpu)
 111{
 112	int rc;
 113	struct hws_execute_parms ep;
 114	struct hws_cpu_buffer *cb;
 115
 116	cb = &per_cpu(sampler_cpu_buffer, cpu);
 117
 118	cb->ssctl.es = 1;
 119	cb->ssctl.cs = 0;
 120
 121	ep.buffer = &cb->ssctl;
 122	smp_call_function_single(cpu, execute_ssctl, &ep, 1);
 123	rc = ep.rc;
 124	if (rc)
 125		printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
 126
 127	ep.buffer = &cb->qsi;
 128	smp_call_function_single(cpu, execute_qsi, &ep, 1);
 129
 130	if (cb->qsi.cs)
 131		printk(KERN_EMERG "CPUMF sampling was not set inactive.\n");
 132
 133	return rc;
 134}
 135
 136static int smp_ctl_ssctl_enable_activate(int cpu, unsigned long interval)
 137{
 138	int rc;
 139	struct hws_execute_parms ep;
 140	struct hws_cpu_buffer *cb;
 141
 142	cb = &per_cpu(sampler_cpu_buffer, cpu);
 143
 144	cb->ssctl.h = 1;
 145	cb->ssctl.tear = cb->first_sdbt;
 146	cb->ssctl.dear = *(unsigned long *) cb->first_sdbt;
 147	cb->ssctl.interval = interval;
 148	cb->ssctl.es = 1;
 149	cb->ssctl.cs = 1;
 150
 151	ep.buffer = &cb->ssctl;
 152	smp_call_function_single(cpu, execute_ssctl, &ep, 1);
 153	rc = ep.rc;
 154	if (rc)
 155		printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
 156
 157	ep.buffer = &cb->qsi;
 158	smp_call_function_single(cpu, execute_qsi, &ep, 1);
 159	if (ep.rc)
 160		printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu);
 161
 162	return rc;
 163}
 164
 165static int smp_ctl_qsi(int cpu)
 166{
 167	struct hws_execute_parms ep;
 168	struct hws_cpu_buffer *cb;
 169
 170	cb = &per_cpu(sampler_cpu_buffer, cpu);
 171
 172	ep.buffer = &cb->qsi;
 173	smp_call_function_single(cpu, execute_qsi, &ep, 1);
 174
 175	return ep.rc;
 176}
 177
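/*
 * A minimal sketch (not part of the original file) of the cross-CPU call
 * pattern shared by the smp_ctl_* helpers above: the buffer pointer and
 * return code travel in a struct hws_execute_parms, the execute_* helper
 * runs on the target CPU via smp_call_function_single() with wait == 1,
 * and the caller reads the rc back afterwards:
 *
 *	struct hws_execute_parms ep;
 *
 *	ep.buffer = &cb->qsi;
 *	smp_call_function_single(cpu, execute_qsi, &ep, 1);
 *	if (ep.rc)
 *		printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu);
 */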
 178static void hws_ext_handler(struct ext_code ext_code,
 179			    unsigned int param32, unsigned long param64)
 180{
 181	struct hws_cpu_buffer *cb = this_cpu_ptr(&sampler_cpu_buffer);
 182
 183	if (!(param32 & CPU_MF_INT_SF_MASK))
 184		return;
 185
 186	if (!hws_alert)
 187		return;
 188
 189	inc_irq_stat(IRQEXT_CMS);
 190	atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
 191
 192	if (hws_wq)
 193		queue_work(hws_wq, &cb->worker);
 194}
 195
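/*
 * Note on the handler above: ext_params accumulates the CPU_MF_INT_SF_*
 * alert bits across interrupts by OR-ing each param32 in; worker() later
 * drains all pending bits in one shot with atomic_xchg(&cb->ext_params, 0)
 * before acting on them.
 */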
 196static void worker(struct work_struct *work);
 197
 198static void add_samples_to_oprofile(unsigned cpu, unsigned long *,
 199				unsigned long *dear);
 200
 201static void init_all_cpu_buffers(void)
 202{
 203	int cpu;
 204	struct hws_cpu_buffer *cb;
 205
 206	for_each_online_cpu(cpu) {
 207		cb = &per_cpu(sampler_cpu_buffer, cpu);
 208		memset(cb, 0, sizeof(struct hws_cpu_buffer));
 209	}
 210}
 211
 212static void prepare_cpu_buffers(void)
 213{
 214	struct hws_cpu_buffer *cb;
 215	int cpu;
 216
 217	for_each_online_cpu(cpu) {
 218		cb = &per_cpu(sampler_cpu_buffer, cpu);
 219		atomic_set(&cb->ext_params, 0);
 220		cb->worker_entry = 0;
 221		cb->sample_overflow = 0;
 222		cb->req_alert = 0;
 223		cb->incorrect_sdbt_entry = 0;
 224		cb->invalid_entry_address = 0;
 225		cb->loss_of_sample_data = 0;
 226		cb->sample_auth_change_alert = 0;
 227		cb->finish = 0;
 228		cb->oom = 0;
 229		cb->stop_mode = 0;
 230	}
 231}
 232
 233/*
 234 * allocate_sdbt() - allocate sampler memory
 235 * @cpu: the cpu for which sampler memory is allocated
 236 *
 237 * A 4K page is allocated for each requested SDBT.
 238 * A maximum of 511 4K pages are allocated for the SDBs in each of the SDBTs.
  239 * Set the ALERT_REQ mask in each SDB's trailer.
 240 * Returns zero if successful, <0 otherwise.
 241 */
 242static int allocate_sdbt(int cpu)
 243{
 244	int j, k, rc;
 245	unsigned long *sdbt;
 246	unsigned long  sdb;
 247	unsigned long *tail;
 248	unsigned long *trailer;
 249	struct hws_cpu_buffer *cb;
 250
 251	cb = &per_cpu(sampler_cpu_buffer, cpu);
 252
 253	if (cb->first_sdbt)
 254		return -EINVAL;
 255
 256	sdbt = NULL;
 257	tail = sdbt;
 258
 259	for (j = 0; j < num_sdbt; j++) {
 260		sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
 261
 262		mutex_lock(&hws_sem_oom);
 263		/* OOM killer might have been activated */
 264		barrier();
 265		if (oom_killer_was_active || !sdbt) {
 266			if (sdbt)
 267				free_page((unsigned long)sdbt);
 268
 269			goto allocate_sdbt_error;
 270		}
 271		if (cb->first_sdbt == 0)
 272			cb->first_sdbt = (unsigned long)sdbt;
 273
 274		/* link current page to tail of chain */
 275		if (tail)
 276			*tail = (unsigned long)(void *)sdbt + 1;
 277
 278		mutex_unlock(&hws_sem_oom);
 279
 280		for (k = 0; k < num_sdb; k++) {
 281			/* get and set SDB page */
 282			sdb = get_zeroed_page(GFP_KERNEL);
 283
 284			mutex_lock(&hws_sem_oom);
 285			/* OOM killer might have been activated */
 286			barrier();
 287			if (oom_killer_was_active || !sdb) {
 288				if (sdb)
 289					free_page(sdb);
 290
 291				goto allocate_sdbt_error;
 292			}
 293			*sdbt = sdb;
 294			trailer = trailer_entry_ptr(*sdbt);
 295			*trailer = SDB_TE_ALERT_REQ_MASK;
 296			sdbt++;
 297			mutex_unlock(&hws_sem_oom);
 298		}
 299		tail = sdbt;
 300	}
 301	mutex_lock(&hws_sem_oom);
 302	if (oom_killer_was_active)
 303		goto allocate_sdbt_error;
 304
 305	rc = 0;
 306	if (tail)
 307		*tail = (unsigned long)
 308			((void *)cb->first_sdbt) + 1;
 309
 310allocate_sdbt_exit:
 311	mutex_unlock(&hws_sem_oom);
 312	return rc;
 313
 314allocate_sdbt_error:
 315	rc = -ENOMEM;
 316	goto allocate_sdbt_exit;
 317}
 318
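/*
 * Layout sketch (inferred from the code, not original text): each SDBT is
 * a zeroed 4K page of 8-byte entries.  An entry pointing to an SDB holds
 * the plain page address; the "+ 1" above sets bit 0 to mark a link entry,
 * and the final tail links back to first_sdbt, closing the ring:
 *
 *	SDBT 0: [sdb][sdb] ... [address of SDBT 1 | 1]
 *	SDBT 1: [sdb][sdb] ... [address of SDBT 0 | 1]
 *
 * This is the bit that is_link_entry() tests and get_next_sdbt() strips
 * when deallocate_sdbt() walks the chain.
 */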
 319/*
 320 * deallocate_sdbt() - deallocate all sampler memory
 321 *
 322 * For each online CPU all SDBT trees are deallocated.
 323 * Returns the number of freed pages.
 324 */
 325static int deallocate_sdbt(void)
 326{
 327	int cpu;
 328	int counter;
 329
 330	counter = 0;
 331
 332	for_each_online_cpu(cpu) {
 333		unsigned long start;
 334		unsigned long sdbt;
 335		unsigned long *curr;
 336		struct hws_cpu_buffer *cb;
 337
 338		cb = &per_cpu(sampler_cpu_buffer, cpu);
 339
 340		if (!cb->first_sdbt)
 341			continue;
 342
 343		sdbt = cb->first_sdbt;
 344		curr = (unsigned long *) sdbt;
 345		start = sdbt;
 346
 347		/* we'll free the SDBT after all SDBs are processed... */
 348		while (1) {
 349			if (!*curr || !sdbt)
 350				break;
 351
  352			/* a link entry ends this SDBT: follow it and free the page */
 353			if (is_link_entry(curr)) {
 354				curr = get_next_sdbt(curr);
 355				if (sdbt)
 356					free_page(sdbt);
 357
 358				/* we are done if we reach the start */
 359				if ((unsigned long) curr == start)
 360					break;
 361				else
 362					sdbt = (unsigned long) curr;
 363			} else {
 364				/* process SDB pointer */
 365				if (*curr) {
 366					free_page(*curr);
 367					curr++;
 368				}
 369			}
 370			counter++;
 371		}
 372		cb->first_sdbt = 0;
 373	}
 374	return counter;
 375}
 376
 377static int start_sampling(int cpu)
 378{
 379	int rc;
 380	struct hws_cpu_buffer *cb;
 381
 382	cb = &per_cpu(sampler_cpu_buffer, cpu);
 383	rc = smp_ctl_ssctl_enable_activate(cpu, interval);
 384	if (rc) {
 385		printk(KERN_INFO "hwsampler: CPU %d ssctl failed.\n", cpu);
 386		goto start_exit;
 387	}
 388
 389	rc = -EINVAL;
 390	if (!cb->qsi.es) {
 391		printk(KERN_INFO "hwsampler: CPU %d ssctl not enabled.\n", cpu);
 392		goto start_exit;
 393	}
 394
 395	if (!cb->qsi.cs) {
 396		printk(KERN_INFO "hwsampler: CPU %d ssctl not active.\n", cpu);
 397		goto start_exit;
 398	}
 399
 400	printk(KERN_INFO
 401		"hwsampler: CPU %d, CPUMF Sampling started, interval %lu.\n",
 402		cpu, interval);
 403
 404	rc = 0;
 405
 406start_exit:
 407	return rc;
 408}
 409
 410static int stop_sampling(int cpu)
 411{
 412	unsigned long v;
 413	int rc;
 414	struct hws_cpu_buffer *cb;
 415
 416	rc = smp_ctl_qsi(cpu);
 417	WARN_ON(rc);
 418
 419	cb = &per_cpu(sampler_cpu_buffer, cpu);
 420	if (!rc && !cb->qsi.es)
 421		printk(KERN_INFO "hwsampler: CPU %d, already stopped.\n", cpu);
 422
 423	rc = smp_ctl_ssctl_stop(cpu);
 424	if (rc) {
 425		printk(KERN_INFO "hwsampler: CPU %d, ssctl stop error %d.\n",
 426				cpu, rc);
 427		goto stop_exit;
 428	}
 429
 430	printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling stopped.\n", cpu);
 431
 432stop_exit:
 433	v = cb->req_alert;
 434	if (v)
 435		printk(KERN_ERR "hwsampler: CPU %d CPUMF Request alert,"
 436				" count=%lu.\n", cpu, v);
 437
 438	v = cb->loss_of_sample_data;
 439	if (v)
 440		printk(KERN_ERR "hwsampler: CPU %d CPUMF Loss of sample data,"
 441				" count=%lu.\n", cpu, v);
 442
 443	v = cb->invalid_entry_address;
 444	if (v)
 445		printk(KERN_ERR "hwsampler: CPU %d CPUMF Invalid entry address,"
 446				" count=%lu.\n", cpu, v);
 447
 448	v = cb->incorrect_sdbt_entry;
 449	if (v)
 450		printk(KERN_ERR
 451				"hwsampler: CPU %d CPUMF Incorrect SDBT address,"
 452				" count=%lu.\n", cpu, v);
 453
 454	v = cb->sample_auth_change_alert;
 455	if (v)
 456		printk(KERN_ERR
 457				"hwsampler: CPU %d CPUMF Sample authorization change,"
 458				" count=%lu.\n", cpu, v);
 459
 460	return rc;
 461}
 462
 463static int check_hardware_prerequisites(void)
 464{
 465	if (!test_facility(68))
 466		return -EOPNOTSUPP;
 467	return 0;
 468}
 469/*
 470 * hws_oom_callback() - the OOM callback function
 471 *
 472 * In case the callback is invoked during memory allocation for the
 473 *  hw sampler, all obtained memory is deallocated and a flag is set
 474 *  so main sampler memory allocation can exit with a failure code.
 475 * In case the callback is invoked during sampling the hw sampler
 476 *  is deactivated for all CPUs.
 477 */
 478static int hws_oom_callback(struct notifier_block *nfb,
 479	unsigned long dummy, void *parm)
 480{
 481	unsigned long *freed;
 482	int cpu;
 483	struct hws_cpu_buffer *cb;
 484
 485	freed = parm;
 486
 487	mutex_lock(&hws_sem_oom);
 488
 489	if (hws_state == HWS_DEALLOCATED) {
 490		/* during memory allocation */
 491		if (oom_killer_was_active == 0) {
 492			oom_killer_was_active = 1;
 493			*freed += deallocate_sdbt();
 494		}
 495	} else {
 496		int i;
 497		cpu = get_cpu();
 498		cb = &per_cpu(sampler_cpu_buffer, cpu);
 499
 500		if (!cb->oom) {
 501			for_each_online_cpu(i) {
 502				smp_ctl_ssctl_deactivate(i);
 503				cb->oom = 1;
 504			}
 505			cb->finish = 1;
 506
 507			printk(KERN_INFO
 508				"hwsampler: CPU %d, OOM notify during CPUMF Sampling.\n",
 509				cpu);
 510		}
 511	}
 512
 513	mutex_unlock(&hws_sem_oom);
 514
 515	return NOTIFY_OK;
 516}
 517
 518static struct notifier_block hws_oom_notifier = {
 519	.notifier_call = hws_oom_callback
 520};
 521
 522static int hws_cpu_callback(struct notifier_block *nfb,
 523	unsigned long action, void *hcpu)
 524{
 525	/* We do not have sampler space available for all possible CPUs.
 526	   All CPUs should be online when hw sampling is activated. */
 527	return (hws_state <= HWS_DEALLOCATED) ? NOTIFY_OK : NOTIFY_BAD;
 528}
 529
 530static struct notifier_block hws_cpu_notifier = {
 531	.notifier_call = hws_cpu_callback
 532};
 533
 534/**
 535 * hwsampler_deactivate() - set hardware sampling temporarily inactive
 536 * @cpu:  specifies the CPU to be set inactive.
 537 *
 538 * Returns 0 on success, !0 on failure.
 539 */
 540int hwsampler_deactivate(unsigned int cpu)
 541{
 542	/*
 543	 * Deactivate hw sampling temporarily and flush the buffer
 544	 * by pushing all the pending samples to oprofile buffer.
 545	 *
 546	 * This function can be called under one of the following conditions:
 547	 *     Memory unmap, task is exiting.
 548	 */
 549	int rc;
 550	struct hws_cpu_buffer *cb;
 551
 552	rc = 0;
 553	mutex_lock(&hws_sem);
 554
 555	cb = &per_cpu(sampler_cpu_buffer, cpu);
 556	if (hws_state == HWS_STARTED) {
 557		rc = smp_ctl_qsi(cpu);
 558		WARN_ON(rc);
 559		if (cb->qsi.cs) {
 560			rc = smp_ctl_ssctl_deactivate(cpu);
 561			if (rc) {
 562				printk(KERN_INFO
 563				"hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu);
 564				cb->finish = 1;
 565				hws_state = HWS_STOPPING;
 566			} else  {
 567				hws_flush_all = 1;
 568				/* Add work to queue to read pending samples.*/
 569				queue_work_on(cpu, hws_wq, &cb->worker);
 570			}
 571		}
 572	}
 573	mutex_unlock(&hws_sem);
 574
 575	if (hws_wq)
 576		flush_workqueue(hws_wq);
 577
 578	return rc;
 579}
 580
 581/**
 582 * hwsampler_activate() - activate/resume hardware sampling which was deactivated
 583 * @cpu:  specifies the CPU to be set active.
 584 *
 585 * Returns 0 on success, !0 on failure.
 586 */
 587int hwsampler_activate(unsigned int cpu)
 588{
 589	/*
  590	 * Re-activate hw sampling. This should be called paired with
 591	 * hwsampler_deactivate().
 592	 */
 593	int rc;
 594	struct hws_cpu_buffer *cb;
 595
 596	rc = 0;
 597	mutex_lock(&hws_sem);
 598
 599	cb = &per_cpu(sampler_cpu_buffer, cpu);
 600	if (hws_state == HWS_STARTED) {
 601		rc = smp_ctl_qsi(cpu);
 602		WARN_ON(rc);
 603		if (!cb->qsi.cs) {
 604			hws_flush_all = 0;
 605			rc = smp_ctl_ssctl_enable_activate(cpu, interval);
 606			if (rc) {
 607				printk(KERN_ERR
 608				"CPU %d, CPUMF activate sampling failed.\n",
 609					 cpu);
 610			}
 611		}
 612	}
 613
 614	mutex_unlock(&hws_sem);
 615
 616	return rc;
 617}
 618
 619static int check_qsi_on_setup(void)
 620{
 621	int rc;
 622	unsigned int cpu;
 623	struct hws_cpu_buffer *cb;
 624
 625	for_each_online_cpu(cpu) {
 626		cb = &per_cpu(sampler_cpu_buffer, cpu);
 627		rc = smp_ctl_qsi(cpu);
 628		WARN_ON(rc);
 629		if (rc)
 630			return -EOPNOTSUPP;
 631
 632		if (!cb->qsi.as) {
 633			printk(KERN_INFO "hwsampler: CPUMF sampling is not authorized.\n");
 634			return -EINVAL;
 635		}
 636
 637		if (cb->qsi.es) {
 638			printk(KERN_WARNING "hwsampler: CPUMF is still enabled.\n");
 639			rc = smp_ctl_ssctl_stop(cpu);
 640			if (rc)
 641				return -EINVAL;
 642
 643			printk(KERN_INFO
 644				"CPU %d, CPUMF Sampling stopped now.\n", cpu);
 645		}
 646	}
 647	return 0;
 648}
 649
 650static int check_qsi_on_start(void)
 651{
 652	unsigned int cpu;
 653	int rc;
 654	struct hws_cpu_buffer *cb;
 655
 656	for_each_online_cpu(cpu) {
 657		cb = &per_cpu(sampler_cpu_buffer, cpu);
 658		rc = smp_ctl_qsi(cpu);
 659		WARN_ON(rc);
 660
 661		if (!cb->qsi.as)
 662			return -EINVAL;
 663
 664		if (cb->qsi.es)
 665			return -EINVAL;
 666
 667		if (cb->qsi.cs)
 668			return -EINVAL;
 669	}
 670	return 0;
 671}
 672
 673static void worker_on_start(unsigned int cpu)
 674{
 675	struct hws_cpu_buffer *cb;
 676
 677	cb = &per_cpu(sampler_cpu_buffer, cpu);
 678	cb->worker_entry = cb->first_sdbt;
 679}
 680
 681static int worker_check_error(unsigned int cpu, int ext_params)
 682{
 683	int rc;
 684	unsigned long *sdbt;
 685	struct hws_cpu_buffer *cb;
 686
 687	rc = 0;
 688	cb = &per_cpu(sampler_cpu_buffer, cpu);
 689	sdbt = (unsigned long *) cb->worker_entry;
 690
 691	if (!sdbt || !*sdbt)
 692		return -EINVAL;
 693
 694	if (ext_params & CPU_MF_INT_SF_PRA)
 695		cb->req_alert++;
 696
 697	if (ext_params & CPU_MF_INT_SF_LSDA)
 698		cb->loss_of_sample_data++;
 699
 700	if (ext_params & CPU_MF_INT_SF_IAE) {
 701		cb->invalid_entry_address++;
 702		rc = -EINVAL;
 703	}
 704
 705	if (ext_params & CPU_MF_INT_SF_ISE) {
 706		cb->incorrect_sdbt_entry++;
 707		rc = -EINVAL;
 708	}
 709
 710	if (ext_params & CPU_MF_INT_SF_SACA) {
 711		cb->sample_auth_change_alert++;
 712		rc = -EINVAL;
 713	}
 714
 715	return rc;
 716}
 717
 718static void worker_on_finish(unsigned int cpu)
 719{
 720	int rc, i;
 721	struct hws_cpu_buffer *cb;
 722
 723	cb = &per_cpu(sampler_cpu_buffer, cpu);
 724
 725	if (cb->finish) {
 726		rc = smp_ctl_qsi(cpu);
 727		WARN_ON(rc);
 728		if (cb->qsi.es) {
 729			printk(KERN_INFO
 730				"hwsampler: CPU %d, CPUMF Stop/Deactivate sampling.\n",
 731				cpu);
 732			rc = smp_ctl_ssctl_stop(cpu);
 733			if (rc)
 734				printk(KERN_INFO
 735					"hwsampler: CPU %d, CPUMF Deactivation failed.\n",
 736					cpu);
 737
 738			for_each_online_cpu(i) {
 739				if (i == cpu)
 740					continue;
 741				if (!cb->finish) {
 742					cb->finish = 1;
 743					queue_work_on(i, hws_wq,
 744						&cb->worker);
 745				}
 746			}
 747		}
 748	}
 749}
 750
 751static void worker_on_interrupt(unsigned int cpu)
 752{
 753	unsigned long *sdbt;
 754	unsigned char done;
 755	struct hws_cpu_buffer *cb;
 756
 757	cb = &per_cpu(sampler_cpu_buffer, cpu);
 758
 759	sdbt = (unsigned long *) cb->worker_entry;
 760
 761	done = 0;
 762	/* do not proceed if stop was entered,
 763	 * forget the buffers not yet processed */
 764	while (!done && !cb->stop_mode) {
 765		unsigned long *trailer;
 766		struct hws_trailer_entry *te;
 767		unsigned long *dear = 0;
 768
 769		trailer = trailer_entry_ptr(*sdbt);
 770		/* leave loop if no more work to do */
 771		if (!(*trailer & SDB_TE_BUFFER_FULL_MASK)) {
 772			done = 1;
 773			if (!hws_flush_all)
 774				continue;
 775		}
 776
 777		te = (struct hws_trailer_entry *)trailer;
 778		cb->sample_overflow += te->overflow;
 779
 780		add_samples_to_oprofile(cpu, sdbt, dear);
 781
 782		/* reset trailer */
 783		xchg((unsigned char *) te, 0x40);
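		/*
		 * (Inferred from the SDB_TE_*_MASK values: the flag bits live
		 * in the trailer's first byte -- 0x80 is BUFFER_FULL, 0x40 is
		 * ALERT_REQ.  Writing 0x40 clears "full" while keeping the
		 * alert request armed for the next time this SDB fills up.)
		 */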
 784
 785		/* advance to next sdb slot in current sdbt */
 786		sdbt++;
 787		/* in case link bit is set use address w/o link bit */
 788		if (is_link_entry(sdbt))
 789			sdbt = get_next_sdbt(sdbt);
 790
 791		cb->worker_entry = (unsigned long)sdbt;
 792	}
 793}
 794
 795static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
 796		unsigned long *dear)
 797{
 798	struct hws_basic_entry *sample_data_ptr;
 799	unsigned long *trailer;
 800
 801	trailer = trailer_entry_ptr(*sdbt);
 802	if (dear) {
 803		if (dear > trailer)
 804			return;
 805		trailer = dear;
 806	}
 807
 808	sample_data_ptr = (struct hws_basic_entry *)(*sdbt);
 809
 810	while ((unsigned long *)sample_data_ptr < trailer) {
 811		struct pt_regs *regs = NULL;
 812		struct task_struct *tsk = NULL;
 813
 814		/*
 815		 * Check sampling mode, 1 indicates basic (=customer) sampling
 816		 * mode.
 817		 */
 818		if (sample_data_ptr->def != 1) {
 819			/* sample slot is not yet written */
 820			break;
 821		} else {
 822			/* make sure we don't use it twice,
 823			 * the next time the sampler will set it again */
 824			sample_data_ptr->def = 0;
 825		}
 826
 827		/* Get pt_regs. */
 828		if (sample_data_ptr->P == 1) {
 829			/* userspace sample */
 830			unsigned int pid = sample_data_ptr->prim_asn;
 831			if (!counter_config.user)
 832				goto skip_sample;
 833			rcu_read_lock();
 834			tsk = pid_task(find_vpid(pid), PIDTYPE_PID);
 835			if (tsk)
 836				regs = task_pt_regs(tsk);
 837			rcu_read_unlock();
 838		} else {
 839			/* kernelspace sample */
 840			if (!counter_config.kernel)
 841				goto skip_sample;
 842			regs = task_pt_regs(current);
 843		}
 844
 845		mutex_lock(&hws_sem);
 846		oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0,
 847				!sample_data_ptr->P, tsk);
 848		mutex_unlock(&hws_sem);
 849	skip_sample:
 850		sample_data_ptr++;
 851	}
 852}
 853
 854static void worker(struct work_struct *work)
 855{
 856	unsigned int cpu;
 857	int ext_params;
 858	struct hws_cpu_buffer *cb;
 859
 860	cb = container_of(work, struct hws_cpu_buffer, worker);
 861	cpu = smp_processor_id();
 862	ext_params = atomic_xchg(&cb->ext_params, 0);
 863
 864	if (!cb->worker_entry)
 865		worker_on_start(cpu);
 866
 867	if (worker_check_error(cpu, ext_params))
 868		return;
 869
 870	if (!cb->finish)
 871		worker_on_interrupt(cpu);
 872
 873	if (cb->finish)
 874		worker_on_finish(cpu);
 875}
 876
 877/**
 878 * hwsampler_allocate() - allocate memory for the hardware sampler
 879 * @sdbt:  number of SDBTs per online CPU (must be > 0)
 880 * @sdb:   number of SDBs per SDBT (minimum 1, maximum 511)
 881 *
 882 * Returns 0 on success, !0 on failure.
 883 */
 884int hwsampler_allocate(unsigned long sdbt, unsigned long sdb)
 885{
 886	int cpu, rc;
 887	mutex_lock(&hws_sem);
 888
 889	rc = -EINVAL;
 890	if (hws_state != HWS_DEALLOCATED)
 891		goto allocate_exit;
 892
 893	if (sdbt < 1)
 894		goto allocate_exit;
 895
 896	if (sdb > MAX_NUM_SDB || sdb < MIN_NUM_SDB)
 897		goto allocate_exit;
 898
 899	num_sdbt = sdbt;
 900	num_sdb = sdb;
 901
 902	oom_killer_was_active = 0;
 903	register_oom_notifier(&hws_oom_notifier);
 904
 905	for_each_online_cpu(cpu) {
 906		if (allocate_sdbt(cpu)) {
 907			unregister_oom_notifier(&hws_oom_notifier);
 908			goto allocate_error;
 909		}
 910	}
 911	unregister_oom_notifier(&hws_oom_notifier);
 912	if (oom_killer_was_active)
 913		goto allocate_error;
 914
 915	hws_state = HWS_STOPPED;
 916	rc = 0;
 917
 918allocate_exit:
 919	mutex_unlock(&hws_sem);
 920	return rc;
 921
 922allocate_error:
 923	rc = -ENOMEM;
 924	printk(KERN_ERR "hwsampler: CPUMF Memory allocation failed.\n");
 925	goto allocate_exit;
 926}
 927
 928/**
 929 * hwsampler_deallocate() - deallocate hardware sampler memory
 930 *
 931 * Returns 0 on success, !0 on failure.
 932 */
 933int hwsampler_deallocate(void)
 934{
 935	int rc;
 936
 937	mutex_lock(&hws_sem);
 938
 939	rc = -EINVAL;
 940	if (hws_state != HWS_STOPPED)
 941		goto deallocate_exit;
 942
 943	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 944	hws_alert = 0;
 945	deallocate_sdbt();
 946
 947	hws_state = HWS_DEALLOCATED;
 948	rc = 0;
 949
 950deallocate_exit:
 951	mutex_unlock(&hws_sem);
 952
 953	return rc;
 954}
 955
 956unsigned long hwsampler_query_min_interval(void)
 957{
 958	return min_sampler_rate;
 959}
 960
 961unsigned long hwsampler_query_max_interval(void)
 962{
 963	return max_sampler_rate;
 964}
 965
 966unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
 967{
 968	struct hws_cpu_buffer *cb;
 969
 970	cb = &per_cpu(sampler_cpu_buffer, cpu);
 971
 972	return cb->sample_overflow;
 973}
 974
 975int hwsampler_setup(void)
 976{
 977	int rc;
 978	int cpu;
 979	struct hws_cpu_buffer *cb;
 980
 981	mutex_lock(&hws_sem);
 982
 983	rc = -EINVAL;
 984	if (hws_state)
 985		goto setup_exit;
 986
 987	hws_state = HWS_INIT;
 988
 989	init_all_cpu_buffers();
 990
 991	rc = check_hardware_prerequisites();
 992	if (rc)
 993		goto setup_exit;
 994
 995	rc = check_qsi_on_setup();
 996	if (rc)
 997		goto setup_exit;
 998
 999	rc = -EINVAL;
1000	hws_wq = create_workqueue("hwsampler");
1001	if (!hws_wq)
1002		goto setup_exit;
1003
1004	register_cpu_notifier(&hws_cpu_notifier);
1005
1006	for_each_online_cpu(cpu) {
1007		cb = &per_cpu(sampler_cpu_buffer, cpu);
1008		INIT_WORK(&cb->worker, worker);
1009		rc = smp_ctl_qsi(cpu);
1010		WARN_ON(rc);
1011		if (min_sampler_rate != cb->qsi.min_sampl_rate) {
1012			if (min_sampler_rate) {
1013				printk(KERN_WARNING
1014					"hwsampler: different min sampler rate values.\n");
1015				if (min_sampler_rate < cb->qsi.min_sampl_rate)
1016					min_sampler_rate =
1017						cb->qsi.min_sampl_rate;
1018			} else
1019				min_sampler_rate = cb->qsi.min_sampl_rate;
1020		}
1021		if (max_sampler_rate != cb->qsi.max_sampl_rate) {
1022			if (max_sampler_rate) {
1023				printk(KERN_WARNING
1024					"hwsampler: different max sampler rate values.\n");
1025				if (max_sampler_rate > cb->qsi.max_sampl_rate)
1026					max_sampler_rate =
1027						cb->qsi.max_sampl_rate;
1028			} else
1029				max_sampler_rate = cb->qsi.max_sampl_rate;
1030		}
1031	}
1032	register_external_irq(EXT_IRQ_MEASURE_ALERT, hws_ext_handler);
1033
1034	hws_state = HWS_DEALLOCATED;
1035	rc = 0;
1036
1037setup_exit:
1038	mutex_unlock(&hws_sem);
1039	return rc;
1040}
1041
1042int hwsampler_shutdown(void)
1043{
1044	int rc;
1045
1046	mutex_lock(&hws_sem);
1047
1048	rc = -EINVAL;
1049	if (hws_state == HWS_DEALLOCATED || hws_state == HWS_STOPPED) {
1050		mutex_unlock(&hws_sem);
1051
1052		if (hws_wq)
1053			flush_workqueue(hws_wq);
1054
1055		mutex_lock(&hws_sem);
1056
1057		if (hws_state == HWS_STOPPED) {
1058			irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
1059			hws_alert = 0;
1060			deallocate_sdbt();
1061		}
1062		if (hws_wq) {
1063			destroy_workqueue(hws_wq);
1064			hws_wq = NULL;
1065		}
1066
1067		unregister_external_irq(EXT_IRQ_MEASURE_ALERT, hws_ext_handler);
1068		hws_state = HWS_INIT;
1069		rc = 0;
1070	}
1071	mutex_unlock(&hws_sem);
1072
1073	unregister_cpu_notifier(&hws_cpu_notifier);
1074
1075	return rc;
1076}
1077
1078/**
1079 * hwsampler_start_all() - start hardware sampling on all online CPUs
 1080 * @rate:  the sampling interval, in machine cycles
1081 *
1082 * Returns 0 on success, !0 on failure.
1083 */
1084int hwsampler_start_all(unsigned long rate)
1085{
1086	int rc, cpu;
1087
1088	mutex_lock(&hws_sem);
1089
1090	hws_oom = 0;
1091
1092	rc = -EINVAL;
1093	if (hws_state != HWS_STOPPED)
1094		goto start_all_exit;
1095
1096	interval = rate;
1097
1098	/* fail if rate is not valid */
1099	if (interval < min_sampler_rate || interval > max_sampler_rate)
1100		goto start_all_exit;
1101
1102	rc = check_qsi_on_start();
1103	if (rc)
1104		goto start_all_exit;
1105
1106	prepare_cpu_buffers();
1107
1108	for_each_online_cpu(cpu) {
1109		rc = start_sampling(cpu);
1110		if (rc)
1111			break;
1112	}
1113	if (rc) {
1114		for_each_online_cpu(cpu) {
1115			stop_sampling(cpu);
1116		}
1117		goto start_all_exit;
1118	}
1119	hws_state = HWS_STARTED;
1120	rc = 0;
1121
1122start_all_exit:
1123	mutex_unlock(&hws_sem);
1124
1125	if (rc)
1126		return rc;
1127
1128	register_oom_notifier(&hws_oom_notifier);
1129	hws_oom = 1;
1130	hws_flush_all = 0;
 1131	/* now let them in, 0x1407 CPUMF external interrupts */
1132	hws_alert = 1;
1133	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
1134
1135	return 0;
1136}
1137
1138/**
1139 * hwsampler_stop_all() - stop hardware sampling on all online CPUs
1140 *
1141 * Returns 0 on success, !0 on failure.
1142 */
1143int hwsampler_stop_all(void)
1144{
1145	int tmp_rc, rc, cpu;
1146	struct hws_cpu_buffer *cb;
1147
1148	mutex_lock(&hws_sem);
1149
1150	rc = 0;
1151	if (hws_state == HWS_INIT) {
1152		mutex_unlock(&hws_sem);
1153		return 0;
1154	}
1155	hws_state = HWS_STOPPING;
1156	mutex_unlock(&hws_sem);
1157
1158	for_each_online_cpu(cpu) {
1159		cb = &per_cpu(sampler_cpu_buffer, cpu);
1160		cb->stop_mode = 1;
1161		tmp_rc = stop_sampling(cpu);
1162		if (tmp_rc)
1163			rc = tmp_rc;
1164	}
1165
1166	if (hws_wq)
1167		flush_workqueue(hws_wq);
1168
1169	mutex_lock(&hws_sem);
1170	if (hws_oom) {
1171		unregister_oom_notifier(&hws_oom_notifier);
1172		hws_oom = 0;
1173	}
1174	hws_state = HWS_STOPPED;
1175	mutex_unlock(&hws_sem);
1176
1177	return rc;
1178}
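
The exported entry points above form a small state machine (HWS_INIT ->
HWS_DEALLOCATED -> HWS_STOPPED -> HWS_STARTED and back).  A minimal sketch of
the expected call order from a driver such as the s390 oprofile init code;
num_sdbt, num_sdb, rate and the error values are caller-chosen placeholders:

	if (hwsampler_setup())			/* HWS_INIT -> HWS_DEALLOCATED */
		return -ENODEV;
	if (hwsampler_allocate(num_sdbt, num_sdb))	/* -> HWS_STOPPED */
		goto out_shutdown;
	if (hwsampler_start_all(rate))		/* -> HWS_STARTED */
		goto out_deallocate;
	/* ... the per-CPU workqueue feeds samples to oprofile ... */
	hwsampler_stop_all();			/* -> HWS_STOPPED */
out_deallocate:
	hwsampler_deallocate();			/* -> HWS_DEALLOCATED */
out_shutdown:
	hwsampler_shutdown();			/* -> HWS_INIT */
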
v3.5.6
   1/**
   2 * arch/s390/oprofile/hwsampler.c
   3 *
   4 * Copyright IBM Corp. 2010
   5 * Author: Heinz Graalfs <graalfs@de.ibm.com>
   6 */
   7
   8#include <linux/kernel_stat.h>
   9#include <linux/kernel.h>
  10#include <linux/module.h>
  11#include <linux/smp.h>
  12#include <linux/errno.h>
  13#include <linux/workqueue.h>
  14#include <linux/interrupt.h>
  15#include <linux/notifier.h>
  16#include <linux/cpu.h>
  17#include <linux/semaphore.h>
  18#include <linux/oom.h>
  19#include <linux/oprofile.h>
  20
  21#include <asm/facility.h>
  22#include <asm/cpu_mf.h>
  23#include <asm/irq.h>
  24
  25#include "hwsampler.h"
  26#include "op_counter.h"
  27
  28#define MAX_NUM_SDB 511
  29#define MIN_NUM_SDB 1
  30
  31#define ALERT_REQ_MASK   0x4000000000000000ul
  32#define BUFFER_FULL_MASK 0x8000000000000000ul
  33
  34DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
  35
  36struct hws_execute_parms {
  37	void *buffer;
  38	signed int rc;
  39};
  40
  41DEFINE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
  42EXPORT_PER_CPU_SYMBOL(sampler_cpu_buffer);
  43
  44static DEFINE_MUTEX(hws_sem);
  45static DEFINE_MUTEX(hws_sem_oom);
  46
  47static unsigned char hws_flush_all;
  48static unsigned int hws_oom;
  49static struct workqueue_struct *hws_wq;
  50
  51static unsigned int hws_state;
  52enum {
  53	HWS_INIT = 1,
  54	HWS_DEALLOCATED,
  55	HWS_STOPPED,
  56	HWS_STARTED,
  57	HWS_STOPPING };
  58
  59/* set to 1 if called by kernel during memory allocation */
  60static unsigned char oom_killer_was_active;
   61/* number of SDBTs and SDBs as requested via the allocate API */
  62static unsigned long num_sdbt = 100;
  63static unsigned long num_sdb = 511;
  64/* sampling interval (machine cycles) */
  65static unsigned long interval;
  66
  67static unsigned long min_sampler_rate;
  68static unsigned long max_sampler_rate;
  69
  70static int ssctl(void *buffer)
  71{
  72	int cc;
  73
  74	/* set in order to detect a program check */
  75	cc = 1;
  76
  77	asm volatile(
  78		"0: .insn s,0xB2870000,0(%1)\n"
  79		"1: ipm %0\n"
  80		"   srl %0,28\n"
  81		"2:\n"
  82		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
  83		: "+d" (cc), "+a" (buffer)
  84		: "m" (*((struct hws_ssctl_request_block *)buffer))
  85		: "cc", "memory");
  86
  87	return cc ? -EINVAL : 0 ;
  88}
  89
  90static int qsi(void *buffer)
  91{
  92	int cc;
  93	cc = 1;
  94
  95	asm volatile(
  96		"0: .insn s,0xB2860000,0(%1)\n"
  97		"1: lhi %0,0\n"
  98		"2:\n"
  99		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
 100		: "=d" (cc), "+a" (buffer)
 101		: "m" (*((struct hws_qsi_info_block *)buffer))
 102		: "cc", "memory");
 103
 104	return cc ? -EINVAL : 0;
 105}
 106
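/*
 * Both helpers above use the s390 extable pattern: cc is preset to a
 * failure value, the .insn at label 0 may program-check on hardware
 * without the sampling facility, and EX_TABLE(0b,2b) resumes execution at
 * label 2 with cc unchanged, which is mapped to -EINVAL.  Only the
 * successful fall-through path updates cc (via ipm/srl in ssctl(), via
 * "lhi %0,0" in qsi()).
 */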
 107static void execute_qsi(void *parms)
 108{
 109	struct hws_execute_parms *ep = parms;
 110
 111	ep->rc = qsi(ep->buffer);
 112}
 113
 114static void execute_ssctl(void *parms)
 115{
 116	struct hws_execute_parms *ep = parms;
 117
 118	ep->rc = ssctl(ep->buffer);
 119}
 120
 121static int smp_ctl_ssctl_stop(int cpu)
 122{
 123	int rc;
 124	struct hws_execute_parms ep;
 125	struct hws_cpu_buffer *cb;
 126
 127	cb = &per_cpu(sampler_cpu_buffer, cpu);
 128
 129	cb->ssctl.es = 0;
 130	cb->ssctl.cs = 0;
 131
 132	ep.buffer = &cb->ssctl;
 133	smp_call_function_single(cpu, execute_ssctl, &ep, 1);
 134	rc = ep.rc;
 135	if (rc) {
 136		printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
 137		dump_stack();
 138	}
 139
 140	ep.buffer = &cb->qsi;
 141	smp_call_function_single(cpu, execute_qsi, &ep, 1);
 142
 143	if (cb->qsi.es || cb->qsi.cs) {
 144		printk(KERN_EMERG "CPUMF sampling did not stop properly.\n");
 145		dump_stack();
 146	}
 147
 148	return rc;
 149}
 150
 151static int smp_ctl_ssctl_deactivate(int cpu)
 152{
 153	int rc;
 154	struct hws_execute_parms ep;
 155	struct hws_cpu_buffer *cb;
 156
 157	cb = &per_cpu(sampler_cpu_buffer, cpu);
 158
 159	cb->ssctl.es = 1;
 160	cb->ssctl.cs = 0;
 161
 162	ep.buffer = &cb->ssctl;
 163	smp_call_function_single(cpu, execute_ssctl, &ep, 1);
 164	rc = ep.rc;
 165	if (rc)
 166		printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
 167
 168	ep.buffer = &cb->qsi;
 169	smp_call_function_single(cpu, execute_qsi, &ep, 1);
 170
 171	if (cb->qsi.cs)
 172		printk(KERN_EMERG "CPUMF sampling was not set inactive.\n");
 173
 174	return rc;
 175}
 176
 177static int smp_ctl_ssctl_enable_activate(int cpu, unsigned long interval)
 178{
 179	int rc;
 180	struct hws_execute_parms ep;
 181	struct hws_cpu_buffer *cb;
 182
 183	cb = &per_cpu(sampler_cpu_buffer, cpu);
 184
 185	cb->ssctl.h = 1;
 186	cb->ssctl.tear = cb->first_sdbt;
 187	cb->ssctl.dear = *(unsigned long *) cb->first_sdbt;
 188	cb->ssctl.interval = interval;
 189	cb->ssctl.es = 1;
 190	cb->ssctl.cs = 1;
 191
 192	ep.buffer = &cb->ssctl;
 193	smp_call_function_single(cpu, execute_ssctl, &ep, 1);
 194	rc = ep.rc;
 195	if (rc)
 196		printk(KERN_ERR "hwsampler: CPU %d CPUMF SSCTL failed.\n", cpu);
 197
 198	ep.buffer = &cb->qsi;
 199	smp_call_function_single(cpu, execute_qsi, &ep, 1);
 200	if (ep.rc)
 201		printk(KERN_ERR "hwsampler: CPU %d CPUMF QSI failed.\n", cpu);
 202
 203	return rc;
 204}
 205
 206static int smp_ctl_qsi(int cpu)
 207{
 208	struct hws_execute_parms ep;
 209	struct hws_cpu_buffer *cb;
 210
 211	cb = &per_cpu(sampler_cpu_buffer, cpu);
 212
 213	ep.buffer = &cb->qsi;
 214	smp_call_function_single(cpu, execute_qsi, &ep, 1);
 215
 216	return ep.rc;
 217}
 218
 219static inline unsigned long *trailer_entry_ptr(unsigned long v)
 220{
 221	void *ret;
 222
 223	ret = (void *)v;
 224	ret += PAGE_SIZE;
 225	ret -= sizeof(struct hws_trailer_entry);
 226
 227	return (unsigned long *) ret;
 228}
 229
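/*
 * That is: each SDB is one 4K page whose last sizeof(struct
 * hws_trailer_entry) bytes are the trailer, so trailer_entry_ptr(*sdbt)
 * yields the trailer of the SDB that an SDBT entry points to.
 */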
 230static void hws_ext_handler(struct ext_code ext_code,
 231			    unsigned int param32, unsigned long param64)
 232{
 233	struct hws_cpu_buffer *cb = &__get_cpu_var(sampler_cpu_buffer);
 234
 235	if (!(param32 & CPU_MF_INT_SF_MASK))
 236		return;
 237
 238	kstat_cpu(smp_processor_id()).irqs[EXTINT_CMS]++;
 239	atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
 240
 241	if (hws_wq)
 242		queue_work(hws_wq, &cb->worker);
 243}
 244
 245static void worker(struct work_struct *work);
 246
 247static void add_samples_to_oprofile(unsigned cpu, unsigned long *,
 248				unsigned long *dear);
 249
 250static void init_all_cpu_buffers(void)
 251{
 252	int cpu;
 253	struct hws_cpu_buffer *cb;
 254
 255	for_each_online_cpu(cpu) {
 256		cb = &per_cpu(sampler_cpu_buffer, cpu);
 257		memset(cb, 0, sizeof(struct hws_cpu_buffer));
 258	}
 259}
 260
 261static int is_link_entry(unsigned long *s)
 262{
 263	return *s & 0x1ul ? 1 : 0;
 264}
 265
 266static unsigned long *get_next_sdbt(unsigned long *s)
 267{
 268	return (unsigned long *) (*s & ~0x1ul);
 269}
 270
 271static int prepare_cpu_buffers(void)
 272{
 273	int cpu;
 274	int rc;
 275	struct hws_cpu_buffer *cb;
 276
 277	rc = 0;
 278	for_each_online_cpu(cpu) {
 279		cb = &per_cpu(sampler_cpu_buffer, cpu);
 280		atomic_set(&cb->ext_params, 0);
 281		cb->worker_entry = 0;
 282		cb->sample_overflow = 0;
 283		cb->req_alert = 0;
 284		cb->incorrect_sdbt_entry = 0;
 285		cb->invalid_entry_address = 0;
 286		cb->loss_of_sample_data = 0;
 287		cb->sample_auth_change_alert = 0;
 288		cb->finish = 0;
 289		cb->oom = 0;
 290		cb->stop_mode = 0;
 291	}
 292
 293	return rc;
 294}
 295
 296/*
 297 * allocate_sdbt() - allocate sampler memory
 298 * @cpu: the cpu for which sampler memory is allocated
 299 *
 300 * A 4K page is allocated for each requested SDBT.
 301 * A maximum of 511 4K pages are allocated for the SDBs in each of the SDBTs.
  302 * Set the ALERT_REQ mask in each SDB's trailer.
 303 * Returns zero if successful, <0 otherwise.
 304 */
 305static int allocate_sdbt(int cpu)
 306{
 307	int j, k, rc;
 308	unsigned long *sdbt;
 309	unsigned long  sdb;
 310	unsigned long *tail;
 311	unsigned long *trailer;
 312	struct hws_cpu_buffer *cb;
 313
 314	cb = &per_cpu(sampler_cpu_buffer, cpu);
 315
 316	if (cb->first_sdbt)
 317		return -EINVAL;
 318
 319	sdbt = NULL;
 320	tail = sdbt;
 321
 322	for (j = 0; j < num_sdbt; j++) {
 323		sdbt = (unsigned long *)get_zeroed_page(GFP_KERNEL);
 324
 325		mutex_lock(&hws_sem_oom);
 326		/* OOM killer might have been activated */
 327		barrier();
 328		if (oom_killer_was_active || !sdbt) {
 329			if (sdbt)
 330				free_page((unsigned long)sdbt);
 331
 332			goto allocate_sdbt_error;
 333		}
 334		if (cb->first_sdbt == 0)
 335			cb->first_sdbt = (unsigned long)sdbt;
 336
 337		/* link current page to tail of chain */
 338		if (tail)
 339			*tail = (unsigned long)(void *)sdbt + 1;
 340
 341		mutex_unlock(&hws_sem_oom);
 342
 343		for (k = 0; k < num_sdb; k++) {
 344			/* get and set SDB page */
 345			sdb = get_zeroed_page(GFP_KERNEL);
 346
 347			mutex_lock(&hws_sem_oom);
 348			/* OOM killer might have been activated */
 349			barrier();
 350			if (oom_killer_was_active || !sdb) {
 351				if (sdb)
 352					free_page(sdb);
 353
 354				goto allocate_sdbt_error;
 355			}
 356			*sdbt = sdb;
 357			trailer = trailer_entry_ptr(*sdbt);
 358			*trailer = ALERT_REQ_MASK;
 359			sdbt++;
 360			mutex_unlock(&hws_sem_oom);
 361		}
 362		tail = sdbt;
 363	}
 364	mutex_lock(&hws_sem_oom);
 365	if (oom_killer_was_active)
 366		goto allocate_sdbt_error;
 367
 368	rc = 0;
 369	if (tail)
 370		*tail = (unsigned long)
 371			((void *)cb->first_sdbt) + 1;
 372
 373allocate_sdbt_exit:
 374	mutex_unlock(&hws_sem_oom);
 375	return rc;
 376
 377allocate_sdbt_error:
 378	rc = -ENOMEM;
 379	goto allocate_sdbt_exit;
 380}
 381
 382/*
 383 * deallocate_sdbt() - deallocate all sampler memory
 384 *
 385 * For each online CPU all SDBT trees are deallocated.
 386 * Returns the number of freed pages.
 387 */
 388static int deallocate_sdbt(void)
 389{
 390	int cpu;
 391	int counter;
 392
 393	counter = 0;
 394
 395	for_each_online_cpu(cpu) {
 396		unsigned long start;
 397		unsigned long sdbt;
 398		unsigned long *curr;
 399		struct hws_cpu_buffer *cb;
 400
 401		cb = &per_cpu(sampler_cpu_buffer, cpu);
 402
 403		if (!cb->first_sdbt)
 404			continue;
 405
 406		sdbt = cb->first_sdbt;
 407		curr = (unsigned long *) sdbt;
 408		start = sdbt;
 409
 410		/* we'll free the SDBT after all SDBs are processed... */
 411		while (1) {
 412			if (!*curr || !sdbt)
 413				break;
 414
  415			/* a link entry ends this SDBT: follow it and free the page */
 416			if (is_link_entry(curr)) {
 417				curr = get_next_sdbt(curr);
 418				if (sdbt)
 419					free_page(sdbt);
 420
 421				/* we are done if we reach the start */
 422				if ((unsigned long) curr == start)
 423					break;
 424				else
 425					sdbt = (unsigned long) curr;
 426			} else {
 427				/* process SDB pointer */
 428				if (*curr) {
 429					free_page(*curr);
 430					curr++;
 431				}
 432			}
 433			counter++;
 434		}
 435		cb->first_sdbt = 0;
 436	}
 437	return counter;
 438}
 439
 440static int start_sampling(int cpu)
 441{
 442	int rc;
 443	struct hws_cpu_buffer *cb;
 444
 445	cb = &per_cpu(sampler_cpu_buffer, cpu);
 446	rc = smp_ctl_ssctl_enable_activate(cpu, interval);
 447	if (rc) {
 448		printk(KERN_INFO "hwsampler: CPU %d ssctl failed.\n", cpu);
 449		goto start_exit;
 450	}
 451
 452	rc = -EINVAL;
 453	if (!cb->qsi.es) {
 454		printk(KERN_INFO "hwsampler: CPU %d ssctl not enabled.\n", cpu);
 455		goto start_exit;
 456	}
 457
 458	if (!cb->qsi.cs) {
 459		printk(KERN_INFO "hwsampler: CPU %d ssctl not active.\n", cpu);
 460		goto start_exit;
 461	}
 462
 463	printk(KERN_INFO
 464		"hwsampler: CPU %d, CPUMF Sampling started, interval %lu.\n",
 465		cpu, interval);
 466
 467	rc = 0;
 468
 469start_exit:
 470	return rc;
 471}
 472
 473static int stop_sampling(int cpu)
 474{
 475	unsigned long v;
 476	int rc;
 477	struct hws_cpu_buffer *cb;
 478
 479	rc = smp_ctl_qsi(cpu);
 480	WARN_ON(rc);
 481
 482	cb = &per_cpu(sampler_cpu_buffer, cpu);
 483	if (!rc && !cb->qsi.es)
 484		printk(KERN_INFO "hwsampler: CPU %d, already stopped.\n", cpu);
 485
 486	rc = smp_ctl_ssctl_stop(cpu);
 487	if (rc) {
 488		printk(KERN_INFO "hwsampler: CPU %d, ssctl stop error %d.\n",
 489				cpu, rc);
 490		goto stop_exit;
 491	}
 492
 493	printk(KERN_INFO "hwsampler: CPU %d, CPUMF Sampling stopped.\n", cpu);
 494
 495stop_exit:
 496	v = cb->req_alert;
 497	if (v)
 498		printk(KERN_ERR "hwsampler: CPU %d CPUMF Request alert,"
 499				" count=%lu.\n", cpu, v);
 500
 501	v = cb->loss_of_sample_data;
 502	if (v)
 503		printk(KERN_ERR "hwsampler: CPU %d CPUMF Loss of sample data,"
 504				" count=%lu.\n", cpu, v);
 505
 506	v = cb->invalid_entry_address;
 507	if (v)
 508		printk(KERN_ERR "hwsampler: CPU %d CPUMF Invalid entry address,"
 509				" count=%lu.\n", cpu, v);
 510
 511	v = cb->incorrect_sdbt_entry;
 512	if (v)
 513		printk(KERN_ERR
 514				"hwsampler: CPU %d CPUMF Incorrect SDBT address,"
 515				" count=%lu.\n", cpu, v);
 516
 517	v = cb->sample_auth_change_alert;
 518	if (v)
 519		printk(KERN_ERR
 520				"hwsampler: CPU %d CPUMF Sample authorization change,"
 521				" count=%lu.\n", cpu, v);
 522
 523	return rc;
 524}
 525
 526static int check_hardware_prerequisites(void)
 527{
 528	if (!test_facility(68))
 529		return -EOPNOTSUPP;
 530	return 0;
 531}
 532/*
 533 * hws_oom_callback() - the OOM callback function
 534 *
 535 * In case the callback is invoked during memory allocation for the
 536 *  hw sampler, all obtained memory is deallocated and a flag is set
 537 *  so main sampler memory allocation can exit with a failure code.
 538 * In case the callback is invoked during sampling the hw sampler
 539 *  is deactivated for all CPUs.
 540 */
 541static int hws_oom_callback(struct notifier_block *nfb,
 542	unsigned long dummy, void *parm)
 543{
 544	unsigned long *freed;
 545	int cpu;
 546	struct hws_cpu_buffer *cb;
 547
 548	freed = parm;
 549
 550	mutex_lock(&hws_sem_oom);
 551
 552	if (hws_state == HWS_DEALLOCATED) {
 553		/* during memory allocation */
 554		if (oom_killer_was_active == 0) {
 555			oom_killer_was_active = 1;
 556			*freed += deallocate_sdbt();
 557		}
 558	} else {
 559		int i;
 560		cpu = get_cpu();
 561		cb = &per_cpu(sampler_cpu_buffer, cpu);
 562
 563		if (!cb->oom) {
 564			for_each_online_cpu(i) {
 565				smp_ctl_ssctl_deactivate(i);
 566				cb->oom = 1;
 567			}
 568			cb->finish = 1;
 569
 570			printk(KERN_INFO
 571				"hwsampler: CPU %d, OOM notify during CPUMF Sampling.\n",
 572				cpu);
 573		}
 574	}
 575
 576	mutex_unlock(&hws_sem_oom);
 577
 578	return NOTIFY_OK;
 579}
 580
 581static struct notifier_block hws_oom_notifier = {
 582	.notifier_call = hws_oom_callback
 583};
 584
 585static int hws_cpu_callback(struct notifier_block *nfb,
 586	unsigned long action, void *hcpu)
 587{
 588	/* We do not have sampler space available for all possible CPUs.
 589	   All CPUs should be online when hw sampling is activated. */
 590	return (hws_state <= HWS_DEALLOCATED) ? NOTIFY_OK : NOTIFY_BAD;
 591}
 592
 593static struct notifier_block hws_cpu_notifier = {
 594	.notifier_call = hws_cpu_callback
 595};
 596
 597/**
 598 * hwsampler_deactivate() - set hardware sampling temporarily inactive
 599 * @cpu:  specifies the CPU to be set inactive.
 600 *
 601 * Returns 0 on success, !0 on failure.
 602 */
 603int hwsampler_deactivate(unsigned int cpu)
 604{
 605	/*
 606	 * Deactivate hw sampling temporarily and flush the buffer
 607	 * by pushing all the pending samples to oprofile buffer.
 608	 *
 609	 * This function can be called under one of the following conditions:
 610	 *     Memory unmap, task is exiting.
 611	 */
 612	int rc;
 613	struct hws_cpu_buffer *cb;
 614
 615	rc = 0;
 616	mutex_lock(&hws_sem);
 617
 618	cb = &per_cpu(sampler_cpu_buffer, cpu);
 619	if (hws_state == HWS_STARTED) {
 620		rc = smp_ctl_qsi(cpu);
 621		WARN_ON(rc);
 622		if (cb->qsi.cs) {
 623			rc = smp_ctl_ssctl_deactivate(cpu);
 624			if (rc) {
 625				printk(KERN_INFO
 626				"hwsampler: CPU %d, CPUMF Deactivation failed.\n", cpu);
 627				cb->finish = 1;
 628				hws_state = HWS_STOPPING;
 629			} else  {
 630				hws_flush_all = 1;
 631				/* Add work to queue to read pending samples.*/
 632				queue_work_on(cpu, hws_wq, &cb->worker);
 633			}
 634		}
 635	}
 636	mutex_unlock(&hws_sem);
 637
 638	if (hws_wq)
 639		flush_workqueue(hws_wq);
 640
 641	return rc;
 642}
 643
 644/**
 645 * hwsampler_activate() - activate/resume hardware sampling which was deactivated
 646 * @cpu:  specifies the CPU to be set active.
 647 *
 648 * Returns 0 on success, !0 on failure.
 649 */
 650int hwsampler_activate(unsigned int cpu)
 651{
 652	/*
 653	 * Re-activate hw sampling. This should be called paired with
 654	 * hwsampler_deactivate().
 655	 */
 656	int rc;
 657	struct hws_cpu_buffer *cb;
 658
 659	rc = 0;
 660	mutex_lock(&hws_sem);
 661
 662	cb = &per_cpu(sampler_cpu_buffer, cpu);
 663	if (hws_state == HWS_STARTED) {
 664		rc = smp_ctl_qsi(cpu);
 665		WARN_ON(rc);
 666		if (!cb->qsi.cs) {
 667			hws_flush_all = 0;
 668			rc = smp_ctl_ssctl_enable_activate(cpu, interval);
 669			if (rc) {
 670				printk(KERN_ERR
 671				"CPU %d, CPUMF activate sampling failed.\n",
 672					 cpu);
 673			}
 674		}
 675	}
 676
 677	mutex_unlock(&hws_sem);
 678
 679	return rc;
 680}
 681
 682static int check_qsi_on_setup(void)
 683{
 684	int rc;
 685	unsigned int cpu;
 686	struct hws_cpu_buffer *cb;
 687
 688	for_each_online_cpu(cpu) {
 689		cb = &per_cpu(sampler_cpu_buffer, cpu);
 690		rc = smp_ctl_qsi(cpu);
 691		WARN_ON(rc);
 692		if (rc)
 693			return -EOPNOTSUPP;
 694
 695		if (!cb->qsi.as) {
 696			printk(KERN_INFO "hwsampler: CPUMF sampling is not authorized.\n");
 697			return -EINVAL;
 698		}
 699
 700		if (cb->qsi.es) {
 701			printk(KERN_WARNING "hwsampler: CPUMF is still enabled.\n");
 702			rc = smp_ctl_ssctl_stop(cpu);
 703			if (rc)
 704				return -EINVAL;
 705
 706			printk(KERN_INFO
 707				"CPU %d, CPUMF Sampling stopped now.\n", cpu);
 708		}
 709	}
 710	return 0;
 711}
 712
 713static int check_qsi_on_start(void)
 714{
 715	unsigned int cpu;
 716	int rc;
 717	struct hws_cpu_buffer *cb;
 718
 719	for_each_online_cpu(cpu) {
 720		cb = &per_cpu(sampler_cpu_buffer, cpu);
 721		rc = smp_ctl_qsi(cpu);
 722		WARN_ON(rc);
 723
 724		if (!cb->qsi.as)
 725			return -EINVAL;
 726
 727		if (cb->qsi.es)
 728			return -EINVAL;
 729
 730		if (cb->qsi.cs)
 731			return -EINVAL;
 732	}
 733	return 0;
 734}
 735
 736static void worker_on_start(unsigned int cpu)
 737{
 738	struct hws_cpu_buffer *cb;
 739
 740	cb = &per_cpu(sampler_cpu_buffer, cpu);
 741	cb->worker_entry = cb->first_sdbt;
 742}
 743
 744static int worker_check_error(unsigned int cpu, int ext_params)
 745{
 746	int rc;
 747	unsigned long *sdbt;
 748	struct hws_cpu_buffer *cb;
 749
 750	rc = 0;
 751	cb = &per_cpu(sampler_cpu_buffer, cpu);
 752	sdbt = (unsigned long *) cb->worker_entry;
 753
 754	if (!sdbt || !*sdbt)
 755		return -EINVAL;
 756
 757	if (ext_params & CPU_MF_INT_SF_PRA)
 758		cb->req_alert++;
 759
 760	if (ext_params & CPU_MF_INT_SF_LSDA)
 761		cb->loss_of_sample_data++;
 762
 763	if (ext_params & CPU_MF_INT_SF_IAE) {
 764		cb->invalid_entry_address++;
 765		rc = -EINVAL;
 766	}
 767
 768	if (ext_params & CPU_MF_INT_SF_ISE) {
 769		cb->incorrect_sdbt_entry++;
 770		rc = -EINVAL;
 771	}
 772
 773	if (ext_params & CPU_MF_INT_SF_SACA) {
 774		cb->sample_auth_change_alert++;
 775		rc = -EINVAL;
 776	}
 777
 778	return rc;
 779}
 780
 781static void worker_on_finish(unsigned int cpu)
 782{
 783	int rc, i;
 784	struct hws_cpu_buffer *cb;
 785
 786	cb = &per_cpu(sampler_cpu_buffer, cpu);
 787
 788	if (cb->finish) {
 789		rc = smp_ctl_qsi(cpu);
 790		WARN_ON(rc);
 791		if (cb->qsi.es) {
 792			printk(KERN_INFO
 793				"hwsampler: CPU %d, CPUMF Stop/Deactivate sampling.\n",
 794				cpu);
 795			rc = smp_ctl_ssctl_stop(cpu);
 796			if (rc)
 797				printk(KERN_INFO
 798					"hwsampler: CPU %d, CPUMF Deactivation failed.\n",
 799					cpu);
 800
 801			for_each_online_cpu(i) {
 802				if (i == cpu)
 803					continue;
 804				if (!cb->finish) {
 805					cb->finish = 1;
 806					queue_work_on(i, hws_wq,
 807						&cb->worker);
 808				}
 809			}
 810		}
 811	}
 812}
 813
 814static void worker_on_interrupt(unsigned int cpu)
 815{
 816	unsigned long *sdbt;
 817	unsigned char done;
 818	struct hws_cpu_buffer *cb;
 819
 820	cb = &per_cpu(sampler_cpu_buffer, cpu);
 821
 822	sdbt = (unsigned long *) cb->worker_entry;
 823
 824	done = 0;
 825	/* do not proceed if stop was entered,
 826	 * forget the buffers not yet processed */
 827	while (!done && !cb->stop_mode) {
 828		unsigned long *trailer;
 829		struct hws_trailer_entry *te;
 830		unsigned long *dear = 0;
 831
 832		trailer = trailer_entry_ptr(*sdbt);
 833		/* leave loop if no more work to do */
 834		if (!(*trailer & BUFFER_FULL_MASK)) {
 835			done = 1;
 836			if (!hws_flush_all)
 837				continue;
 838		}
 839
 840		te = (struct hws_trailer_entry *)trailer;
 841		cb->sample_overflow += te->overflow;
 842
 843		add_samples_to_oprofile(cpu, sdbt, dear);
 844
 845		/* reset trailer */
 846		xchg((unsigned char *) te, 0x40);
 847
 848		/* advance to next sdb slot in current sdbt */
 849		sdbt++;
 850		/* in case link bit is set use address w/o link bit */
 851		if (is_link_entry(sdbt))
 852			sdbt = get_next_sdbt(sdbt);
 853
 854		cb->worker_entry = (unsigned long)sdbt;
 855	}
 856}
 857
 858static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
 859		unsigned long *dear)
 860{
 861	struct hws_data_entry *sample_data_ptr;
 862	unsigned long *trailer;
 863
 864	trailer = trailer_entry_ptr(*sdbt);
 865	if (dear) {
 866		if (dear > trailer)
 867			return;
 868		trailer = dear;
 869	}
 870
 871	sample_data_ptr = (struct hws_data_entry *)(*sdbt);
 872
 873	while ((unsigned long *)sample_data_ptr < trailer) {
 874		struct pt_regs *regs = NULL;
 875		struct task_struct *tsk = NULL;
 876
 877		/*
 878		 * Check sampling mode, 1 indicates basic (=customer) sampling
 879		 * mode.
 880		 */
 881		if (sample_data_ptr->def != 1) {
 882			/* sample slot is not yet written */
 883			break;
 884		} else {
 885			/* make sure we don't use it twice,
 886			 * the next time the sampler will set it again */
 887			sample_data_ptr->def = 0;
 888		}
 889
 890		/* Get pt_regs. */
 891		if (sample_data_ptr->P == 1) {
 892			/* userspace sample */
 893			unsigned int pid = sample_data_ptr->prim_asn;
 894			if (!counter_config.user)
 895				goto skip_sample;
 896			rcu_read_lock();
 897			tsk = pid_task(find_vpid(pid), PIDTYPE_PID);
 898			if (tsk)
 899				regs = task_pt_regs(tsk);
 900			rcu_read_unlock();
 901		} else {
 902			/* kernelspace sample */
 903			if (!counter_config.kernel)
 904				goto skip_sample;
 905			regs = task_pt_regs(current);
 906		}
 907
 908		mutex_lock(&hws_sem);
 909		oprofile_add_ext_hw_sample(sample_data_ptr->ia, regs, 0,
 910				!sample_data_ptr->P, tsk);
 911		mutex_unlock(&hws_sem);
 912	skip_sample:
 913		sample_data_ptr++;
 914	}
 915}
 916
 917static void worker(struct work_struct *work)
 918{
 919	unsigned int cpu;
 920	int ext_params;
 921	struct hws_cpu_buffer *cb;
 922
 923	cb = container_of(work, struct hws_cpu_buffer, worker);
 924	cpu = smp_processor_id();
 925	ext_params = atomic_xchg(&cb->ext_params, 0);
 926
 927	if (!cb->worker_entry)
 928		worker_on_start(cpu);
 929
 930	if (worker_check_error(cpu, ext_params))
 931		return;
 932
 933	if (!cb->finish)
 934		worker_on_interrupt(cpu);
 935
 936	if (cb->finish)
 937		worker_on_finish(cpu);
 938}
 939
 940/**
 941 * hwsampler_allocate() - allocate memory for the hardware sampler
 942 * @sdbt:  number of SDBTs per online CPU (must be > 0)
 943 * @sdb:   number of SDBs per SDBT (minimum 1, maximum 511)
 944 *
 945 * Returns 0 on success, !0 on failure.
 946 */
 947int hwsampler_allocate(unsigned long sdbt, unsigned long sdb)
 948{
 949	int cpu, rc;
 950	mutex_lock(&hws_sem);
 951
 952	rc = -EINVAL;
 953	if (hws_state != HWS_DEALLOCATED)
 954		goto allocate_exit;
 955
 956	if (sdbt < 1)
 957		goto allocate_exit;
 958
 959	if (sdb > MAX_NUM_SDB || sdb < MIN_NUM_SDB)
 960		goto allocate_exit;
 961
 962	num_sdbt = sdbt;
 963	num_sdb = sdb;
 964
 965	oom_killer_was_active = 0;
 966	register_oom_notifier(&hws_oom_notifier);
 967
 968	for_each_online_cpu(cpu) {
 969		if (allocate_sdbt(cpu)) {
 970			unregister_oom_notifier(&hws_oom_notifier);
 971			goto allocate_error;
 972		}
 973	}
 974	unregister_oom_notifier(&hws_oom_notifier);
 975	if (oom_killer_was_active)
 976		goto allocate_error;
 977
 978	hws_state = HWS_STOPPED;
 979	rc = 0;
 980
 981allocate_exit:
 982	mutex_unlock(&hws_sem);
 983	return rc;
 984
 985allocate_error:
 986	rc = -ENOMEM;
 987	printk(KERN_ERR "hwsampler: CPUMF Memory allocation failed.\n");
 988	goto allocate_exit;
 989}
 990
 991/**
 992 * hwsampler_deallocate() - deallocate hardware sampler memory
 993 *
 994 * Returns 0 on success, !0 on failure.
 995 */
 996int hwsampler_deallocate(void)
 997{
 998	int rc;
 999
1000	mutex_lock(&hws_sem);
1001
1002	rc = -EINVAL;
1003	if (hws_state != HWS_STOPPED)
1004		goto deallocate_exit;
1005
1006	measurement_alert_subclass_unregister();
1007	deallocate_sdbt();
1008
1009	hws_state = HWS_DEALLOCATED;
1010	rc = 0;
1011
1012deallocate_exit:
1013	mutex_unlock(&hws_sem);
1014
1015	return rc;
1016}
1017
1018unsigned long hwsampler_query_min_interval(void)
1019{
1020	return min_sampler_rate;
1021}
1022
1023unsigned long hwsampler_query_max_interval(void)
1024{
1025	return max_sampler_rate;
1026}
1027
1028unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
1029{
1030	struct hws_cpu_buffer *cb;
1031
1032	cb = &per_cpu(sampler_cpu_buffer, cpu);
1033
1034	return cb->sample_overflow;
1035}
1036
1037int hwsampler_setup(void)
1038{
1039	int rc;
1040	int cpu;
1041	struct hws_cpu_buffer *cb;
1042
1043	mutex_lock(&hws_sem);
1044
1045	rc = -EINVAL;
1046	if (hws_state)
1047		goto setup_exit;
1048
1049	hws_state = HWS_INIT;
1050
1051	init_all_cpu_buffers();
1052
1053	rc = check_hardware_prerequisites();
1054	if (rc)
1055		goto setup_exit;
1056
1057	rc = check_qsi_on_setup();
1058	if (rc)
1059		goto setup_exit;
1060
1061	rc = -EINVAL;
1062	hws_wq = create_workqueue("hwsampler");
1063	if (!hws_wq)
1064		goto setup_exit;
1065
1066	register_cpu_notifier(&hws_cpu_notifier);
1067
1068	for_each_online_cpu(cpu) {
1069		cb = &per_cpu(sampler_cpu_buffer, cpu);
1070		INIT_WORK(&cb->worker, worker);
1071		rc = smp_ctl_qsi(cpu);
1072		WARN_ON(rc);
1073		if (min_sampler_rate != cb->qsi.min_sampl_rate) {
1074			if (min_sampler_rate) {
1075				printk(KERN_WARNING
1076					"hwsampler: different min sampler rate values.\n");
1077				if (min_sampler_rate < cb->qsi.min_sampl_rate)
1078					min_sampler_rate =
1079						cb->qsi.min_sampl_rate;
1080			} else
1081				min_sampler_rate = cb->qsi.min_sampl_rate;
1082		}
1083		if (max_sampler_rate != cb->qsi.max_sampl_rate) {
1084			if (max_sampler_rate) {
1085				printk(KERN_WARNING
1086					"hwsampler: different max sampler rate values.\n");
1087				if (max_sampler_rate > cb->qsi.max_sampl_rate)
1088					max_sampler_rate =
1089						cb->qsi.max_sampl_rate;
1090			} else
1091				max_sampler_rate = cb->qsi.max_sampl_rate;
1092		}
1093	}
1094	register_external_interrupt(0x1407, hws_ext_handler);
1095
1096	hws_state = HWS_DEALLOCATED;
1097	rc = 0;
1098
1099setup_exit:
1100	mutex_unlock(&hws_sem);
1101	return rc;
1102}
1103
1104int hwsampler_shutdown(void)
1105{
1106	int rc;
1107
1108	mutex_lock(&hws_sem);
1109
1110	rc = -EINVAL;
1111	if (hws_state == HWS_DEALLOCATED || hws_state == HWS_STOPPED) {
1112		mutex_unlock(&hws_sem);
1113
1114		if (hws_wq)
1115			flush_workqueue(hws_wq);
1116
1117		mutex_lock(&hws_sem);
1118
1119		if (hws_state == HWS_STOPPED) {
1120			measurement_alert_subclass_unregister();
1121			deallocate_sdbt();
1122		}
1123		if (hws_wq) {
1124			destroy_workqueue(hws_wq);
1125			hws_wq = NULL;
1126		}
1127
1128		unregister_external_interrupt(0x1407, hws_ext_handler);
1129		hws_state = HWS_INIT;
1130		rc = 0;
1131	}
1132	mutex_unlock(&hws_sem);
1133
1134	unregister_cpu_notifier(&hws_cpu_notifier);
1135
1136	return rc;
1137}
1138
1139/**
1140 * hwsampler_start_all() - start hardware sampling on all online CPUs
 1141 * @rate:  the sampling interval, in machine cycles
1142 *
1143 * Returns 0 on success, !0 on failure.
1144 */
1145int hwsampler_start_all(unsigned long rate)
1146{
1147	int rc, cpu;
1148
1149	mutex_lock(&hws_sem);
1150
1151	hws_oom = 0;
1152
1153	rc = -EINVAL;
1154	if (hws_state != HWS_STOPPED)
1155		goto start_all_exit;
1156
1157	interval = rate;
1158
1159	/* fail if rate is not valid */
1160	if (interval < min_sampler_rate || interval > max_sampler_rate)
1161		goto start_all_exit;
1162
1163	rc = check_qsi_on_start();
1164	if (rc)
1165		goto start_all_exit;
1166
1167	rc = prepare_cpu_buffers();
1168	if (rc)
1169		goto start_all_exit;
1170
1171	for_each_online_cpu(cpu) {
1172		rc = start_sampling(cpu);
1173		if (rc)
1174			break;
1175	}
1176	if (rc) {
1177		for_each_online_cpu(cpu) {
1178			stop_sampling(cpu);
1179		}
1180		goto start_all_exit;
1181	}
1182	hws_state = HWS_STARTED;
1183	rc = 0;
1184
1185start_all_exit:
1186	mutex_unlock(&hws_sem);
1187
1188	if (rc)
1189		return rc;
1190
1191	register_oom_notifier(&hws_oom_notifier);
1192	hws_oom = 1;
1193	hws_flush_all = 0;
 1194	/* now let them in, 0x1407 CPUMF external interrupts */
1195	measurement_alert_subclass_register();
1196
1197	return 0;
1198}
1199
1200/**
1201 * hwsampler_stop_all() - stop hardware sampling on all online CPUs
1202 *
1203 * Returns 0 on success, !0 on failure.
1204 */
1205int hwsampler_stop_all(void)
1206{
1207	int tmp_rc, rc, cpu;
1208	struct hws_cpu_buffer *cb;
1209
1210	mutex_lock(&hws_sem);
1211
1212	rc = 0;
1213	if (hws_state == HWS_INIT) {
1214		mutex_unlock(&hws_sem);
1215		return rc;
1216	}
1217	hws_state = HWS_STOPPING;
1218	mutex_unlock(&hws_sem);
1219
1220	for_each_online_cpu(cpu) {
1221		cb = &per_cpu(sampler_cpu_buffer, cpu);
1222		cb->stop_mode = 1;
1223		tmp_rc = stop_sampling(cpu);
1224		if (tmp_rc)
1225			rc = tmp_rc;
1226	}
1227
1228	if (hws_wq)
1229		flush_workqueue(hws_wq);
1230
1231	mutex_lock(&hws_sem);
1232	if (hws_oom) {
1233		unregister_oom_notifier(&hws_oom_notifier);
1234		hws_oom = 0;
1235	}
1236	hws_state = HWS_STOPPED;
1237	mutex_unlock(&hws_sem);
1238
1239	return rc;
1240}