v3.1
   1/*
   2 * Intel & MS High Precision Event Timer Implementation.
   3 *
   4 * Copyright (C) 2003 Intel Corporation
   5 *	Venki Pallipadi
   6 * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
   7 *	Bob Picco <robert.picco@hp.com>
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 */
  13
  14#include <linux/interrupt.h>
  15#include <linux/module.h>
  16#include <linux/kernel.h>
  17#include <linux/types.h>
  18#include <linux/miscdevice.h>
  19#include <linux/major.h>
  20#include <linux/ioport.h>
  21#include <linux/fcntl.h>
  22#include <linux/init.h>
  23#include <linux/poll.h>
  24#include <linux/mm.h>
  25#include <linux/proc_fs.h>
  26#include <linux/spinlock.h>
  27#include <linux/sysctl.h>
  28#include <linux/wait.h>
  29#include <linux/bcd.h>
  30#include <linux/seq_file.h>
  31#include <linux/bitops.h>
  32#include <linux/compat.h>
  33#include <linux/clocksource.h>
  34#include <linux/uaccess.h>
  35#include <linux/slab.h>
  36#include <linux/io.h>
   37
  38#include <asm/current.h>
  39#include <asm/system.h>
  40#include <asm/irq.h>
  41#include <asm/div64.h>
  42
  43#include <linux/acpi.h>
  44#include <acpi/acpi_bus.h>
  45#include <linux/hpet.h>
  46
  47/*
  48 * The High Precision Event Timer driver.
  49 * This driver is closely modelled after the rtc.c driver.
  50 * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
  51 */
  52#define	HPET_USER_FREQ	(64)
  53#define	HPET_DRIFT	(500)
  54
  55#define HPET_RANGE_SIZE		1024	/* from HPET spec */
  56
  57
  58/* WARNING -- don't get confused.  These macros are never used
  59 * to write the (single) counter, and rarely to read it.
  60 * They're badly named; to fix, someday.
  61 */
  62#if BITS_PER_LONG == 64
  63#define	write_counter(V, MC)	writeq(V, MC)
  64#define	read_counter(MC)	readq(MC)
  65#else
  66#define	write_counter(V, MC)	writel(V, MC)
  67#define	read_counter(MC)	readl(MC)
  68#endif
  69
  70static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
  71static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
  72
  73/* This clocksource driver currently only works on ia64 */
  74#ifdef CONFIG_IA64
  75static void __iomem *hpet_mctr;
  76
  77static cycle_t read_hpet(struct clocksource *cs)
  78{
  79	return (cycle_t)read_counter((void __iomem *)hpet_mctr);
  80}
  81
  82static struct clocksource clocksource_hpet = {
  83	.name		= "hpet",
  84	.rating		= 250,
  85	.read		= read_hpet,
  86	.mask		= CLOCKSOURCE_MASK(64),
  87	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
  88};
  89static struct clocksource *hpet_clocksource;
  90#endif
  91
  92/* A lock for concurrent access by app and isr hpet activity. */
  93static DEFINE_SPINLOCK(hpet_lock);
  94
  95#define	HPET_DEV_NAME	(7)
  96
  97struct hpet_dev {
  98	struct hpets *hd_hpets;
  99	struct hpet __iomem *hd_hpet;
 100	struct hpet_timer __iomem *hd_timer;
 101	unsigned long hd_ireqfreq;
 102	unsigned long hd_irqdata;
 103	wait_queue_head_t hd_waitqueue;
 104	struct fasync_struct *hd_async_queue;
 105	unsigned int hd_flags;
 106	unsigned int hd_irq;
 107	unsigned int hd_hdwirq;
 108	char hd_name[HPET_DEV_NAME];
 109};
 110
 111struct hpets {
 112	struct hpets *hp_next;
 113	struct hpet __iomem *hp_hpet;
 114	unsigned long hp_hpet_phys;
 115	struct clocksource *hp_clocksource;
 116	unsigned long long hp_tick_freq;
 117	unsigned long hp_delta;
 118	unsigned int hp_ntimer;
 119	unsigned int hp_which;
 120	struct hpet_dev hp_dev[1];
 121};
 122
 123static struct hpets *hpets;
 124
 125#define	HPET_OPEN		0x0001
 126#define	HPET_IE			0x0002	/* interrupt enabled */
 127#define	HPET_PERIODIC		0x0004
 128#define	HPET_SHARED_IRQ		0x0008
 129
 130
 131#ifndef readq
 132static inline unsigned long long readq(void __iomem *addr)
 133{
 134	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
 135}
 136#endif
 137
 138#ifndef writeq
 139static inline void writeq(unsigned long long v, void __iomem *addr)
 140{
 141	writel(v & 0xffffffff, addr);
 142	writel(v >> 32, addr + 4);
 143}
 144#endif
 145
 146static irqreturn_t hpet_interrupt(int irq, void *data)
 147{
 148	struct hpet_dev *devp;
 149	unsigned long isr;
 150
 151	devp = data;
 152	isr = 1 << (devp - devp->hd_hpets->hp_dev);
 153
 154	if ((devp->hd_flags & HPET_SHARED_IRQ) &&
 155	    !(isr & readl(&devp->hd_hpet->hpet_isr)))
 156		return IRQ_NONE;
 157
 158	spin_lock(&hpet_lock);
 159	devp->hd_irqdata++;
 160
 161	/*
 162	 * For non-periodic timers, increment the accumulator.
 163	 * This has the effect of treating non-periodic like periodic.
 164	 */
 165	if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
 166		unsigned long m, t, mc, base, k;
 167		struct hpet __iomem *hpet = devp->hd_hpet;
 168		struct hpets *hpetp = devp->hd_hpets;
 169
 170		t = devp->hd_ireqfreq;
 171		m = read_counter(&devp->hd_timer->hpet_compare);
 172		mc = read_counter(&hpet->hpet_mc);
 173		/* The time for the next interrupt would logically be t + m,
 174		 * however, if we are very unlucky and the interrupt is delayed
 175		 * for longer than t then we will completely miss the next
 176		 * interrupt if we set t + m and an application will hang.
 177		 * Therefore we need to make a more complex computation assuming
 178		 * that there exists a k for which the following is true:
 179		 * k * t + base < mc + delta
 180		 * (k + 1) * t + base > mc + delta
 181		 * where t is the interval in hpet ticks for the given freq,
 182		 * base is the theoretical start value 0 < base < t,
 183		 * mc is the main counter value at the time of the interrupt,
  184		 * delta is the time it takes to write a value to the
 185		 * comparator.
 186		 * k may then be computed as (mc - base + delta) / t .
 187		 */
 188		base = mc % t;
 189		k = (mc - base + hpetp->hp_delta) / t;
 190		write_counter(t * (k + 1) + base,
 191			      &devp->hd_timer->hpet_compare);
 192	}
 193
 194	if (devp->hd_flags & HPET_SHARED_IRQ)
 195		writel(isr, &devp->hd_hpet->hpet_isr);
 196	spin_unlock(&hpet_lock);
 197
 198	wake_up_interruptible(&devp->hd_waitqueue);
 199
 200	kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);
 201
 202	return IRQ_HANDLED;
 203}
 204
 205static void hpet_timer_set_irq(struct hpet_dev *devp)
 206{
 207	unsigned long v;
 208	int irq, gsi;
 209	struct hpet_timer __iomem *timer;
 210
 211	spin_lock_irq(&hpet_lock);
 212	if (devp->hd_hdwirq) {
 213		spin_unlock_irq(&hpet_lock);
 214		return;
 215	}
 216
 217	timer = devp->hd_timer;
 218
 219	/* we prefer level triggered mode */
 220	v = readl(&timer->hpet_config);
 221	if (!(v & Tn_INT_TYPE_CNF_MASK)) {
 222		v |= Tn_INT_TYPE_CNF_MASK;
 223		writel(v, &timer->hpet_config);
 224	}
 225	spin_unlock_irq(&hpet_lock);
 226
 227	v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >>
 228				 Tn_INT_ROUTE_CAP_SHIFT;
 229
 230	/*
  231	 * In PIC mode, skip IRQ0-4, IRQ6-9 and IRQ12-15, which are always used by
  232	 * legacy devices. In IO APIC mode, we skip all the legacy IRQs.
 233	 */
 234	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC)
 235		v &= ~0xf3df;
 236	else
 237		v &= ~0xffff;
 238
 239	for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
 240		if (irq >= nr_irqs) {
 241			irq = HPET_MAX_IRQ;
 242			break;
 243		}
 244
 245		gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
 246					ACPI_ACTIVE_LOW);
 247		if (gsi > 0)
 248			break;
 249
 250		/* FIXME: Setup interrupt source table */
 251	}
 252
 253	if (irq < HPET_MAX_IRQ) {
 254		spin_lock_irq(&hpet_lock);
 255		v = readl(&timer->hpet_config);
 256		v |= irq << Tn_INT_ROUTE_CNF_SHIFT;
 257		writel(v, &timer->hpet_config);
 258		devp->hd_hdwirq = gsi;
 259		spin_unlock_irq(&hpet_lock);
 260	}
 261	return;
 262}
 263
 264static int hpet_open(struct inode *inode, struct file *file)
 265{
 266	struct hpet_dev *devp;
 267	struct hpets *hpetp;
 268	int i;
 269
 270	if (file->f_mode & FMODE_WRITE)
 271		return -EINVAL;
 272
 273	mutex_lock(&hpet_mutex);
 274	spin_lock_irq(&hpet_lock);
 275
 276	for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
 277		for (i = 0; i < hpetp->hp_ntimer; i++)
 278			if (hpetp->hp_dev[i].hd_flags & HPET_OPEN)
 279				continue;
 280			else {
 281				devp = &hpetp->hp_dev[i];
 282				break;
 283			}
 284
 285	if (!devp) {
 286		spin_unlock_irq(&hpet_lock);
 287		mutex_unlock(&hpet_mutex);
 288		return -EBUSY;
 289	}
 290
 291	file->private_data = devp;
 292	devp->hd_irqdata = 0;
 293	devp->hd_flags |= HPET_OPEN;
 294	spin_unlock_irq(&hpet_lock);
 295	mutex_unlock(&hpet_mutex);
 296
 297	hpet_timer_set_irq(devp);
 298
 299	return 0;
 300}
 301
 302static ssize_t
 303hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
 304{
 305	DECLARE_WAITQUEUE(wait, current);
 306	unsigned long data;
 307	ssize_t retval;
 308	struct hpet_dev *devp;
 309
 310	devp = file->private_data;
 311	if (!devp->hd_ireqfreq)
 312		return -EIO;
 313
 314	if (count < sizeof(unsigned long))
 315		return -EINVAL;
 316
 317	add_wait_queue(&devp->hd_waitqueue, &wait);
 318
 319	for ( ; ; ) {
 320		set_current_state(TASK_INTERRUPTIBLE);
 321
 322		spin_lock_irq(&hpet_lock);
 323		data = devp->hd_irqdata;
 324		devp->hd_irqdata = 0;
 325		spin_unlock_irq(&hpet_lock);
 326
 327		if (data)
 328			break;
 329		else if (file->f_flags & O_NONBLOCK) {
 330			retval = -EAGAIN;
 331			goto out;
 332		} else if (signal_pending(current)) {
 333			retval = -ERESTARTSYS;
 334			goto out;
 335		}
 336		schedule();
 337	}
 338
 339	retval = put_user(data, (unsigned long __user *)buf);
 340	if (!retval)
 341		retval = sizeof(unsigned long);
 342out:
 343	__set_current_state(TASK_RUNNING);
 344	remove_wait_queue(&devp->hd_waitqueue, &wait);
 345
 346	return retval;
 347}
 348
 349static unsigned int hpet_poll(struct file *file, poll_table * wait)
 350{
 351	unsigned long v;
 352	struct hpet_dev *devp;
 353
 354	devp = file->private_data;
 355
 356	if (!devp->hd_ireqfreq)
 357		return 0;
 358
 359	poll_wait(file, &devp->hd_waitqueue, wait);
 360
 361	spin_lock_irq(&hpet_lock);
 362	v = devp->hd_irqdata;
 363	spin_unlock_irq(&hpet_lock);
 364
 365	if (v != 0)
 366		return POLLIN | POLLRDNORM;
 367
 368	return 0;
 369}
  370
 371static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
 372{
 373#ifdef	CONFIG_HPET_MMAP
 374	struct hpet_dev *devp;
 375	unsigned long addr;
 376
 377	if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
 378		return -EINVAL;
 379
 380	devp = file->private_data;
 381	addr = devp->hd_hpets->hp_hpet_phys;
 382
 383	if (addr & (PAGE_SIZE - 1))
 384		return -ENOSYS;
 385
 386	vma->vm_flags |= VM_IO;
 387	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 388
 389	if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
 390					PAGE_SIZE, vma->vm_page_prot)) {
 391		printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
 392			__func__);
 393		return -EAGAIN;
 394	}
 395
 396	return 0;
  397#else
 398	return -ENOSYS;
 399#endif
  400}
 401
 402static int hpet_fasync(int fd, struct file *file, int on)
 403{
 404	struct hpet_dev *devp;
 405
 406	devp = file->private_data;
 407
 408	if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0)
 409		return 0;
 410	else
 411		return -EIO;
 412}
 413
 414static int hpet_release(struct inode *inode, struct file *file)
 415{
 416	struct hpet_dev *devp;
 417	struct hpet_timer __iomem *timer;
 418	int irq = 0;
 419
 420	devp = file->private_data;
 421	timer = devp->hd_timer;
 422
 423	spin_lock_irq(&hpet_lock);
 424
 425	writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
 426	       &timer->hpet_config);
 427
 428	irq = devp->hd_irq;
 429	devp->hd_irq = 0;
 430
 431	devp->hd_ireqfreq = 0;
 432
 433	if (devp->hd_flags & HPET_PERIODIC
 434	    && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
 435		unsigned long v;
 436
 437		v = readq(&timer->hpet_config);
 438		v ^= Tn_TYPE_CNF_MASK;
 439		writeq(v, &timer->hpet_config);
 440	}
 441
 442	devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
 443	spin_unlock_irq(&hpet_lock);
 444
 445	if (irq)
 446		free_irq(irq, devp);
 447
 448	file->private_data = NULL;
 449	return 0;
 450}
 451
 452static int hpet_ioctl_ieon(struct hpet_dev *devp)
 453{
 454	struct hpet_timer __iomem *timer;
 455	struct hpet __iomem *hpet;
 456	struct hpets *hpetp;
 457	int irq;
 458	unsigned long g, v, t, m;
 459	unsigned long flags, isr;
 460
 461	timer = devp->hd_timer;
 462	hpet = devp->hd_hpet;
 463	hpetp = devp->hd_hpets;
 464
 465	if (!devp->hd_ireqfreq)
 466		return -EIO;
 467
 468	spin_lock_irq(&hpet_lock);
 469
 470	if (devp->hd_flags & HPET_IE) {
 471		spin_unlock_irq(&hpet_lock);
 472		return -EBUSY;
 473	}
 474
 475	devp->hd_flags |= HPET_IE;
 476
 477	if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK)
 478		devp->hd_flags |= HPET_SHARED_IRQ;
 479	spin_unlock_irq(&hpet_lock);
 480
 481	irq = devp->hd_hdwirq;
 482
 483	if (irq) {
 484		unsigned long irq_flags;
 485
 486		if (devp->hd_flags & HPET_SHARED_IRQ) {
 487			/*
 488			 * To prevent the interrupt handler from seeing an
 489			 * unwanted interrupt status bit, program the timer
 490			 * so that it will not fire in the near future ...
 491			 */
 492			writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
 493			       &timer->hpet_config);
 494			write_counter(read_counter(&hpet->hpet_mc),
 495				      &timer->hpet_compare);
 496			/* ... and clear any left-over status. */
 497			isr = 1 << (devp - devp->hd_hpets->hp_dev);
 498			writel(isr, &hpet->hpet_isr);
 499		}
 500
 501		sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
 502		irq_flags = devp->hd_flags & HPET_SHARED_IRQ
 503						? IRQF_SHARED : IRQF_DISABLED;
 504		if (request_irq(irq, hpet_interrupt, irq_flags,
 505				devp->hd_name, (void *)devp)) {
 506			printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
 507			irq = 0;
 508		}
 509	}
 510
 511	if (irq == 0) {
 512		spin_lock_irq(&hpet_lock);
 513		devp->hd_flags ^= HPET_IE;
 514		spin_unlock_irq(&hpet_lock);
 515		return -EIO;
 516	}
 517
 518	devp->hd_irq = irq;
 519	t = devp->hd_ireqfreq;
 520	v = readq(&timer->hpet_config);
 521
 522	/* 64-bit comparators are not yet supported through the ioctls,
 523	 * so force this into 32-bit mode if it supports both modes
 524	 */
 525	g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;
 526
 527	if (devp->hd_flags & HPET_PERIODIC) {
 528		g |= Tn_TYPE_CNF_MASK;
 529		v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;
 530		writeq(v, &timer->hpet_config);
 531		local_irq_save(flags);
 532
 533		/*
 534		 * NOTE: First we modify the hidden accumulator
 535		 * register supported by periodic-capable comparators.
 536		 * We never want to modify the (single) counter; that
 537		 * would affect all the comparators. The value written
 538		 * is the counter value when the first interrupt is due.
 539		 */
 540		m = read_counter(&hpet->hpet_mc);
 541		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
 542		/*
 543		 * Then we modify the comparator, indicating the period
 544		 * for subsequent interrupt.
 545		 */
 546		write_counter(t, &timer->hpet_compare);
 547	} else {
 548		local_irq_save(flags);
 549		m = read_counter(&hpet->hpet_mc);
 550		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
 551	}
 552
 553	if (devp->hd_flags & HPET_SHARED_IRQ) {
 554		isr = 1 << (devp - devp->hd_hpets->hp_dev);
 555		writel(isr, &hpet->hpet_isr);
 556	}
 557	writeq(g, &timer->hpet_config);
 558	local_irq_restore(flags);
 559
 560	return 0;
 561}
 562
 563/* converts Hz to number of timer ticks */
 564static inline unsigned long hpet_time_div(struct hpets *hpets,
 565					  unsigned long dis)
 566{
 567	unsigned long long m;
 568
 569	m = hpets->hp_tick_freq + (dis >> 1);
 570	do_div(m, dis);
 571	return (unsigned long)m;
 572}
 573
 574static int
 575hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
 576		  struct hpet_info *info)
 577{
 578	struct hpet_timer __iomem *timer;
 579	struct hpet __iomem *hpet;
 580	struct hpets *hpetp;
 581	int err;
 582	unsigned long v;
 583
 584	switch (cmd) {
 585	case HPET_IE_OFF:
 586	case HPET_INFO:
 587	case HPET_EPI:
 588	case HPET_DPI:
 589	case HPET_IRQFREQ:
 590		timer = devp->hd_timer;
 591		hpet = devp->hd_hpet;
 592		hpetp = devp->hd_hpets;
 593		break;
 594	case HPET_IE_ON:
 595		return hpet_ioctl_ieon(devp);
 596	default:
 597		return -EINVAL;
 598	}
 599
 600	err = 0;
 601
 602	switch (cmd) {
 603	case HPET_IE_OFF:
 604		if ((devp->hd_flags & HPET_IE) == 0)
 605			break;
 606		v = readq(&timer->hpet_config);
 607		v &= ~Tn_INT_ENB_CNF_MASK;
 608		writeq(v, &timer->hpet_config);
 609		if (devp->hd_irq) {
 610			free_irq(devp->hd_irq, devp);
 611			devp->hd_irq = 0;
 612		}
 613		devp->hd_flags ^= HPET_IE;
 614		break;
 615	case HPET_INFO:
 616		{
 617			memset(info, 0, sizeof(*info));
 618			if (devp->hd_ireqfreq)
 619				info->hi_ireqfreq =
 620					hpet_time_div(hpetp, devp->hd_ireqfreq);
 621			info->hi_flags =
 622			    readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
 623			info->hi_hpet = hpetp->hp_which;
 624			info->hi_timer = devp - hpetp->hp_dev;
 625			break;
 626		}
 627	case HPET_EPI:
 628		v = readq(&timer->hpet_config);
 629		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
 630			err = -ENXIO;
 631			break;
 632		}
 633		devp->hd_flags |= HPET_PERIODIC;
 634		break;
 635	case HPET_DPI:
 636		v = readq(&timer->hpet_config);
 637		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
 638			err = -ENXIO;
 639			break;
 640		}
 641		if (devp->hd_flags & HPET_PERIODIC &&
 642		    readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
 643			v = readq(&timer->hpet_config);
 644			v ^= Tn_TYPE_CNF_MASK;
 645			writeq(v, &timer->hpet_config);
 646		}
 647		devp->hd_flags &= ~HPET_PERIODIC;
 648		break;
 649	case HPET_IRQFREQ:
 650		if ((arg > hpet_max_freq) &&
 651		    !capable(CAP_SYS_RESOURCE)) {
 652			err = -EACCES;
 653			break;
 654		}
 655
 656		if (!arg) {
 657			err = -EINVAL;
 658			break;
 659		}
 660
 661		devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
 662	}
 663
 664	return err;
 665}
 666
 667static long
 668hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 669{
 670	struct hpet_info info;
 671	int err;
 672
 673	mutex_lock(&hpet_mutex);
 674	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
 675	mutex_unlock(&hpet_mutex);
 676
 677	if ((cmd == HPET_INFO) && !err &&
 678	    (copy_to_user((void __user *)arg, &info, sizeof(info))))
 679		err = -EFAULT;
 680
 681	return err;
 682}
 683
 684#ifdef CONFIG_COMPAT
 685struct compat_hpet_info {
 686	compat_ulong_t hi_ireqfreq;	/* Hz */
 687	compat_ulong_t hi_flags;	/* information */
 688	unsigned short hi_hpet;
 689	unsigned short hi_timer;
 690};
 691
 692static long
 693hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 694{
 695	struct hpet_info info;
 696	int err;
 697
 698	mutex_lock(&hpet_mutex);
 699	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
 700	mutex_unlock(&hpet_mutex);
 701
 702	if ((cmd == HPET_INFO) && !err) {
 703		struct compat_hpet_info __user *u = compat_ptr(arg);
 704		if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) ||
 705		    put_user(info.hi_flags, &u->hi_flags) ||
 706		    put_user(info.hi_hpet, &u->hi_hpet) ||
 707		    put_user(info.hi_timer, &u->hi_timer))
 708			err = -EFAULT;
 709	}
 710
 711	return err;
 712}
 713#endif
 714
 715static const struct file_operations hpet_fops = {
 716	.owner = THIS_MODULE,
 717	.llseek = no_llseek,
 718	.read = hpet_read,
 719	.poll = hpet_poll,
 720	.unlocked_ioctl = hpet_ioctl,
 721#ifdef CONFIG_COMPAT
 722	.compat_ioctl = hpet_compat_ioctl,
 723#endif
 724	.open = hpet_open,
 725	.release = hpet_release,
 726	.fasync = hpet_fasync,
 727	.mmap = hpet_mmap,
 728};
 729
 730static int hpet_is_known(struct hpet_data *hdp)
 731{
 732	struct hpets *hpetp;
 733
 734	for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
 735		if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
 736			return 1;
 737
 738	return 0;
 739}
 740
 741static ctl_table hpet_table[] = {
 742	{
 743	 .procname = "max-user-freq",
 744	 .data = &hpet_max_freq,
 745	 .maxlen = sizeof(int),
 746	 .mode = 0644,
 747	 .proc_handler = proc_dointvec,
 748	 },
 749	{}
 750};
 751
 752static ctl_table hpet_root[] = {
 753	{
 754	 .procname = "hpet",
 755	 .maxlen = 0,
 756	 .mode = 0555,
 757	 .child = hpet_table,
 758	 },
 759	{}
 760};
 761
 762static ctl_table dev_root[] = {
 763	{
 764	 .procname = "dev",
 765	 .maxlen = 0,
 766	 .mode = 0555,
 767	 .child = hpet_root,
 768	 },
 769	{}
 770};
 771
 772static struct ctl_table_header *sysctl_header;
 773
 774/*
 775 * Adjustment for when arming the timer with
 776 * initial conditions.  That is, main counter
 777 * ticks expired before interrupts are enabled.
 778 */
 779#define	TICK_CALIBRATE	(1000UL)
 780
 781static unsigned long __hpet_calibrate(struct hpets *hpetp)
 782{
 783	struct hpet_timer __iomem *timer = NULL;
 784	unsigned long t, m, count, i, flags, start;
 785	struct hpet_dev *devp;
 786	int j;
 787	struct hpet __iomem *hpet;
 788
 789	for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
 790		if ((devp->hd_flags & HPET_OPEN) == 0) {
 791			timer = devp->hd_timer;
 792			break;
 793		}
 794
 795	if (!timer)
 796		return 0;
 797
 798	hpet = hpetp->hp_hpet;
 799	t = read_counter(&timer->hpet_compare);
 800
 801	i = 0;
 802	count = hpet_time_div(hpetp, TICK_CALIBRATE);
 803
 804	local_irq_save(flags);
 805
 806	start = read_counter(&hpet->hpet_mc);
 807
 808	do {
 809		m = read_counter(&hpet->hpet_mc);
 810		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
 811	} while (i++, (m - start) < count);
 812
 813	local_irq_restore(flags);
 814
 815	return (m - start) / i;
 816}
 817
 818static unsigned long hpet_calibrate(struct hpets *hpetp)
 819{
 820	unsigned long ret = -1;
 821	unsigned long tmp;
 822
 823	/*
  824	 * Try to calibrate until the return value becomes a stable, small value.
  825	 * If an SMI occurs during the calibration loop, the return value
  826	 * will be large; retrying avoids its impact.
 827	 */
 828	for ( ; ; ) {
 829		tmp = __hpet_calibrate(hpetp);
 830		if (ret <= tmp)
 831			break;
 832		ret = tmp;
 833	}
 834
 835	return ret;
 836}
 837
 838int hpet_alloc(struct hpet_data *hdp)
 839{
 840	u64 cap, mcfg;
 841	struct hpet_dev *devp;
 842	u32 i, ntimer;
 843	struct hpets *hpetp;
 844	size_t siz;
 845	struct hpet __iomem *hpet;
 846	static struct hpets *last;
 847	unsigned long period;
 848	unsigned long long temp;
 849	u32 remainder;
 850
 851	/*
 852	 * hpet_alloc can be called by platform dependent code.
 853	 * If platform dependent code has allocated the hpet that
 854	 * ACPI has also reported, then we catch it here.
 855	 */
 856	if (hpet_is_known(hdp)) {
 857		printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
 858			__func__);
 859		return 0;
 860	}
 861
 862	siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
 863				      sizeof(struct hpet_dev));
 864
 865	hpetp = kzalloc(siz, GFP_KERNEL);
 866
 867	if (!hpetp)
 868		return -ENOMEM;
 869
 870	hpetp->hp_which = hpet_nhpet++;
 871	hpetp->hp_hpet = hdp->hd_address;
 872	hpetp->hp_hpet_phys = hdp->hd_phys_address;
 873
 874	hpetp->hp_ntimer = hdp->hd_nirqs;
 875
 876	for (i = 0; i < hdp->hd_nirqs; i++)
 877		hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
 878
 879	hpet = hpetp->hp_hpet;
 880
 881	cap = readq(&hpet->hpet_cap);
 882
 883	ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;
 884
 885	if (hpetp->hp_ntimer != ntimer) {
 886		printk(KERN_WARNING "hpet: number irqs doesn't agree"
 887		       " with number of timers\n");
 888		kfree(hpetp);
 889		return -ENODEV;
 890	}
 891
 892	if (last)
 893		last->hp_next = hpetp;
 894	else
 895		hpets = hpetp;
 896
 897	last = hpetp;
 898
 899	period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
 900		HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
 901	temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
 902	temp += period >> 1; /* round */
 903	do_div(temp, period);
 904	hpetp->hp_tick_freq = temp; /* ticks per second */
 905
 906	printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
 907		hpetp->hp_which, hdp->hd_phys_address,
 908		hpetp->hp_ntimer > 1 ? "s" : "");
 909	for (i = 0; i < hpetp->hp_ntimer; i++)
 910		printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
 911	printk("\n");
 912
 913	temp = hpetp->hp_tick_freq;
 914	remainder = do_div(temp, 1000000);
 915	printk(KERN_INFO
 916		"hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n",
 917		hpetp->hp_which, hpetp->hp_ntimer,
 918		cap & HPET_COUNTER_SIZE_MASK ? 64 : 32,
 919		(unsigned) temp, remainder);
 920
 921	mcfg = readq(&hpet->hpet_config);
 922	if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
 923		write_counter(0L, &hpet->hpet_mc);
 924		mcfg |= HPET_ENABLE_CNF_MASK;
 925		writeq(mcfg, &hpet->hpet_config);
 926	}
 927
 928	for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) {
 929		struct hpet_timer __iomem *timer;
 930
 931		timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
 932
 933		devp->hd_hpets = hpetp;
 934		devp->hd_hpet = hpet;
 935		devp->hd_timer = timer;
 936
 937		/*
 938		 * If the timer was reserved by platform code,
 939		 * then make timer unavailable for opens.
 940		 */
 941		if (hdp->hd_state & (1 << i)) {
 942			devp->hd_flags = HPET_OPEN;
 943			continue;
 944		}
 945
 946		init_waitqueue_head(&devp->hd_waitqueue);
 947	}
 948
 949	hpetp->hp_delta = hpet_calibrate(hpetp);
 950
 951/* This clocksource driver currently only works on ia64 */
 952#ifdef CONFIG_IA64
 953	if (!hpet_clocksource) {
 954		hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
 955		clocksource_hpet.archdata.fsys_mmio = hpet_mctr;
 956		clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq);
 957		hpetp->hp_clocksource = &clocksource_hpet;
 958		hpet_clocksource = &clocksource_hpet;
 959	}
 960#endif
 961
 962	return 0;
 963}
 964
 965static acpi_status hpet_resources(struct acpi_resource *res, void *data)
 966{
 967	struct hpet_data *hdp;
 968	acpi_status status;
 969	struct acpi_resource_address64 addr;
 970
 971	hdp = data;
 972
 973	status = acpi_resource_to_address64(res, &addr);
 974
 975	if (ACPI_SUCCESS(status)) {
 976		hdp->hd_phys_address = addr.minimum;
 977		hdp->hd_address = ioremap(addr.minimum, addr.address_length);
 978
 979		if (hpet_is_known(hdp)) {
 980			iounmap(hdp->hd_address);
 981			return AE_ALREADY_EXISTS;
 982		}
 983	} else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
 984		struct acpi_resource_fixed_memory32 *fixmem32;
 985
 986		fixmem32 = &res->data.fixed_memory32;
 987		if (!fixmem32)
 988			return AE_NO_MEMORY;
 989
 990		hdp->hd_phys_address = fixmem32->address;
 991		hdp->hd_address = ioremap(fixmem32->address,
 992						HPET_RANGE_SIZE);
 993
 994		if (hpet_is_known(hdp)) {
 995			iounmap(hdp->hd_address);
 996			return AE_ALREADY_EXISTS;
 997		}
 998	} else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
 999		struct acpi_resource_extended_irq *irqp;
1000		int i, irq;
1001
1002		irqp = &res->data.extended_irq;
1003
 1004		for (i = 0; i < irqp->interrupt_count; i++) {
1005			irq = acpi_register_gsi(NULL, irqp->interrupts[i],
1006				      irqp->triggering, irqp->polarity);
1007			if (irq < 0)
1008				return AE_ERROR;
1009
1010			hdp->hd_irq[hdp->hd_nirqs] = irq;
1011			hdp->hd_nirqs++;
1012		}
1013	}
1014
1015	return AE_OK;
1016}
1017
1018static int hpet_acpi_add(struct acpi_device *device)
1019{
1020	acpi_status result;
1021	struct hpet_data data;
1022
1023	memset(&data, 0, sizeof(data));
1024
1025	result =
1026	    acpi_walk_resources(device->handle, METHOD_NAME__CRS,
1027				hpet_resources, &data);
1028
1029	if (ACPI_FAILURE(result))
1030		return -ENODEV;
1031
1032	if (!data.hd_address || !data.hd_nirqs) {
1033		if (data.hd_address)
1034			iounmap(data.hd_address);
1035		printk("%s: no address or irqs in _CRS\n", __func__);
1036		return -ENODEV;
1037	}
1038
1039	return hpet_alloc(&data);
1040}
1041
1042static int hpet_acpi_remove(struct acpi_device *device, int type)
1043{
1044	/* XXX need to unregister clocksource, dealloc mem, etc */
1045	return -EINVAL;
1046}
1047
1048static const struct acpi_device_id hpet_device_ids[] = {
1049	{"PNP0103", 0},
1050	{"", 0},
1051};
1052MODULE_DEVICE_TABLE(acpi, hpet_device_ids);
1053
1054static struct acpi_driver hpet_acpi_driver = {
1055	.name = "hpet",
1056	.ids = hpet_device_ids,
1057	.ops = {
1058		.add = hpet_acpi_add,
1059		.remove = hpet_acpi_remove,
1060		},
1061};
1062
1063static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };
1064
1065static int __init hpet_init(void)
1066{
1067	int result;
1068
1069	result = misc_register(&hpet_misc);
1070	if (result < 0)
1071		return -ENODEV;
1072
1073	sysctl_header = register_sysctl_table(dev_root);
1074
1075	result = acpi_bus_register_driver(&hpet_acpi_driver);
1076	if (result < 0) {
1077		if (sysctl_header)
1078			unregister_sysctl_table(sysctl_header);
1079		misc_deregister(&hpet_misc);
1080		return result;
1081	}
1082
1083	return 0;
1084}
1085
1086static void __exit hpet_exit(void)
1087{
1088	acpi_bus_unregister_driver(&hpet_acpi_driver);
1089
1090	if (sysctl_header)
1091		unregister_sysctl_table(sysctl_header);
1092	misc_deregister(&hpet_misc);
1093
1094	return;
1095}
1096
1097module_init(hpet_init);
1098module_exit(hpet_exit);
1099MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
1100MODULE_LICENSE("GPL");
v3.15
   1/*
   2 * Intel & MS High Precision Event Timer Implementation.
   3 *
   4 * Copyright (C) 2003 Intel Corporation
   5 *	Venki Pallipadi
   6 * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
   7 *	Bob Picco <robert.picco@hp.com>
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 */
  13
  14#include <linux/interrupt.h>
  15#include <linux/module.h>
  16#include <linux/kernel.h>
  17#include <linux/types.h>
  18#include <linux/miscdevice.h>
  19#include <linux/major.h>
  20#include <linux/ioport.h>
  21#include <linux/fcntl.h>
  22#include <linux/init.h>
  23#include <linux/poll.h>
  24#include <linux/mm.h>
  25#include <linux/proc_fs.h>
  26#include <linux/spinlock.h>
  27#include <linux/sysctl.h>
  28#include <linux/wait.h>
  29#include <linux/bcd.h>
  30#include <linux/seq_file.h>
  31#include <linux/bitops.h>
  32#include <linux/compat.h>
  33#include <linux/clocksource.h>
  34#include <linux/uaccess.h>
  35#include <linux/slab.h>
  36#include <linux/io.h>
  37#include <linux/acpi.h>
  38#include <linux/hpet.h>
   39#include <asm/current.h>
  40#include <asm/irq.h>
  41#include <asm/div64.h>
   42
  43/*
  44 * The High Precision Event Timer driver.
  45 * This driver is closely modelled after the rtc.c driver.
  46 * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
  47 */
  48#define	HPET_USER_FREQ	(64)
  49#define	HPET_DRIFT	(500)
  50
  51#define HPET_RANGE_SIZE		1024	/* from HPET spec */
  52
  53
  54/* WARNING -- don't get confused.  These macros are never used
  55 * to write the (single) counter, and rarely to read it.
  56 * They're badly named; to fix, someday.
  57 */
  58#if BITS_PER_LONG == 64
  59#define	write_counter(V, MC)	writeq(V, MC)
  60#define	read_counter(MC)	readq(MC)
  61#else
  62#define	write_counter(V, MC)	writel(V, MC)
  63#define	read_counter(MC)	readl(MC)
  64#endif
  65
  66static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
  67static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
  68
  69/* This clocksource driver currently only works on ia64 */
  70#ifdef CONFIG_IA64
  71static void __iomem *hpet_mctr;
  72
  73static cycle_t read_hpet(struct clocksource *cs)
  74{
  75	return (cycle_t)read_counter((void __iomem *)hpet_mctr);
  76}
  77
  78static struct clocksource clocksource_hpet = {
  79	.name		= "hpet",
  80	.rating		= 250,
  81	.read		= read_hpet,
  82	.mask		= CLOCKSOURCE_MASK(64),
  83	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
  84};
  85static struct clocksource *hpet_clocksource;
  86#endif
  87
  88/* A lock for concurrent access by app and isr hpet activity. */
  89static DEFINE_SPINLOCK(hpet_lock);
  90
  91#define	HPET_DEV_NAME	(7)
  92
  93struct hpet_dev {
  94	struct hpets *hd_hpets;
  95	struct hpet __iomem *hd_hpet;
  96	struct hpet_timer __iomem *hd_timer;
  97	unsigned long hd_ireqfreq;
  98	unsigned long hd_irqdata;
  99	wait_queue_head_t hd_waitqueue;
 100	struct fasync_struct *hd_async_queue;
 101	unsigned int hd_flags;
 102	unsigned int hd_irq;
 103	unsigned int hd_hdwirq;
 104	char hd_name[HPET_DEV_NAME];
 105};
 106
 107struct hpets {
 108	struct hpets *hp_next;
 109	struct hpet __iomem *hp_hpet;
 110	unsigned long hp_hpet_phys;
 111	struct clocksource *hp_clocksource;
 112	unsigned long long hp_tick_freq;
 113	unsigned long hp_delta;
 114	unsigned int hp_ntimer;
 115	unsigned int hp_which;
 116	struct hpet_dev hp_dev[1];
 117};
 118
 119static struct hpets *hpets;
 120
 121#define	HPET_OPEN		0x0001
 122#define	HPET_IE			0x0002	/* interrupt enabled */
 123#define	HPET_PERIODIC		0x0004
 124#define	HPET_SHARED_IRQ		0x0008
 125
 126
 127#ifndef readq
 128static inline unsigned long long readq(void __iomem *addr)
 129{
 130	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
 131}
 132#endif
 133
 134#ifndef writeq
 135static inline void writeq(unsigned long long v, void __iomem *addr)
 136{
 137	writel(v & 0xffffffff, addr);
 138	writel(v >> 32, addr + 4);
 139}
 140#endif
 141
 142static irqreturn_t hpet_interrupt(int irq, void *data)
 143{
 144	struct hpet_dev *devp;
 145	unsigned long isr;
 146
 147	devp = data;
 148	isr = 1 << (devp - devp->hd_hpets->hp_dev);
 149
 150	if ((devp->hd_flags & HPET_SHARED_IRQ) &&
 151	    !(isr & readl(&devp->hd_hpet->hpet_isr)))
 152		return IRQ_NONE;
 153
 154	spin_lock(&hpet_lock);
 155	devp->hd_irqdata++;
 156
 157	/*
 158	 * For non-periodic timers, increment the accumulator.
 159	 * This has the effect of treating non-periodic like periodic.
 160	 */
 161	if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
 162		unsigned long m, t, mc, base, k;
 163		struct hpet __iomem *hpet = devp->hd_hpet;
 164		struct hpets *hpetp = devp->hd_hpets;
 165
 166		t = devp->hd_ireqfreq;
 167		m = read_counter(&devp->hd_timer->hpet_compare);
 168		mc = read_counter(&hpet->hpet_mc);
 169		/* The time for the next interrupt would logically be t + m,
 170		 * however, if we are very unlucky and the interrupt is delayed
 171		 * for longer than t then we will completely miss the next
 172		 * interrupt if we set t + m and an application will hang.
 173		 * Therefore we need to make a more complex computation assuming
 174		 * that there exists a k for which the following is true:
 175		 * k * t + base < mc + delta
 176		 * (k + 1) * t + base > mc + delta
 177		 * where t is the interval in hpet ticks for the given freq,
 178		 * base is the theoretical start value 0 < base < t,
 179		 * mc is the main counter value at the time of the interrupt,
  180		 * delta is the time it takes to write a value to the
 181		 * comparator.
 182		 * k may then be computed as (mc - base + delta) / t .
 183		 */
 184		base = mc % t;
 185		k = (mc - base + hpetp->hp_delta) / t;
 186		write_counter(t * (k + 1) + base,
 187			      &devp->hd_timer->hpet_compare);
 188	}
 189
 190	if (devp->hd_flags & HPET_SHARED_IRQ)
 191		writel(isr, &devp->hd_hpet->hpet_isr);
 192	spin_unlock(&hpet_lock);
 193
 194	wake_up_interruptible(&devp->hd_waitqueue);
 195
 196	kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);
 197
 198	return IRQ_HANDLED;
 199}
 200
 201static void hpet_timer_set_irq(struct hpet_dev *devp)
 202{
 203	unsigned long v;
 204	int irq, gsi;
 205	struct hpet_timer __iomem *timer;
 206
 207	spin_lock_irq(&hpet_lock);
 208	if (devp->hd_hdwirq) {
 209		spin_unlock_irq(&hpet_lock);
 210		return;
 211	}
 212
 213	timer = devp->hd_timer;
 214
 215	/* we prefer level triggered mode */
 216	v = readl(&timer->hpet_config);
 217	if (!(v & Tn_INT_TYPE_CNF_MASK)) {
 218		v |= Tn_INT_TYPE_CNF_MASK;
 219		writel(v, &timer->hpet_config);
 220	}
 221	spin_unlock_irq(&hpet_lock);
 222
 223	v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >>
 224				 Tn_INT_ROUTE_CAP_SHIFT;
 225
 226	/*
  227	 * In PIC mode, skip IRQ0-4, IRQ6-9 and IRQ12-15, which are always used by
  228	 * legacy devices. In IO APIC mode, we skip all the legacy IRQs.
 229	 */
 230	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC)
 231		v &= ~0xf3df;
 232	else
 233		v &= ~0xffff;
 234
 235	for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
 236		if (irq >= nr_irqs) {
 237			irq = HPET_MAX_IRQ;
 238			break;
 239		}
 240
 241		gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
 242					ACPI_ACTIVE_LOW);
 243		if (gsi > 0)
 244			break;
 245
 246		/* FIXME: Setup interrupt source table */
 247	}
 248
 249	if (irq < HPET_MAX_IRQ) {
 250		spin_lock_irq(&hpet_lock);
 251		v = readl(&timer->hpet_config);
 252		v |= irq << Tn_INT_ROUTE_CNF_SHIFT;
 253		writel(v, &timer->hpet_config);
 254		devp->hd_hdwirq = gsi;
 255		spin_unlock_irq(&hpet_lock);
 256	}
 257	return;
 258}
 259
 260static int hpet_open(struct inode *inode, struct file *file)
 261{
 262	struct hpet_dev *devp;
 263	struct hpets *hpetp;
 264	int i;
 265
 266	if (file->f_mode & FMODE_WRITE)
 267		return -EINVAL;
 268
 269	mutex_lock(&hpet_mutex);
 270	spin_lock_irq(&hpet_lock);
 271
 272	for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
 273		for (i = 0; i < hpetp->hp_ntimer; i++)
 274			if (hpetp->hp_dev[i].hd_flags & HPET_OPEN)
 275				continue;
 276			else {
 277				devp = &hpetp->hp_dev[i];
 278				break;
 279			}
 280
 281	if (!devp) {
 282		spin_unlock_irq(&hpet_lock);
 283		mutex_unlock(&hpet_mutex);
 284		return -EBUSY;
 285	}
 286
 287	file->private_data = devp;
 288	devp->hd_irqdata = 0;
 289	devp->hd_flags |= HPET_OPEN;
 290	spin_unlock_irq(&hpet_lock);
 291	mutex_unlock(&hpet_mutex);
 292
 293	hpet_timer_set_irq(devp);
 294
 295	return 0;
 296}
 297
 298static ssize_t
 299hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
 300{
 301	DECLARE_WAITQUEUE(wait, current);
 302	unsigned long data;
 303	ssize_t retval;
 304	struct hpet_dev *devp;
 305
 306	devp = file->private_data;
 307	if (!devp->hd_ireqfreq)
 308		return -EIO;
 309
 310	if (count < sizeof(unsigned long))
 311		return -EINVAL;
 312
 313	add_wait_queue(&devp->hd_waitqueue, &wait);
 314
 315	for ( ; ; ) {
 316		set_current_state(TASK_INTERRUPTIBLE);
 317
 318		spin_lock_irq(&hpet_lock);
 319		data = devp->hd_irqdata;
 320		devp->hd_irqdata = 0;
 321		spin_unlock_irq(&hpet_lock);
 322
 323		if (data)
 324			break;
 325		else if (file->f_flags & O_NONBLOCK) {
 326			retval = -EAGAIN;
 327			goto out;
 328		} else if (signal_pending(current)) {
 329			retval = -ERESTARTSYS;
 330			goto out;
 331		}
 332		schedule();
 333	}
 334
 335	retval = put_user(data, (unsigned long __user *)buf);
 336	if (!retval)
 337		retval = sizeof(unsigned long);
 338out:
 339	__set_current_state(TASK_RUNNING);
 340	remove_wait_queue(&devp->hd_waitqueue, &wait);
 341
 342	return retval;
 343}
 344
 345static unsigned int hpet_poll(struct file *file, poll_table * wait)
 346{
 347	unsigned long v;
 348	struct hpet_dev *devp;
 349
 350	devp = file->private_data;
 351
 352	if (!devp->hd_ireqfreq)
 353		return 0;
 354
 355	poll_wait(file, &devp->hd_waitqueue, wait);
 356
 357	spin_lock_irq(&hpet_lock);
 358	v = devp->hd_irqdata;
 359	spin_unlock_irq(&hpet_lock);
 360
 361	if (v != 0)
 362		return POLLIN | POLLRDNORM;
 363
 364	return 0;
 365}
 366
 367#ifdef CONFIG_HPET_MMAP
 368#ifdef CONFIG_HPET_MMAP_DEFAULT
 369static int hpet_mmap_enabled = 1;
 370#else
 371static int hpet_mmap_enabled = 0;
 372#endif
 373
 374static __init int hpet_mmap_enable(char *str)
 375{
 376	get_option(&str, &hpet_mmap_enabled);
 377	pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
 378	return 1;
 379}
 380__setup("hpet_mmap", hpet_mmap_enable);
 381
 382static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
  383{
 384	struct hpet_dev *devp;
 385	unsigned long addr;
 386
 387	if (!hpet_mmap_enabled)
 388		return -EACCES;
 389
 390	devp = file->private_data;
 391	addr = devp->hd_hpets->hp_hpet_phys;
 392
 393	if (addr & (PAGE_SIZE - 1))
 394		return -ENOSYS;
  395
 396	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 397	return vm_iomap_memory(vma, addr, PAGE_SIZE);
  398}
 399#else
 400static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
 401{
  402	return -ENOSYS;
 403}
 404#endif
 405
 406static int hpet_fasync(int fd, struct file *file, int on)
 407{
 408	struct hpet_dev *devp;
 409
 410	devp = file->private_data;
 411
 412	if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0)
 413		return 0;
 414	else
 415		return -EIO;
 416}
 417
 418static int hpet_release(struct inode *inode, struct file *file)
 419{
 420	struct hpet_dev *devp;
 421	struct hpet_timer __iomem *timer;
 422	int irq = 0;
 423
 424	devp = file->private_data;
 425	timer = devp->hd_timer;
 426
 427	spin_lock_irq(&hpet_lock);
 428
 429	writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
 430	       &timer->hpet_config);
 431
 432	irq = devp->hd_irq;
 433	devp->hd_irq = 0;
 434
 435	devp->hd_ireqfreq = 0;
 436
 437	if (devp->hd_flags & HPET_PERIODIC
 438	    && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
 439		unsigned long v;
 440
 441		v = readq(&timer->hpet_config);
 442		v ^= Tn_TYPE_CNF_MASK;
 443		writeq(v, &timer->hpet_config);
 444	}
 445
 446	devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
 447	spin_unlock_irq(&hpet_lock);
 448
 449	if (irq)
 450		free_irq(irq, devp);
 451
 452	file->private_data = NULL;
 453	return 0;
 454}
 455
 456static int hpet_ioctl_ieon(struct hpet_dev *devp)
 457{
 458	struct hpet_timer __iomem *timer;
 459	struct hpet __iomem *hpet;
 460	struct hpets *hpetp;
 461	int irq;
 462	unsigned long g, v, t, m;
 463	unsigned long flags, isr;
 464
 465	timer = devp->hd_timer;
 466	hpet = devp->hd_hpet;
 467	hpetp = devp->hd_hpets;
 468
 469	if (!devp->hd_ireqfreq)
 470		return -EIO;
 471
 472	spin_lock_irq(&hpet_lock);
 473
 474	if (devp->hd_flags & HPET_IE) {
 475		spin_unlock_irq(&hpet_lock);
 476		return -EBUSY;
 477	}
 478
 479	devp->hd_flags |= HPET_IE;
 480
 481	if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK)
 482		devp->hd_flags |= HPET_SHARED_IRQ;
 483	spin_unlock_irq(&hpet_lock);
 484
 485	irq = devp->hd_hdwirq;
 486
 487	if (irq) {
 488		unsigned long irq_flags;
 489
 490		if (devp->hd_flags & HPET_SHARED_IRQ) {
 491			/*
 492			 * To prevent the interrupt handler from seeing an
 493			 * unwanted interrupt status bit, program the timer
 494			 * so that it will not fire in the near future ...
 495			 */
 496			writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
 497			       &timer->hpet_config);
 498			write_counter(read_counter(&hpet->hpet_mc),
 499				      &timer->hpet_compare);
 500			/* ... and clear any left-over status. */
 501			isr = 1 << (devp - devp->hd_hpets->hp_dev);
 502			writel(isr, &hpet->hpet_isr);
 503		}
 504
 505		sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
  506		irq_flags = devp->hd_flags & HPET_SHARED_IRQ ? IRQF_SHARED : 0;
 507		if (request_irq(irq, hpet_interrupt, irq_flags,
 508				devp->hd_name, (void *)devp)) {
 509			printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
 510			irq = 0;
 511		}
 512	}
 513
 514	if (irq == 0) {
 515		spin_lock_irq(&hpet_lock);
 516		devp->hd_flags ^= HPET_IE;
 517		spin_unlock_irq(&hpet_lock);
 518		return -EIO;
 519	}
 520
 521	devp->hd_irq = irq;
 522	t = devp->hd_ireqfreq;
 523	v = readq(&timer->hpet_config);
 524
 525	/* 64-bit comparators are not yet supported through the ioctls,
 526	 * so force this into 32-bit mode if it supports both modes
 527	 */
 528	g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;
 529
 530	if (devp->hd_flags & HPET_PERIODIC) {
 531		g |= Tn_TYPE_CNF_MASK;
 532		v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;
 533		writeq(v, &timer->hpet_config);
 534		local_irq_save(flags);
 535
 536		/*
 537		 * NOTE: First we modify the hidden accumulator
 538		 * register supported by periodic-capable comparators.
 539		 * We never want to modify the (single) counter; that
 540		 * would affect all the comparators. The value written
 541		 * is the counter value when the first interrupt is due.
 542		 */
 543		m = read_counter(&hpet->hpet_mc);
 544		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
 545		/*
 546		 * Then we modify the comparator, indicating the period
 547		 * for subsequent interrupt.
 548		 */
 549		write_counter(t, &timer->hpet_compare);
 550	} else {
 551		local_irq_save(flags);
 552		m = read_counter(&hpet->hpet_mc);
 553		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
 554	}
 555
 556	if (devp->hd_flags & HPET_SHARED_IRQ) {
 557		isr = 1 << (devp - devp->hd_hpets->hp_dev);
 558		writel(isr, &hpet->hpet_isr);
 559	}
 560	writeq(g, &timer->hpet_config);
 561	local_irq_restore(flags);
 562
 563	return 0;
 564}
 565
 566/* converts Hz to number of timer ticks */
 567static inline unsigned long hpet_time_div(struct hpets *hpets,
 568					  unsigned long dis)
 569{
 570	unsigned long long m;
 571
 572	m = hpets->hp_tick_freq + (dis >> 1);
 573	do_div(m, dis);
 574	return (unsigned long)m;
 575}
 576
 577static int
 578hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
 579		  struct hpet_info *info)
 580{
 581	struct hpet_timer __iomem *timer;
 582	struct hpet __iomem *hpet;
 583	struct hpets *hpetp;
 584	int err;
 585	unsigned long v;
 586
 587	switch (cmd) {
 588	case HPET_IE_OFF:
 589	case HPET_INFO:
 590	case HPET_EPI:
 591	case HPET_DPI:
 592	case HPET_IRQFREQ:
 593		timer = devp->hd_timer;
 594		hpet = devp->hd_hpet;
 595		hpetp = devp->hd_hpets;
 596		break;
 597	case HPET_IE_ON:
 598		return hpet_ioctl_ieon(devp);
 599	default:
 600		return -EINVAL;
 601	}
 602
 603	err = 0;
 604
 605	switch (cmd) {
 606	case HPET_IE_OFF:
 607		if ((devp->hd_flags & HPET_IE) == 0)
 608			break;
 609		v = readq(&timer->hpet_config);
 610		v &= ~Tn_INT_ENB_CNF_MASK;
 611		writeq(v, &timer->hpet_config);
 612		if (devp->hd_irq) {
 613			free_irq(devp->hd_irq, devp);
 614			devp->hd_irq = 0;
 615		}
 616		devp->hd_flags ^= HPET_IE;
 617		break;
 618	case HPET_INFO:
 619		{
 620			memset(info, 0, sizeof(*info));
 621			if (devp->hd_ireqfreq)
 622				info->hi_ireqfreq =
 623					hpet_time_div(hpetp, devp->hd_ireqfreq);
 624			info->hi_flags =
 625			    readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
 626			info->hi_hpet = hpetp->hp_which;
 627			info->hi_timer = devp - hpetp->hp_dev;
 628			break;
 629		}
 630	case HPET_EPI:
 631		v = readq(&timer->hpet_config);
 632		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
 633			err = -ENXIO;
 634			break;
 635		}
 636		devp->hd_flags |= HPET_PERIODIC;
 637		break;
 638	case HPET_DPI:
 639		v = readq(&timer->hpet_config);
 640		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
 641			err = -ENXIO;
 642			break;
 643		}
 644		if (devp->hd_flags & HPET_PERIODIC &&
 645		    readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
 646			v = readq(&timer->hpet_config);
 647			v ^= Tn_TYPE_CNF_MASK;
 648			writeq(v, &timer->hpet_config);
 649		}
 650		devp->hd_flags &= ~HPET_PERIODIC;
 651		break;
 652	case HPET_IRQFREQ:
 653		if ((arg > hpet_max_freq) &&
 654		    !capable(CAP_SYS_RESOURCE)) {
 655			err = -EACCES;
 656			break;
 657		}
 658
 659		if (!arg) {
 660			err = -EINVAL;
 661			break;
 662		}
 663
 664		devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
 665	}
 666
 667	return err;
 668}
 669
 670static long
 671hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 672{
 673	struct hpet_info info;
 674	int err;
 675
 676	mutex_lock(&hpet_mutex);
 677	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
 678	mutex_unlock(&hpet_mutex);
 679
 680	if ((cmd == HPET_INFO) && !err &&
 681	    (copy_to_user((void __user *)arg, &info, sizeof(info))))
 682		err = -EFAULT;
 683
 684	return err;
 685}
 686
 687#ifdef CONFIG_COMPAT
 688struct compat_hpet_info {
 689	compat_ulong_t hi_ireqfreq;	/* Hz */
 690	compat_ulong_t hi_flags;	/* information */
 691	unsigned short hi_hpet;
 692	unsigned short hi_timer;
 693};
 694
 695static long
 696hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 697{
 698	struct hpet_info info;
 699	int err;
 700
 701	mutex_lock(&hpet_mutex);
 702	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
 703	mutex_unlock(&hpet_mutex);
 704
 705	if ((cmd == HPET_INFO) && !err) {
 706		struct compat_hpet_info __user *u = compat_ptr(arg);
 707		if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) ||
 708		    put_user(info.hi_flags, &u->hi_flags) ||
 709		    put_user(info.hi_hpet, &u->hi_hpet) ||
 710		    put_user(info.hi_timer, &u->hi_timer))
 711			err = -EFAULT;
 712	}
 713
 714	return err;
 715}
 716#endif
 717
 718static const struct file_operations hpet_fops = {
 719	.owner = THIS_MODULE,
 720	.llseek = no_llseek,
 721	.read = hpet_read,
 722	.poll = hpet_poll,
 723	.unlocked_ioctl = hpet_ioctl,
 724#ifdef CONFIG_COMPAT
 725	.compat_ioctl = hpet_compat_ioctl,
 726#endif
 727	.open = hpet_open,
 728	.release = hpet_release,
 729	.fasync = hpet_fasync,
 730	.mmap = hpet_mmap,
 731};
 732
 733static int hpet_is_known(struct hpet_data *hdp)
 734{
 735	struct hpets *hpetp;
 736
 737	for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
 738		if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
 739			return 1;
 740
 741	return 0;
 742}
 743
 744static struct ctl_table hpet_table[] = {
 745	{
 746	 .procname = "max-user-freq",
 747	 .data = &hpet_max_freq,
 748	 .maxlen = sizeof(int),
 749	 .mode = 0644,
 750	 .proc_handler = proc_dointvec,
 751	 },
 752	{}
 753};
 754
 755static struct ctl_table hpet_root[] = {
 756	{
 757	 .procname = "hpet",
 758	 .maxlen = 0,
 759	 .mode = 0555,
 760	 .child = hpet_table,
 761	 },
 762	{}
 763};
 764
 765static struct ctl_table dev_root[] = {
 766	{
 767	 .procname = "dev",
 768	 .maxlen = 0,
 769	 .mode = 0555,
 770	 .child = hpet_root,
 771	 },
 772	{}
 773};
 774
 775static struct ctl_table_header *sysctl_header;
 776
 777/*
 778 * Adjustment for when arming the timer with
 779 * initial conditions.  That is, main counter
 780 * ticks expired before interrupts are enabled.
 781 */
 782#define	TICK_CALIBRATE	(1000UL)
 783
 784static unsigned long __hpet_calibrate(struct hpets *hpetp)
 785{
 786	struct hpet_timer __iomem *timer = NULL;
 787	unsigned long t, m, count, i, flags, start;
 788	struct hpet_dev *devp;
 789	int j;
 790	struct hpet __iomem *hpet;
 791
 792	for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
 793		if ((devp->hd_flags & HPET_OPEN) == 0) {
 794			timer = devp->hd_timer;
 795			break;
 796		}
 797
 798	if (!timer)
 799		return 0;
 800
 801	hpet = hpetp->hp_hpet;
 802	t = read_counter(&timer->hpet_compare);
 803
 804	i = 0;
 805	count = hpet_time_div(hpetp, TICK_CALIBRATE);
 806
 807	local_irq_save(flags);
 808
 809	start = read_counter(&hpet->hpet_mc);
 810
 811	do {
 812		m = read_counter(&hpet->hpet_mc);
 813		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
 814	} while (i++, (m - start) < count);
 815
 816	local_irq_restore(flags);
 817
 818	return (m - start) / i;
 819}
 820
 821static unsigned long hpet_calibrate(struct hpets *hpetp)
 822{
 823	unsigned long ret = ~0UL;
 824	unsigned long tmp;
 825
 826	/*
  827	 * Try to calibrate until the return value becomes a stable, small value.
  828	 * If an SMI occurs during the calibration loop, the return value
  829	 * will be large; retrying avoids its impact.
 830	 */
 831	for ( ; ; ) {
 832		tmp = __hpet_calibrate(hpetp);
 833		if (ret <= tmp)
 834			break;
 835		ret = tmp;
 836	}
 837
 838	return ret;
 839}
 840
 841int hpet_alloc(struct hpet_data *hdp)
 842{
 843	u64 cap, mcfg;
 844	struct hpet_dev *devp;
 845	u32 i, ntimer;
 846	struct hpets *hpetp;
 847	size_t siz;
 848	struct hpet __iomem *hpet;
 849	static struct hpets *last;
 850	unsigned long period;
 851	unsigned long long temp;
 852	u32 remainder;
 853
 854	/*
 855	 * hpet_alloc can be called by platform dependent code.
 856	 * If platform dependent code has allocated the hpet that
 857	 * ACPI has also reported, then we catch it here.
 858	 */
 859	if (hpet_is_known(hdp)) {
 860		printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
 861			__func__);
 862		return 0;
 863	}
 864
 865	siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
 866				      sizeof(struct hpet_dev));
 867
 868	hpetp = kzalloc(siz, GFP_KERNEL);
 869
 870	if (!hpetp)
 871		return -ENOMEM;
 872
 873	hpetp->hp_which = hpet_nhpet++;
 874	hpetp->hp_hpet = hdp->hd_address;
 875	hpetp->hp_hpet_phys = hdp->hd_phys_address;
 876
 877	hpetp->hp_ntimer = hdp->hd_nirqs;
 878
 879	for (i = 0; i < hdp->hd_nirqs; i++)
 880		hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
 881
 882	hpet = hpetp->hp_hpet;
 883
 884	cap = readq(&hpet->hpet_cap);
 885
 886	ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;
 887
 888	if (hpetp->hp_ntimer != ntimer) {
 889		printk(KERN_WARNING "hpet: number irqs doesn't agree"
 890		       " with number of timers\n");
 891		kfree(hpetp);
 892		return -ENODEV;
 893	}
 894
 895	if (last)
 896		last->hp_next = hpetp;
 897	else
 898		hpets = hpetp;
 899
 900	last = hpetp;
 901
 902	period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
 903		HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
 904	temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
 905	temp += period >> 1; /* round */
 906	do_div(temp, period);
 907	hpetp->hp_tick_freq = temp; /* ticks per second */
 908
 909	printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
 910		hpetp->hp_which, hdp->hd_phys_address,
 911		hpetp->hp_ntimer > 1 ? "s" : "");
 912	for (i = 0; i < hpetp->hp_ntimer; i++)
 913		printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
 914	printk(KERN_CONT "\n");
 915
 916	temp = hpetp->hp_tick_freq;
 917	remainder = do_div(temp, 1000000);
 918	printk(KERN_INFO
 919		"hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n",
 920		hpetp->hp_which, hpetp->hp_ntimer,
 921		cap & HPET_COUNTER_SIZE_MASK ? 64 : 32,
 922		(unsigned) temp, remainder);
 923
 924	mcfg = readq(&hpet->hpet_config);
 925	if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
 926		write_counter(0L, &hpet->hpet_mc);
 927		mcfg |= HPET_ENABLE_CNF_MASK;
 928		writeq(mcfg, &hpet->hpet_config);
 929	}
 930
 931	for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) {
 932		struct hpet_timer __iomem *timer;
 933
 934		timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
 935
 936		devp->hd_hpets = hpetp;
 937		devp->hd_hpet = hpet;
 938		devp->hd_timer = timer;
 939
 940		/*
 941		 * If the timer was reserved by platform code,
 942		 * then make timer unavailable for opens.
 943		 */
 944		if (hdp->hd_state & (1 << i)) {
 945			devp->hd_flags = HPET_OPEN;
 946			continue;
 947		}
 948
 949		init_waitqueue_head(&devp->hd_waitqueue);
 950	}
 951
 952	hpetp->hp_delta = hpet_calibrate(hpetp);
 953
 954/* This clocksource driver currently only works on ia64 */
 955#ifdef CONFIG_IA64
 956	if (!hpet_clocksource) {
 957		hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
 958		clocksource_hpet.archdata.fsys_mmio = hpet_mctr;
 959		clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq);
 960		hpetp->hp_clocksource = &clocksource_hpet;
 961		hpet_clocksource = &clocksource_hpet;
 962	}
 963#endif
 964
 965	return 0;
 966}
 967
 968static acpi_status hpet_resources(struct acpi_resource *res, void *data)
 969{
 970	struct hpet_data *hdp;
 971	acpi_status status;
 972	struct acpi_resource_address64 addr;
 973
 974	hdp = data;
 975
 976	status = acpi_resource_to_address64(res, &addr);
 977
 978	if (ACPI_SUCCESS(status)) {
 979		hdp->hd_phys_address = addr.minimum;
 980		hdp->hd_address = ioremap(addr.minimum, addr.address_length);
 981
 982		if (hpet_is_known(hdp)) {
 983			iounmap(hdp->hd_address);
 984			return AE_ALREADY_EXISTS;
 985		}
 986	} else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
 987		struct acpi_resource_fixed_memory32 *fixmem32;
 988
  989		fixmem32 = &res->data.fixed_memory32;
 990
 991		hdp->hd_phys_address = fixmem32->address;
 992		hdp->hd_address = ioremap(fixmem32->address,
 993						HPET_RANGE_SIZE);
 994
 995		if (hpet_is_known(hdp)) {
 996			iounmap(hdp->hd_address);
 997			return AE_ALREADY_EXISTS;
 998		}
 999	} else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
1000		struct acpi_resource_extended_irq *irqp;
1001		int i, irq;
1002
1003		irqp = &res->data.extended_irq;
1004
1005		for (i = 0; i < irqp->interrupt_count; i++) {
1006			if (hdp->hd_nirqs >= HPET_MAX_TIMERS)
1007				break;
1008
1009			irq = acpi_register_gsi(NULL, irqp->interrupts[i],
1010				      irqp->triggering, irqp->polarity);
1011			if (irq < 0)
1012				return AE_ERROR;
1013
1014			hdp->hd_irq[hdp->hd_nirqs] = irq;
1015			hdp->hd_nirqs++;
1016		}
1017	}
1018
1019	return AE_OK;
1020}
1021
1022static int hpet_acpi_add(struct acpi_device *device)
1023{
1024	acpi_status result;
1025	struct hpet_data data;
1026
1027	memset(&data, 0, sizeof(data));
1028
1029	result =
1030	    acpi_walk_resources(device->handle, METHOD_NAME__CRS,
1031				hpet_resources, &data);
1032
1033	if (ACPI_FAILURE(result))
1034		return -ENODEV;
1035
1036	if (!data.hd_address || !data.hd_nirqs) {
1037		if (data.hd_address)
1038			iounmap(data.hd_address);
1039		printk("%s: no address or irqs in _CRS\n", __func__);
1040		return -ENODEV;
1041	}
1042
1043	return hpet_alloc(&data);
1044}
1045
1046static int hpet_acpi_remove(struct acpi_device *device)
1047{
1048	/* XXX need to unregister clocksource, dealloc mem, etc */
1049	return -EINVAL;
1050}
1051
1052static const struct acpi_device_id hpet_device_ids[] = {
1053	{"PNP0103", 0},
1054	{"", 0},
1055};
1056MODULE_DEVICE_TABLE(acpi, hpet_device_ids);
1057
1058static struct acpi_driver hpet_acpi_driver = {
1059	.name = "hpet",
1060	.ids = hpet_device_ids,
1061	.ops = {
1062		.add = hpet_acpi_add,
1063		.remove = hpet_acpi_remove,
1064		},
1065};
1066
1067static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };
1068
1069static int __init hpet_init(void)
1070{
1071	int result;
1072
1073	result = misc_register(&hpet_misc);
1074	if (result < 0)
1075		return -ENODEV;
1076
1077	sysctl_header = register_sysctl_table(dev_root);
1078
1079	result = acpi_bus_register_driver(&hpet_acpi_driver);
1080	if (result < 0) {
1081		if (sysctl_header)
1082			unregister_sysctl_table(sysctl_header);
1083		misc_deregister(&hpet_misc);
1084		return result;
1085	}
1086
1087	return 0;
1088}
1089
1090static void __exit hpet_exit(void)
1091{
1092	acpi_bus_unregister_driver(&hpet_acpi_driver);
1093
1094	if (sysctl_header)
1095		unregister_sysctl_table(sysctl_header);
1096	misc_deregister(&hpet_misc);
1097
1098	return;
1099}
1100
1101module_init(hpet_init);
1102module_exit(hpet_exit);
1103MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
1104MODULE_LICENSE("GPL");
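
Both versions above expose the same character-device API through /dev/hpet: hpet_open() hands out a free comparator, HPET_IRQFREQ sets the interrupt frequency (converted from Hz to ticks by hpet_time_div()), HPET_IE_ON arms the comparator and requests the IRQ, and hpet_read() returns the number of interrupts since the last read. The following is a minimal userspace sketch, not part of hpet.c, assuming only the HPET_* ioctls and struct hpet_info from <linux/hpet.h> that the driver code references; error handling is abbreviated.

/*
 * Minimal userspace sketch (not part of the driver) exercising the
 * /dev/hpet interface implemented above.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hpet.h>

int main(void)
{
	struct hpet_info info;
	unsigned long data;
	int fd, i;

	/* hpet_open() rejects FMODE_WRITE, so open read-only. */
	fd = open("/dev/hpet", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/hpet");
		return 1;
	}

	/*
	 * 10 Hz is below the default max-user-freq (HPET_USER_FREQ == 64),
	 * so no CAP_SYS_RESOURCE is needed; the driver converts Hz to ticks.
	 */
	if (ioctl(fd, HPET_IRQFREQ, 10UL) < 0) {
		perror("HPET_IRQFREQ");
		return 1;
	}

	if (ioctl(fd, HPET_INFO, &info) == 0)
		printf("timer %hu on hpet%hu, %lu Hz\n",
		       info.hi_timer, info.hi_hpet, info.hi_ireqfreq);

	/* Arms the comparator and requests the IRQ (hpet_ioctl_ieon()). */
	if (ioctl(fd, HPET_IE_ON, 0) < 0) {
		perror("HPET_IE_ON");
		return 1;
	}

	for (i = 0; i < 5; i++) {
		/* Blocks, then returns the interrupt count since the last read. */
		if (read(fd, &data, sizeof(data)) != sizeof(data))
			break;
		printf("interrupts since last read: %lu\n", data);
	}

	ioctl(fd, HPET_IE_OFF, 0);
	close(fd);
	return 0;
}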