/*
 * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
 * accessing in atomic context.
 *
 * This is used by the NMI handler to access IO memory areas, because
 * ioremap()/iounmap() cannot be called in NMI context. The IO memory
 * area is pre-mapped in process context and then accessed from the
 * NMI handler.
 *
 * Copyright (C) 2009-2010, Intel Corp.
 *	Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
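/*
 * Illustrative usage sketch (an editorial addition, not part of the
 * original file): a caller pre-maps a GAR once in process context,
 * reads it safely from NMI context, and unmaps on teardown. "reg" is
 * a hypothetical struct acpi_generic_address obtained from firmware
 * tables; error handling is elided.
 *
 *	// process context, e.g. driver init:
 *	acpi_pre_map_gar(reg);
 *
 *	// NMI handler:
 *	u64 val;
 *	acpi_atomic_read(&val, reg);
 *
 *	// process context, e.g. driver exit:
 *	acpi_post_unmap_gar(reg);
 */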

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <acpi/atomicio.h>

#define ACPI_PFX "ACPI: "

static LIST_HEAD(acpi_iomaps);
/*
 * acpi_iomaps_lock provides mutual exclusion between writers of the
 * acpi_iomaps list; synchronization between readers and writers is
 * provided by RCU.
 */
static DEFINE_SPINLOCK(acpi_iomaps_lock);
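/*
 * A minimal sketch of the resulting pattern (mirroring the code
 * below): writers mutate the list under the spinlock, readers only
 * need rcu_read_lock() around the lookup.
 *
 *	// writer (process context)
 *	spin_lock_irqsave(&acpi_iomaps_lock, flags);
 *	list_add_tail_rcu(&map->list, &acpi_iomaps);
 *	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
 *
 *	// reader (any context, including NMI)
 *	rcu_read_lock();
 *	map = __acpi_find_iomap(paddr, size);
 *	...use map...
 *	rcu_read_unlock();
 */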

struct acpi_iomap {
	struct list_head list;
	void __iomem *vaddr;
	unsigned long size;
	phys_addr_t paddr;
	struct kref ref;
};

/* acpi_iomaps_lock or RCU read lock must be held before calling */
static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
					    unsigned long size)
{
	struct acpi_iomap *map;

	list_for_each_entry_rcu(map, &acpi_iomaps, list) {
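		/*
		 * Match only if the requested range [paddr, paddr + size)
		 * lies entirely inside the existing mapping.
		 */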
		if (map->paddr + map->size >= paddr + size &&
		    map->paddr <= paddr)
			return map;
	}
	return NULL;
}

/*
 * Atomic "ioremap" used by the NMI handler: if the specified IO
 * memory area is not pre-mapped, NULL is returned. The returned
 * address is valid only while the RCU read lock is held.
 *
 * acpi_iomaps_lock or RCU read lock must be held before calling
 */
static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
					 unsigned long size)
{
	struct acpi_iomap *map;

	map = __acpi_find_iomap(paddr, size);
	if (map)
		return map->vaddr + (paddr - map->paddr);
	else
		return NULL;
}

/* acpi_iomaps_lock must be held before calling */
static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
					unsigned long size)
{
	struct acpi_iomap *map;

	map = __acpi_find_iomap(paddr, size);
	if (map) {
		kref_get(&map->ref);
		return map->vaddr + (paddr - map->paddr);
	} else
		return NULL;
}

/*
 * Pre-map the specified IO memory area. First check whether the area
 * is already pre-mapped; if it is, increase its reference count (in
 * __acpi_try_ioremap) and return. Otherwise do the real ioremap and
 * add the new mapping to the acpi_iomaps list.
 */
static void __iomem *acpi_pre_map(phys_addr_t paddr,
				  unsigned long size)
{
	void __iomem *vaddr;
	struct acpi_iomap *map;
	unsigned long pg_sz, flags;
	phys_addr_t pg_off;

	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	vaddr = __acpi_try_ioremap(paddr, size);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
	if (vaddr)
		return vaddr;

	pg_off = paddr & PAGE_MASK;
	pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
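	/*
	 * The two lines above round the request to whole pages: with
	 * 4 KiB pages, e.g. paddr = 0x1234 and size = 8 yield
	 * pg_off = 0x1000 and pg_sz = 0x1000 (a single page).
	 */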
	vaddr = ioremap(pg_off, pg_sz);
	if (!vaddr)
		return NULL;
	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		goto err_unmap;
	INIT_LIST_HEAD(&map->list);
	map->paddr = pg_off;
	map->size = pg_sz;
	map->vaddr = vaddr;
	kref_init(&map->ref);

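	/*
	 * Recheck under the lock: another thread may have mapped the
	 * same range while we were in ioremap(). If so, drop our new
	 * mapping and reuse the existing one.
	 */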
	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	vaddr = __acpi_try_ioremap(paddr, size);
	if (vaddr) {
		spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
		iounmap(map->vaddr);
		kfree(map);
		return vaddr;
	}
	list_add_tail_rcu(&map->list, &acpi_iomaps);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);

	return map->vaddr + (paddr - map->paddr);
err_unmap:
	iounmap(vaddr);
	return NULL;
}

/* acpi_iomaps_lock must be held before calling */
static void __acpi_kref_del_iomap(struct kref *ref)
{
	struct acpi_iomap *map;

	map = container_of(ref, struct acpi_iomap, ref);
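	/*
	 * Only unlink the mapping here; the caller holds
	 * acpi_iomaps_lock, so the actual iounmap()/kfree() is
	 * deferred until after synchronize_rcu() in acpi_post_unmap().
	 */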
	list_del_rcu(&map->list);
}

/*
 * Post-unmap the specified IO memory area. The real iounmap is done
 * only when the reference count drops to zero.
 */
static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
{
	struct acpi_iomap *map;
	unsigned long flags;
	int del;

	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	map = __acpi_find_iomap(paddr, size);
	BUG_ON(!map);
	del = kref_put(&map->ref, __acpi_kref_del_iomap);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);

	if (!del)
		return;

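	/*
	 * Wait for every RCU read-side critical section in flight
	 * (including NMI handlers that may have looked up this
	 * mapping) to finish before tearing the mapping down.
	 */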
	synchronize_rcu();
	iounmap(map->vaddr);
	kfree(map);
}

/*
 * When called from an NMI handler, silent must be set to 1:
 * printk() is not safe in NMI context.
 */
static int acpi_check_gar(struct acpi_generic_address *reg,
			  u64 *paddr, int silent)
{
	u32 width, space_id;

	width = reg->bit_width;
	space_id = reg->space_id;
	/*
	 * Copy via memcpy() to handle possible alignment issues:
	 * the address field may be unaligned in the firmware table.
	 */
	memcpy(paddr, &reg->address, sizeof(*paddr));
	if (!*paddr) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
			"Invalid physical address in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}

	if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
				   "Invalid bit width in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}

	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
			"Invalid address space type in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}

	return 0;
}
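/*
 * For reference, an approximation of the ACPICA declaration of the
 * ACPI Generic Address Structure that acpi_check_gar() validates.
 * The structure is packed, which is why the address field may be
 * unaligned and is read with memcpy() above.
 *
 *	struct acpi_generic_address {
 *		u8 space_id;	// address space the register lives in
 *		u8 bit_width;	// size of the register in bits
 *		u8 bit_offset;	// bit offset within the register
 *		u8 access_width;// minimum access size
 *		u64 address;	// 64-bit register address
 *	};
 */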

/* Pre-map the IO memory area referenced by a GAR */
int acpi_pre_map_gar(struct acpi_generic_address *reg)
{
	u64 paddr;
	void __iomem *vaddr;
	int rc;

	if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	rc = acpi_check_gar(reg, &paddr, 0);
	if (rc)
		return rc;

	vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
	if (!vaddr)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(acpi_pre_map_gar);

/* Post-unmap the IO memory area referenced by a GAR */
int acpi_post_unmap_gar(struct acpi_generic_address *reg)
{
	u64 paddr;
	int rc;

	if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	rc = acpi_check_gar(reg, &paddr, 0);
	if (rc)
		return rc;

	acpi_post_unmap(paddr, reg->bit_width / 8);

	return 0;
}
EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);

/*
 * Can be used in atomic (including NMI) or process context. The RCU
 * read lock may be released only after the IO memory area access has
 * completed.
 */
static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
{
	void __iomem *addr;

	rcu_read_lock();
	/* width is in bits; the pre-mapping used bit_width / 8 bytes */
	addr = __acpi_ioremap_fast(paddr, width / 8);
	if (!addr) {
		/* The area was not pre-mapped */
		rcu_read_unlock();
		return -EIO;
	}
	switch (width) {
	case 8:
		*val = readb(addr);
		break;
	case 16:
		*val = readw(addr);
		break;
	case 32:
		*val = readl(addr);
		break;
#ifdef readq
	case 64:
		*val = readq(addr);
		break;
#endif
	default:
		/* Drop the RCU read lock on the error path, too */
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();

	return 0;
}

static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
{
	void __iomem *addr;

	rcu_read_lock();
	/* width is in bits; the pre-mapping used bit_width / 8 bytes */
	addr = __acpi_ioremap_fast(paddr, width / 8);
	if (!addr) {
		/* The area was not pre-mapped */
		rcu_read_unlock();
		return -EIO;
	}
	switch (width) {
	case 8:
		writeb(val, addr);
		break;
	case 16:
		writew(val, addr);
		break;
	case 32:
		writel(val, addr);
		break;
#ifdef writeq
	case 64:
		writeq(val, addr);
		break;
#endif
	default:
		/* Drop the RCU read lock on the error path, too */
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();

	return 0;
}

/* GAR accessors, usable in atomic (including NMI) or process context */
int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
{
	u64 paddr;
	int rc;

	rc = acpi_check_gar(reg, &paddr, 1);
	if (rc)
		return rc;

	*val = 0;
	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return acpi_atomic_read_mem(paddr, val, reg->bit_width);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(acpi_atomic_read);

int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
{
	u64 paddr;
	int rc;

	rc = acpi_check_gar(reg, &paddr, 1);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return acpi_atomic_write_mem(paddr, val, reg->bit_width);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return acpi_os_write_port(paddr, val, reg->bit_width);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(acpi_atomic_write);