v3.1
/*
 * Copyright (C) 2001-2008 Silicon Graphics, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones,
 * a granule at a time. Node awareness is implemented by having a
 * pool of pages per node.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/efi.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>


extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

struct uncached_pool {
	struct gen_pool *pool;
	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
	int nchunks_added;		/* #of converted chunks added to pool */
	atomic_t status;		/* SMP-called function's return status */
};

#define MAX_CONVERTED_CHUNKS_PER_NODE	2

struct uncached_pool uncached_pools[MAX_NUMNODES];


static void uncached_ipi_visibility(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if ((status != PAL_VISIBILITY_OK) &&
	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
		atomic_inc(&uc_pool->status);
}


static void uncached_ipi_mc_drain(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		atomic_inc(&uc_pool->status);
}


/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @uc_pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = alloc_pages_exact_node(nid,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				IA64_GRANULE_SHIFT - PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);

	free_pages(c_addr, IA64_GRANULE_SHIFT - PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}


/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of contiguous uncached pages on the
 * requested node. If not enough contiguous uncached pages are available
 * on the requested node, round-robin starting with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

	do {
		if (!node_state(nid, N_HIGH_MEMORY))
			continue;
		uc_pool = &uncached_pools[nid];
		if (uc_pool->pool == NULL)
			continue;
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
		} while (uncached_add_chunk(uc_pool, nid) == 0);

	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);


/*
 * uncached_free_page
 *
 * @uc_addr: uncached address of first page to free
 * @n_pages: number of contiguous pages to free
 *
 * Free the specified number of uncached pages.
 */
void uncached_free_page(unsigned long uc_addr, int n_pages)
{
	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;

	if (unlikely(pool == NULL))
		return;

	if ((uc_addr & (0xFUL << 60)) != __IA64_UNCACHED_OFFSET)
		panic("uncached_free_page invalid address %lx\n", uc_addr);

	gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);


/*
 * uncached_build_memmap
 *
 * @uc_start: uncached starting address of a chunk of uncached memory
 * @uc_end: uncached ending address of a chunk of uncached memory
 * @arg: ignored (NULL argument passed in on call to efi_memmap_walk_uc())
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg)
{
	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;
	size_t size = uc_end - uc_start;

	touch_softlockup_watchdog();

	if (pool != NULL) {
		memset((char *)uc_start, 0, size);
		(void) gen_pool_add(pool, uc_start, size, nid);
	}
	return 0;
}


static int __init uncached_init(void)
{
	int nid;

	for_each_node_state(nid, N_ONLINE) {
		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
		mutex_init(&uncached_pools[nid].add_chunk_mutex);
	}

	efi_memmap_walk_uc(uncached_build_memmap, NULL);
	return 0;
}

__initcall(uncached_init);
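
The two exported entry points, uncached_alloc_page() and uncached_free_page(), are what the rest of the kernel sees; the main in-tree consumer is the SGI mspec driver (drivers/char/mspec.c). Below is a minimal, hypothetical module sketching how a caller might use them; the module name, allocation size, and error handling are illustrative, not taken from any in-tree user, and the extern prototypes simply mirror the definitions above:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <asm/page.h>

/* Prototypes matching the exports in uncached.c (normally pulled in
 * from the arch header rather than declared by hand). */
extern unsigned long uncached_alloc_page(int starting_nid, int n_pages);
extern void uncached_free_page(unsigned long uc_addr, int n_pages);

static unsigned long uc_buf;

static int __init uc_demo_init(void)
{
	/* starting_nid == -1 lets the allocator begin on the current node
	 * and round-robin from there. */
	uc_buf = uncached_alloc_page(-1, 4);	/* 4 contiguous uncached pages */
	if (!uc_buf)
		return -ENOMEM;		/* 0 means every node's pool was exhausted */

	/* uc_buf is an uncached kernel address (region 6 on ia64);
	 * stores through it bypass the processor caches. */
	memset((void *)uc_buf, 0, 4 * PAGE_SIZE);
	return 0;
}

static void __exit uc_demo_exit(void)
{
	uncached_free_page(uc_buf, 4);	/* size must match the allocation */
}

module_init(uc_demo_init);
module_exit(uc_demo_exit);
MODULE_LICENSE("GPL");

Note that the allocator never returns converted granules to the kernel page allocator; freed pages simply go back into the per-node gen_pool for reuse.
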
v4.6
The v4.6 version of the file is identical to the v3.1 listing above apart from two changes. First, the #include <asm/system.h> line is gone (that header was dissolved and removed from the kernel tree during the 3.x series), so the include block runs straight from <asm/pal.h> to <asm/pgtable.h>. Second, the granule allocation in uncached_add_chunk() uses the renamed node-local allocation API, __alloc_pages_node() with __GFP_THISNODE, in place of alloc_pages_exact_node() with GFP_THISNODE:

	page = __alloc_pages_node(nid,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				IA64_GRANULE_SHIFT - PAGE_SHIFT);

Everything else in the file is unchanged between the two versions.
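
A note on the address arithmetic used throughout the file: ia64 identity-maps physical memory twice, cached at PAGE_OFFSET (region 7) and uncached at __IA64_UNCACHED_OFFSET (region 6), which is why uncached_add_chunk() can convert a cached address with plain addition and why uncached_free_page() can validate an address by checking its top nibble. The standalone sketch below demonstrates the same arithmetic; the DEMO_* constants are local stand-ins for the ia64 region bases and are illustrative only:

#include <stdio.h>

#define DEMO_PAGE_OFFSET           0xe000000000000000UL	/* region 7: cached identity map   */
#define DEMO_IA64_UNCACHED_OFFSET  0xc000000000000000UL	/* region 6: uncached identity map */

int main(void)
{
	unsigned long phys = 0x0000000012345000UL;	/* an arbitrary physical address */

	/* Cached kernel alias, as returned by page_address(). */
	unsigned long c_addr = phys + DEMO_PAGE_OFFSET;

	/* The conversion done in uncached_add_chunk(). */
	unsigned long uc_addr = c_addr - DEMO_PAGE_OFFSET + DEMO_IA64_UNCACHED_OFFSET;

	/* The sanity check done in uncached_free_page(): the top nibble
	 * (bits 63..60, the region number) must select the uncached region. */
	if ((uc_addr & (0xFUL << 60)) != DEMO_IA64_UNCACHED_OFFSET) {
		printf("not an uncached address\n");
		return 1;
	}

	printf("phys %#lx -> cached %#lx -> uncached %#lx\n",
	       phys, c_addr, uc_addr);
	return 0;
}

Running the demo shows the physical offset bits carried through unchanged while only the region nibble differs between the two aliases, which is exactly what makes paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET) in uncached_free_page() work.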