/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_PGALLOC_H
#define _ASM_MICROBLAZE_PGALLOC_H

#ifdef CONFIG_MMU

#include <linux/kernel.h>	/* For min/max macros */
#include <linux/highmem.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/pgtable.h>

#define PGDIR_ORDER	0

/*
 * This is handled very differently on MicroBlaze since our page tables
 * are all 0's and I want to be able to use these zero'd pages elsewhere
 * as well - it gives us quite a speedup.
 * -- Cort
 */
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;

#define pgd_quicklist		(quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(quicklists.pte_cache)
#define pgtable_cache_size	(quicklists.pgtable_cache_sz)

extern unsigned long *zero_cache; /* head of linked list of pre-zero'd pages */
extern atomic_t zero_sz; /* # of currently pre-zero'd pages */
extern atomic_t zeropage_hits; /* # of zero'd page requests we've satisfied */
extern atomic_t zeropage_calls; /* # of zero'd page requests that have been made */
extern atomic_t zerototal; /* # of pages zero'd over time */

#define zero_quicklist		(zero_cache)
#define zero_cache_sz		(zero_sz)
#define zero_cache_calls	(zeropage_calls)
#define zero_cache_hits		(zeropage_hits)
#define zero_cache_total	(zerototal)

/*
 * return a pre-zero'd page from the list,
 * return NULL if none available -- Cort
 */
extern unsigned long get_zero_page_fast(void);

extern void __bad_pte(pmd_t *pmd);

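/* Slow path: get a fresh page from the page allocator and zero it for use as a pgd. */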
extern inline pgd_t *get_pgd_slow(void)
{
	pgd_t *ret;

	ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER);
	if (ret != NULL)
		clear_page(ret);
	return ret;
}

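/*
 * Fast path: pop a pgd off the pgd quicklist, falling back to
 * get_pgd_slow() when the list is empty. The first word of each cached
 * page links to the next entry and is cleared before the page is handed out.
 */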
extern inline pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	ret = pgd_quicklist;
	if (ret != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

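/* Return a pgd to the quicklist; its first word becomes the link to the old list head. */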
extern inline void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long **)pgd = pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

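/* Hand the pgd page straight back to the page allocator. */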
extern inline void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

#define pgd_free(mm, pgd)	free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()

#define pmd_pgtable(pmd)	pmd_page(pmd)

/*
 * We don't have any real pmd's, and this code never triggers because
 * the pgd will always be present..
 */
#define pmd_alloc_one_fast(mm, address)	({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address)	({ BUG(); ((pmd_t *)2); })

extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);

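/* Allocate and clear one page for a user PTE table; with CONFIG_HIGHPTE it may come from highmem. */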
static inline struct page *pte_alloc_one(struct mm_struct *mm,
		unsigned long address)
{
	struct page *ptepage;

#ifdef CONFIG_HIGHPTE
	int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
#else
	int flags = GFP_KERNEL | __GFP_REPEAT;
#endif

	ptepage = alloc_pages(flags, 0);
	if (ptepage)
		clear_highpage(ptepage);
	return ptepage;
}

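/* Take a PTE page from the pte quicklist; returns NULL when the list is empty. */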
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
		unsigned long address)
{
	unsigned long *ret;

	ret = pte_quicklist;
	if (ret != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

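/* Push a PTE page back onto the pte quicklist for reuse. */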
extern inline void pte_free_fast(pte_t *pte)
{
	*(unsigned long **)pte = pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

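/*
 * The remaining free helpers give the PTE page straight back to the page
 * allocator: kernel PTE tables are referenced by virtual address, user PTE
 * tables by struct page.
 */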
extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

extern inline void pte_free_slow(struct page *ptepage)
{
	__free_page(ptepage);
}

extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
{
	__free_page(ptepage);
}

#define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, (pte))

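/*
 * Install a PTE table into a pmd entry (really the pgd entry in this
 * two-level layout); both variants store the table's kernel virtual address.
 */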
#define pmd_populate(mm, pmd, pte) \
			(pmd_val(*(pmd)) = (unsigned long)page_address(pte))

#define pmd_populate_kernel(mm, pmd, pte) \
		(pmd_val(*(pmd)) = (unsigned long) (pte))

/*
 * We don't have any real pmd's, and this code never triggers because
 * the pgd will always be present..
 */
#define pmd_alloc_one(mm, address)	({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)			do { } while (0)
#define __pmd_free_tlb(tlb, x, addr)	pmd_free((tlb)->mm, x)
#define pgd_populate(mm, pmd, pte)	BUG()

extern int do_check_pgt_cache(int, int);

#endif /* CONFIG_MMU */

#define check_pgt_cache()		do { } while (0)

#endif /* _ASM_MICROBLAZE_PGALLOC_H */