v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pte_t *ptep = virt_to_kpte(addr);

		*ptep = pte_mkcache(*ptep);
	}
}

/*
 * Motorola 680x0 user's manual recommends using uncached memory for address
 * translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems like
 * a very important recommendation to follow. Provide some helpers to combat
 * 'variation' amongst the users of this.
 */

void mmu_page_ctor(void *page)
{
	__flush_page_to_ram(page);
	flush_tlb_kernel_page(page);
	nocache_page(page);
}

void mmu_page_dtor(void *page)
{
	cache_page(page);
}
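
/*
 * Editor's note: a minimal sketch of how the two helpers above pair up,
 * assuming a caller that turns an ordinary kernel page into MMU table
 * storage and back.  This mirrors what get_pointer_table() and
 * free_pointer_table() below actually do; it is an illustration, not
 * part of the file.
 */
#if 0
	void *page = (void *)get_zeroed_page(GFP_KERNEL);

	mmu_page_ctor(page);	/* flush page, drop ATC entry, map nocache */
	/* ... hand the page out as translation-table storage ... */
	mmu_page_dtor(page);	/* restore the cacheable mapping */
	free_page((unsigned long)page);
#endif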

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;

static struct list_head ptable_list[2] = {
	LIST_HEAD_INIT(ptable_list[0]),
	LIST_HEAD_INIT(ptable_list[1]),
};

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)

static const int ptable_shift[2] = {
	7+2, /* PGD, PMD */
	6+2, /* PTE */
};

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
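
/*
 * Editor's note: worked numbers, assuming the 4 KiB PAGE_SIZE used here.
 * For type 0 (PGD/PMD) ptable_size() is 1 << 9 = 512 bytes, so a page
 * holds 4096/512 = 8 tables and ptable_mask() is 0x00ff.  For type 1
 * (PTE) ptable_size() is 1 << 8 = 256 bytes, giving 16 tables per page
 * and a mask of 0xffff.  A set bit in PD_MARKBITS() marks a free slot.
 */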

void __init init_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = ptable_mask(type);
		list_add(dp, &ptable_list[type]);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the page so it can be freed later */
	__ClearPageReserved(PD_PAGE(dp));
	init_page_count(PD_PAGE(dp));
}
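
/*
 * Editor's note: illustrative call only (the real call sites are
 * elsewhere in arch/m68k, and TABLE_PGD is assumed from the pgalloc
 * headers).  Registering a boot-time table this way marks the other
 * slots of its page free for get_pointer_table():
 */
#if 0
	init_pointer_table(kernel_pg_dir, TABLE_PGD);
#endif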

void *get_pointer_table(int type)
{
	ptable_desc *dp = ptable_list[type].next;
	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
	unsigned int tmp, off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables (type PGD/PMD) or 16 page
	 * tables (type PTE).  The page is remapped in virtual address
	 * space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		if (type == TABLE_PTE) {
			/*
			 * m68k doesn't have SPLIT_PTE_PTLOCKS because it
			 * doesn't have SMP.
			 */
			pgtable_pte_page_ctor(virt_to_page(page));
		}

		mmu_page_ctor(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = ptable_mask(type) - 1;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list[type]);
	}
	return page_address(PD_PAGE(dp)) + off;
}
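
/*
 * Editor's note: worked example of the scan above.  With PD_MARKBITS(dp)
 * == 0b0110 on a PGD/PMD page, slot 0 is in use and slot 1 is the first
 * free one: the loop stops at tmp == 2 with off == 512, the mark bits
 * become 0b0100, and page_address(PD_PAGE(dp)) + 512 is returned.
 */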

int free_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (PD_MARKBITS(dp) & mask)
		panic("table already free!");

	PD_MARKBITS(dp) |= mask;

	if (PD_MARKBITS(dp) == ptable_mask(type)) {
		/* all tables in page are free, free page */
		list_del(dp);
		mmu_page_dtor((void *)page);
		if (type == TABLE_PTE)
			pgtable_pte_page_dtor(virt_to_page(page));
		free_page(page);
		return 1;
	} else if (ptable_list[type].next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list[type]);
	}
	return 0;
}
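
/*
 * Editor's note: together with get_pointer_table(), the list moves above
 * keep allocation O(1): freeing moves a page with spare slots to the head
 * of ptable_list, allocating the last slot moves a full page to the tail,
 * so the head probe in get_pointer_table() either finds a free slot at
 * once or falls through to allocating a fresh page.
 */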

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t *last_pte_table __initdata = NULL;

static pte_t * __init kernel_page_table(void)
{
	pte_t *pte_table = last_pte_table;

	if (PAGE_ALIGNED(last_pte_table)) {
		pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!pte_table) {
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
					__func__, PAGE_SIZE, PAGE_SIZE);
		}

		clear_page(pte_table);
		mmu_page_ctor(pte_table);

		last_pte_table = pte_table;
	}

	last_pte_table += PTRS_PER_PTE;

	return pte_table;
}
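
/*
 * Editor's note: worked numbers, assuming the Motorola layout's 4-byte
 * PTEs and PTRS_PER_PTE == 64.  Each call hands out one 64 * 4 = 256 byte
 * table and bumps last_pte_table; after 16 tables the pointer is page
 * aligned again and the next call grabs a fresh uncached page from
 * memblock.  PAGE_ALIGNED(NULL) is true, so the very first call also
 * takes the allocation path.
 */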

static pmd_t *last_pmd_table __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pmd_table) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

			if (!pud_present(*pud))
				continue;
			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
	}

	last_pmd_table += PTRS_PER_PMD;
	if (PAGE_ALIGNED(last_pmd_table)) {
		last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!last_pmd_table)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pmd_table);
		mmu_page_ctor(last_pmd_table);
	}

	return last_pmd_table;
}
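
/*
 * Editor's note: the same bump allocator as kernel_page_table(), but for
 * pointer tables of PTRS_PER_PMD entries (assuming 128 entries * 4 bytes
 * = 512 bytes, i.e. 8 tables per page).  The first page is never
 * allocated: the space head.S left unused after its last table is reused.
 */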

static void __init map_node(int node)
{
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (PGDIR_SIZE-1)) &&
			    size >= PGDIR_SIZE) {
#ifdef DEBUG
				printk("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= PGDIR_SIZE;
				virtaddr += PGDIR_SIZE;
				physaddr += PGDIR_SIZE;
				continue;
			}
		}
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (!pud_present(*pud_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk("[new pointer %p]", pmd_dir);
#endif
			pud_set(pud_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pud_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk("[early term]");
#endif
				pmd_val(*pmd_dir) = physaddr;
				physaddr += PMD_SIZE;
			} else {
				int i;
#ifdef DEBUG
				printk("[zero map]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);

				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PMD_SIZE;
			virtaddr += PMD_SIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif
}
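
/*
 * Editor's note: the 020/030 branches above rely on "early termination"
 * descriptors, assuming this layout's 32 MB PGDIR_SIZE and 256 KB
 * PMD_SIZE: a root-table entry may point straight at a 32 MB block and a
 * pointer-table entry at a 256 KB block, so aligned chunks are mapped
 * without allocating lower table levels.  Virtual page 0 is the
 * exception: it goes through a real PTE table whose entry 0 stays
 * invalid, so NULL dereferences fault.
 */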

/*
 * Alternate definitions that are compile time constants, for
 * initializing protection_map.  The cachebits are fixed later.
 */
#define PAGE_NONE_C	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED_C	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)

static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]					= PAGE_NONE_C,
	[VM_READ]					= PAGE_READONLY_C,
	[VM_WRITE]					= PAGE_COPY_C,
	[VM_WRITE | VM_READ]				= PAGE_COPY_C,
	[VM_EXEC]					= PAGE_READONLY_C,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_C,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY_C,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_C,
	[VM_SHARED]					= PAGE_NONE_C,
	[VM_SHARED | VM_READ]				= PAGE_READONLY_C,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED_C,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED_C,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_C,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_C,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_C,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_C
};
DECLARE_VM_GET_PAGE_PROT

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr;
	int i;

#ifdef DEBUG
	printk("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
			  MEMBLOCK_NONE);
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
				  MEMBLOCK_NONE);
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;

	/* Reserve kernel text/data/bss and the memory allocated in head.S */
	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. Make sure memblock will not try to allocate
	 * pages beyond the memory we already mapped in head.S
	 */
	memblock_set_bottom_up(true);

	for (i = 0; i < m68k_num_memory; i++) {
		m68k_setup_node(i);
		map_node(i);
	}

	flush_tlb_all();

	early_memtest(min_addr, max_addr);

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fc(USER_DATA);

#ifdef DEBUG
	printk("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++)
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);

	max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
	free_area_init(max_zone_pfn);
}
v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t * __init kernel_page_table(void)
{
	pte_t *ptablep;

	ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!ptablep)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	clear_page(ptablep);
	__flush_page_to_ram(ptablep);
	flush_tlb_kernel_page(ptablep);
	nocache_page(ptablep);

	return ptablep;
}

static pmd_t *last_pgtable __initdata = NULL;
pmd_t *zero_pgtable __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_present(kernel_pg_dir[i]))
				continue;
			pmd = __pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
							   PAGE_SIZE);
		if (!last_pgtable)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);
		flush_tlb_kernel_page(last_pgtable);
		nocache_page(last_pgtable);
	}

	return last_pgtable;
}

static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
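/*
 * Editor's note: in this older layout the spans are spelled out as
 * constants rather than derived: one pointer-table entry covers 256 KB
 * (PTRTREESIZE) and one root-table entry covers 32 MB (ROOTTREESIZE),
 * corresponding to PMD_SIZE and PGDIR_SIZE in the v6.2 code above.
 */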
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk("[early term]");
#endif
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
				int i;
#ifdef DEBUG
				printk("[zero map]");
#endif
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;
				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif
}

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr;
	int i;

#ifdef DEBUG
	printk("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		memblock_add(m68k_memory[i].addr, m68k_memory[i].size);
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;

	/* Reserve kernel text/data/bss and the memory allocated in head.S */
	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. Make sure memblock will not try to allocate
	 * pages beyond the memory we already mapped in head.S
	 */
	memblock_set_bottom_up(true);

	for (i = 0; i < m68k_num_memory; i++) {
		m68k_setup_node(i);
		map_node(i);
	}

	flush_tlb_all();

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++) {
		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
		free_area_init_node(i, zones_size,
				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);
	}
}