// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/parisc/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 1999 SuSE GmbH
 *   changed by Philipp Rumpf
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2004 Randolph Chung (tausq@debian.org)
 * Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages */
#include <linux/compat.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>

extern int data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */

#if CONFIG_PGTABLE_LEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 * guarantee that global objects will be laid out in memory in the same order
 * as the order of declaration, so put these in different sections and use
 * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
int npmem_ranges __initdata;

#ifdef CONFIG_64BIT
#define MAX_MEM		(1UL << MAX_PHYSMEM_BITS)
#else /* !CONFIG_64BIT */
#define MAX_MEM		(3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
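			/* Not "mem=": skip to the next space-separated argument. */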
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}

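/* Largest gap tolerated between memory ranges: 1 GB, expressed in pages. */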
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

static void __init setup_bootmem(void)
{
	unsigned long mem_max;
#ifndef CONFIG_SPARSEMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			physmem_range_t tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {

				break;
			}
			tmp = pmem_ranges[j-1];
			pmem_ranges[j-1] = pmem_ranges[j];
			pmem_ranges[j] = tmp;
		}
	}

#ifndef CONFIG_SPARSEMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_SPARSEMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
				pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	/* Print the memory ranges */
	pr_info("Memory Ranges:\n");

	for (i = 0; i < npmem_ranges; i++) {
		struct resource *res = &sysram_resources[i];
		unsigned long start;
		unsigned long size;

		size = (pmem_ranges[i].pages << PAGE_SHIFT);
		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
			i, start, start + (size - 1), size >> 20);

		/* request memory resource */
		res->name = "System RAM";
		res->start = start;
		res->end = start + size - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	sysram_resource_count = npmem_ranges;

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();	/* check for "mem=" argument */

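	/*
	 * Walk the sorted ranges and clamp the total to mem_limit:
	 * the range that crosses the limit is shrunk, and any ranges
	 * past it are dropped.
	 */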
	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_SPARSEMEM
	/* Merge the ranges, keeping track of the holes */
	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 */

	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;
		unsigned long start;
		unsigned long size;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		start = start_pfn << PAGE_SHIFT;
		size = npages << PAGE_SHIFT;

		/* add system RAM memblock */
		memblock_add(start, size);

		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/*
	 * We can't use memblock top-down allocations because we only
	 * created the initial mapping up to KERNEL_INITIAL_SIZE in
	 * the assembly bootup code.
	 */
	memblock_set_bottom_up(true);

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
			PDC_CONSOLE_IO_IODC_SIZE));
	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));

#ifndef CONFIG_SPARSEMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			memblock_reserve(__pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);

	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
	pdc_pdt_init();

	memblock_allow_resize();
	memblock_dump_all();
}

static bool kernel_set_to_readonly;

static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
			     pgprot_t pgprot, int force)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long kernel_start, kernel_end;

	ro_start = __pa((unsigned long)_text);
	ro_end = __pa((unsigned long)&data_start);
	kernel_start = __pa((unsigned long)&__init_begin);
	kernel_end = __pa((unsigned long)&_end);

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
					     PAGE_SIZE << PMD_ORDER);
			if (!pmd)
				panic("pmd allocation failed.\n");
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = memblock_alloc(PAGE_SIZE,
							  PAGE_SIZE);
				if (!pg_table)
					panic("page table allocation failed\n");
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;
				pgprot_t prot;
				bool huge = false;

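				/*
				 * Choose the protection bits: an explicit
				 * mapping (force) always wins; pages outside
				 * the kernel image get plain PAGE_KERNEL;
				 * until the kernel is set read-only the
				 * whole image stays RWX (and may use huge
				 * pages); afterwards text is executable
				 * read-only and data is read-write.
				 */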
				if (force) {
					prot = pgprot;
				} else if (address < kernel_start || address >= kernel_end) {
					/* outside kernel memory */
					prot = PAGE_KERNEL;
				} else if (!kernel_set_to_readonly) {
					/* still initializing, allow writing to RO memory */
					prot = PAGE_KERNEL_RWX;
					huge = true;
				} else if (address >= ro_start) {
					/* Code (ro) and Data areas */
					prot = (address < ro_end) ?
						PAGE_KERNEL_EXEC : PAGE_KERNEL;
					huge = true;
				} else {
					prot = PAGE_KERNEL;
				}

				pte = __mk_pte(address, prot);
				if (huge)
					pte = pte_mkhuge(pte);

				if (address >= end_paddr)
					break;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

void __init set_kernel_text_rw(int enable_read_write)
{
	unsigned long start = (unsigned long) __init_begin;
	unsigned long end = (unsigned long) &data_start;

	map_pages(start, __pa(start), end - start,
		  PAGE_KERNEL_RWX, enable_read_write ? 1 : 0);

	/* force the kernel to see the new page table entries */
	flush_cache_all();
	flush_tlb_all();
}

void __ref free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;
	unsigned long kernel_end = (unsigned long)&_end;

	/* Remap kernel text and data, but do not touch init section yet. */
	kernel_set_to_readonly = true;
	map_pages(init_end, __pa(init_end), kernel_end - init_end,
		  PAGE_KERNEL, 0);

	/* The init text pages are marked R-X. We have to
	 * flush the icache and mark them RW-
	 *
	 * This is tricky, because map_pages is in the init section.
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_kernel */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);
	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, kernel_end);

	/* finally dump all the instructions which were cached, since the
	 * pages are no longer executable */
	flush_icache_range(init_begin, init_end);

	free_initmem_default(POISON_FREE_INITMEM);

	/* set up a new LED state on systems shipped with an LED state panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}


#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;

	pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between the top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

	/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
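/*
 * SET_MAP_OFFSET() rounds x up to the next VM_MAP_OFFSET boundary,
 * strictly above x, so a hole is always left below the area placed
 * there.
 */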

void *parisc_vmalloc_start __ro_after_init;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __ro_after_init;
#endif

void __init mem_init(void)
{
	/* Do sanity checks on IPC (compat) structures */
	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif

	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);

	high_memory = __va((max_pfn << PAGE_SHIFT));
	set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
	memblock_free_all();

#ifdef CONFIG_PA11
	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else
#endif
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);

	mem_init_print_info(NULL);

#if 0
	/*
	 * Do not expose the virtual kernel memory layout to userspace.
	 * But keep code for debugging purposes.
	 */
	printk("virtual kernel memory layout:\n"
	       "     vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
	       "     fixmap  : 0x%px - 0x%px   (%4ld kB)\n"
	       "     memory  : 0x%px - 0x%px   (%4ld MB)\n"
	       "       .init : 0x%px - 0x%px   (%4ld kB)\n"
	       "       .data : 0x%px - 0x%px   (%4ld kB)\n"
	       "       .text : 0x%px - 0x%px   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
	       (unsigned long)(FIXMAP_SIZE / 1024),

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __ro_after_init;
EXPORT_SYMBOL(empty_zero_page);

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced, this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		size = pmem_ranges[range].pages << PAGE_SHIFT;
		end_paddr = start_paddr + size;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			  initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("zero page allocation failed.\n");

}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

static void __init parisc_bootmem_free(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long holes_size[MAX_NR_ZONES] = { 0, };
	unsigned long mem_start_pfn = ~0UL, mem_end_pfn = 0, mem_size_pfn = 0;
	int i;

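	/*
	 * Find the lowest and highest pfn over all ranges; everything
	 * is put into a single zone, and pages missing from that span
	 * are accounted as holes.
	 */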
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start = pmem_ranges[i].start_pfn;
		unsigned long size = pmem_ranges[i].pages;
		unsigned long end = start + size;

		if (mem_start_pfn > start)
			mem_start_pfn = start;
		if (mem_end_pfn < end)
			mem_end_pfn = end;
		mem_size_pfn += size;
	}

	zones_size[0] = mem_end_pfn - mem_start_pfn;
	holes_size[0] = zones_size[0] - mem_size_pfn;

	free_area_init_node(0, zones_size, mem_start_pfn, holes_size);
}

void __init paging_init(void)
{
	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	/*
	 * Mark all memblocks as present for sparsemem using
	 * memory_present() and then initialize sparsemem.
	 */
	memblocks_present();
	sparse_init();
	parisc_bootmem_free();
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))

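/*
 * One bit per space id, packed into longs: space_id[] tracks ids
 * currently in use, dirty_space_id[] tracks ids that were freed but
 * whose TLB entries may not have been flushed yet.
 */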
static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

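	/*
	 * Find the first free id at or after the last allocation
	 * point and mark it in use; the id is returned pre-shifted
	 * by SPACEID_SHIFT, ready for the space registers.
	 */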
	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

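	/* Locate the bitmap word and bit position for this space id. */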
	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
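		/*
		 * Every dirty bit is still set in space_id[], so the
		 * XOR clears exactly those bits, returning the ids
		 * to the free pool.
		 */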
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
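		/*
		 * Same XOR trick as the SMP path: the dirty bits are
		 * set in space_id[], so XOR clears them.
		 */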
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

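	/*
	 * Grab the dirty sids while holding the lock, flush every
	 * CPU's TLB with the lock dropped, then put the collected
	 * sids back into the free pool under the lock again.
	 */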
	__inc_irq_stat(irq_tlb_count);
	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	__inc_irq_stat(irq_tlb_count);
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif