v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/arch/parisc/mm/init.c
  4 *
  5 *  Copyright (C) 1995	Linus Torvalds
  6 *  Copyright 1999 SuSE GmbH
  7 *    changed by Philipp Rumpf
  8 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
  9 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 10 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 11 *
 12 */
 13
 14
 15#include <linux/module.h>
 16#include <linux/mm.h>
 17#include <linux/memblock.h>
 18#include <linux/gfp.h>
 19#include <linux/delay.h>
 20#include <linux/init.h>
 21#include <linux/initrd.h>
 22#include <linux/swap.h>
 23#include <linux/unistd.h>
 24#include <linux/nodemask.h>	/* for node_online_map */
 25#include <linux/pagemap.h>	/* for release_pages */
 26#include <linux/compat.h>
 27
 28#include <asm/pgalloc.h>
 29#include <asm/pgtable.h>
 30#include <asm/tlb.h>
 31#include <asm/pdc_chassis.h>
 32#include <asm/mmzone.h>
 33#include <asm/sections.h>
 34#include <asm/msgbuf.h>
 35#include <asm/sparsemem.h>
 36
 37extern int  data_start;
 38extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */
 39
 40#if CONFIG_PGTABLE_LEVELS == 3
 41/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 42 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 43 * guarantee that global objects will be laid out in memory in the same order
 44 * as the order of declaration, so put these in different sections and use
 45 * the linker script to order them. */
 46pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
 47#endif
 48
 49pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
 50pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
 51
 52static struct resource data_resource = {
 53	.name	= "Kernel data",
 54	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 55};
 56
 57static struct resource code_resource = {
 58	.name	= "Kernel code",
 59	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 60};
 61
 62static struct resource pdcdata_resource = {
 63	.name	= "PDC data (Page Zero)",
 64	.start	= 0,
 65	.end	= 0x9ff,
 66	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
 67};
 68
 69static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;
 70
 71/* The following array is initialized from the firmware specific
 72 * information retrieved in kernel/inventory.c.
 73 */
 74
 75physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
 76int npmem_ranges __initdata;
 77
 78#ifdef CONFIG_64BIT
 79#define MAX_MEM         (1UL << MAX_PHYSMEM_BITS)
 80#else /* !CONFIG_64BIT */
 81#define MAX_MEM         (3584U*1024U*1024U)
 82#endif /* !CONFIG_64BIT */
 83
 84static unsigned long mem_limit __read_mostly = MAX_MEM;
 85
 86static void __init mem_limit_func(void)
 87{
 88	char *cp, *end;
 89	unsigned long limit;
 90
 91	/* We need this before __setup() functions are called */
 92
 93	limit = MAX_MEM;
 94	for (cp = boot_command_line; *cp; ) {
 95		if (memcmp(cp, "mem=", 4) == 0) {
 96			cp += 4;
 97			limit = memparse(cp, &end);
 98			if (end != cp)
 99				break;
100			cp = end;
101		} else {
102			while (*cp != ' ' && *cp)
103				++cp;
104			while (*cp == ' ')
105				++cp;
106		}
107	}
108
109	if (limit < mem_limit)
110		mem_limit = limit;
111}
112
113#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
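/* i.e. 1 GiB expressed in pages: with 4 kB pages this is 0x40000 (262144) pages */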
114
115static void __init setup_bootmem(void)
116{
117	unsigned long mem_max;
118#ifndef CONFIG_SPARSEMEM
119	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
120	int npmem_holes;
121#endif
122	int i, sysram_resource_count;
123
124	disable_sr_hashing(); /* Turn off space register hashing */
125
126	/*
127	 * Sort the ranges. Since the number of ranges is typically
128	 * small, and performance is not an issue here, just do
129	 * a simple insertion sort.
130	 */
131
132	for (i = 1; i < npmem_ranges; i++) {
133		int j;
134
135		for (j = i; j > 0; j--) {
136			physmem_range_t tmp;
137
138			if (pmem_ranges[j-1].start_pfn <
139			    pmem_ranges[j].start_pfn) {
140
141				break;
142			}
143			tmp = pmem_ranges[j-1];
144			pmem_ranges[j-1] = pmem_ranges[j];
145			pmem_ranges[j] = tmp;
146		}
147	}
148
149#ifndef CONFIG_SPARSEMEM
150	/*
151	 * Throw out ranges that are too far apart (controlled by
152	 * MAX_GAP).
153	 */
154
155	for (i = 1; i < npmem_ranges; i++) {
156		if (pmem_ranges[i].start_pfn -
157			(pmem_ranges[i-1].start_pfn +
158			 pmem_ranges[i-1].pages) > MAX_GAP) {
159			npmem_ranges = i;
160			printk("Large gap in memory detected (%ld pages). "
161			       "Consider turning on CONFIG_SPARSEMEM\n",
162			       pmem_ranges[i].start_pfn -
163			       (pmem_ranges[i-1].start_pfn +
164			        pmem_ranges[i-1].pages));
165			break;
166		}
167	}
168#endif
169
170	/* Print the memory ranges */
171	pr_info("Memory Ranges:\n");
172
173	for (i = 0; i < npmem_ranges; i++) {
174		struct resource *res = &sysram_resources[i];
175		unsigned long start;
176		unsigned long size;
177
178		size = (pmem_ranges[i].pages << PAGE_SHIFT);
179		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
180		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
181			i, start, start + (size - 1), size >> 20);
182
183		/* request memory resource */
184		res->name = "System RAM";
185		res->start = start;
186		res->end = start + size - 1;
187		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
188		request_resource(&iomem_resource, res);
189	}
190
191	sysram_resource_count = npmem_ranges;
192
193	/*
194	 * For 32 bit kernels we limit the amount of memory we can
195	 * support, in order to preserve enough kernel address space
196	 * for other purposes. For 64 bit kernels we don't normally
197	 * limit the memory, but this mechanism can be used to
198	 * artificially limit the amount of memory (and it is written
199	 * to work with multiple memory ranges).
200	 */
201
202	mem_limit_func();       /* check for "mem=" argument */
203
204	mem_max = 0;
205	for (i = 0; i < npmem_ranges; i++) {
206		unsigned long rsize;
207
208		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
209		if ((mem_max + rsize) > mem_limit) {
210			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
211			if (mem_max == mem_limit)
212				npmem_ranges = i;
213			else {
214				pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
215						       - (mem_max >> PAGE_SHIFT);
216				npmem_ranges = i + 1;
217				mem_max = mem_limit;
218			}
219			break;
220		}
221		mem_max += rsize;
222	}
223
224	printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);
225
226#ifndef CONFIG_SPARSEMEM
227	/* Merge the ranges, keeping track of the holes */
228	{
229		unsigned long end_pfn;
230		unsigned long hole_pages;
231
232		npmem_holes = 0;
233		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
234		for (i = 1; i < npmem_ranges; i++) {
235
236			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
237			if (hole_pages) {
238				pmem_holes[npmem_holes].start_pfn = end_pfn;
239				pmem_holes[npmem_holes++].pages = hole_pages;
240				end_pfn += hole_pages;
241			}
242			end_pfn += pmem_ranges[i].pages;
243		}
244
245		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
246		npmem_ranges = 1;
247	}
248#endif
249
250	/*
251	 * Initialize and free the full range of memory in each range.
252	 */
253
254	max_pfn = 0;
255	for (i = 0; i < npmem_ranges; i++) {
256		unsigned long start_pfn;
257		unsigned long npages;
258		unsigned long start;
259		unsigned long size;
260
261		start_pfn = pmem_ranges[i].start_pfn;
262		npages = pmem_ranges[i].pages;
263
264		start = start_pfn << PAGE_SHIFT;
265		size = npages << PAGE_SHIFT;
266
267		/* add system RAM memblock */
268		memblock_add(start, size);
269
270		if ((start_pfn + npages) > max_pfn)
271			max_pfn = start_pfn + npages;
272	}
273
274	/*
275	 * We can't use memblock top-down allocations because we only
276	 * created the initial mapping up to KERNEL_INITIAL_SIZE in
277	 * the assembly bootup code.
278	 */
279	memblock_set_bottom_up(true);
280
281	/* IOMMU is always used to access "high mem" on those boxes
282	 * that can support enough mem that a PCI device couldn't
283	 * directly DMA to any physical addresses.
284	 * ISA DMA support will need to revisit this.
285	 */
286	max_low_pfn = max_pfn;
287
288	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
289
290#define PDC_CONSOLE_IO_IODC_SIZE 32768
291
292	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
293				PDC_CONSOLE_IO_IODC_SIZE));
294	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
295			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));
296
297#ifndef CONFIG_SPARSEMEM
298
299	/* reserve the holes */
300
301	for (i = 0; i < npmem_holes; i++) {
302		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
303				(pmem_holes[i].pages << PAGE_SHIFT));
304	}
305#endif
306
307#ifdef CONFIG_BLK_DEV_INITRD
308	if (initrd_start) {
309		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
310		if (__pa(initrd_start) < mem_max) {
311			unsigned long initrd_reserve;
312
313			if (__pa(initrd_end) > mem_max) {
314				initrd_reserve = mem_max - __pa(initrd_start);
315			} else {
316				initrd_reserve = initrd_end - initrd_start;
317			}
318			initrd_below_start_ok = 1;
319			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
320
321			memblock_reserve(__pa(initrd_start), initrd_reserve);
322		}
323	}
324#endif
325
326	data_resource.start =  virt_to_phys(&data_start);
327	data_resource.end = virt_to_phys(_end) - 1;
328	code_resource.start = virt_to_phys(_text);
329	code_resource.end = virt_to_phys(&data_start)-1;
330
331	/* We don't know which region the kernel will be in, so try
332	 * all of them.
333	 */
334	for (i = 0; i < sysram_resource_count; i++) {
335		struct resource *res = &sysram_resources[i];
336		request_resource(res, &code_resource);
337		request_resource(res, &data_resource);
338	}
339	request_resource(&sysram_resources[0], &pdcdata_resource);
340
341	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
342	pdc_pdt_init();
343
344	memblock_allow_resize();
345	memblock_dump_all();
346}
347
348static bool kernel_set_to_readonly;
349
350static void __init map_pages(unsigned long start_vaddr,
351			     unsigned long start_paddr, unsigned long size,
352			     pgprot_t pgprot, int force)
353{
354	pgd_t *pg_dir;
355	pmd_t *pmd;
356	pte_t *pg_table;
357	unsigned long end_paddr;
358	unsigned long start_pmd;
359	unsigned long start_pte;
360	unsigned long tmp1;
361	unsigned long tmp2;
362	unsigned long address;
363	unsigned long vaddr;
364	unsigned long ro_start;
365	unsigned long ro_end;
366	unsigned long kernel_start, kernel_end;
367
368	ro_start = __pa((unsigned long)_text);
369	ro_end   = __pa((unsigned long)&data_start);
370	kernel_start = __pa((unsigned long)&__init_begin);
371	kernel_end  = __pa((unsigned long)&_end);
372
373	end_paddr = start_paddr + size;
374
375	pg_dir = pgd_offset_k(start_vaddr);
376
377#if PTRS_PER_PMD == 1
378	start_pmd = 0;
379#else
380	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
381#endif
382	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
383
384	address = start_paddr;
385	vaddr = start_vaddr;
386	while (address < end_paddr) {
387#if PTRS_PER_PMD == 1
388		pmd = (pmd_t *)__pa(pg_dir);
389#else
390		pmd = (pmd_t *)pgd_address(*pg_dir);
391
392		/*
393		 * pmd is physical at this point
394		 */
395
396		if (!pmd) {
397			pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
398					     PAGE_SIZE << PMD_ORDER);
399			if (!pmd)
400				panic("pmd allocation failed.\n");
401			pmd = (pmd_t *) __pa(pmd);
402		}
403
404		pgd_populate(NULL, pg_dir, __va(pmd));
405#endif
406		pg_dir++;
407
408		/* now change pmd to kernel virtual addresses */
409
410		pmd = (pmd_t *)__va(pmd) + start_pmd;
411		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
412
413			/*
414			 * pg_table is physical at this point
415			 */
416
417			pg_table = (pte_t *)pmd_address(*pmd);
418			if (!pg_table) {
419				pg_table = memblock_alloc(PAGE_SIZE,
420							  PAGE_SIZE);
421				if (!pg_table)
422					panic("page table allocation failed\n");
423				pg_table = (pte_t *) __pa(pg_table);
424			}
425
426			pmd_populate_kernel(NULL, pmd, __va(pg_table));
427
428			/* now change pg_table to kernel virtual addresses */
429
430			pg_table = (pte_t *) __va(pg_table) + start_pte;
431			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
432				pte_t pte;
433				pgprot_t prot;
434				bool huge = false;
435
436				if (force) {
437					prot = pgprot;
438				} else if (address < kernel_start || address >= kernel_end) {
439					/* outside kernel memory */
440					prot = PAGE_KERNEL;
441				} else if (!kernel_set_to_readonly) {
442					/* still initializing, allow writing to RO memory */
443					prot = PAGE_KERNEL_RWX;
444					huge = true;
445				} else if (address >= ro_start) {
446					/* Code (ro) and Data areas */
447					prot = (address < ro_end) ?
448						PAGE_KERNEL_EXEC : PAGE_KERNEL;
449					huge = true;
450				} else {
451					prot = PAGE_KERNEL;
452				}
453
454				pte = __mk_pte(address, prot);
455				if (huge)
456					pte = pte_mkhuge(pte);
457
458				if (address >= end_paddr)
459					break;
460
461				set_pte(pg_table, pte);
462
463				address += PAGE_SIZE;
464				vaddr += PAGE_SIZE;
465			}
466			start_pte = 0;
467
468			if (address >= end_paddr)
469			    break;
470		}
471		start_pmd = 0;
472	}
473}
474
475void __init set_kernel_text_rw(int enable_read_write)
476{
477	unsigned long start = (unsigned long) __init_begin;
478	unsigned long end   = (unsigned long) &data_start;
479
480	map_pages(start, __pa(start), end-start,
481		PAGE_KERNEL_RWX, enable_read_write ? 1:0);
482
483	/* force the kernel to see the new page table entries */
484	flush_cache_all();
485	flush_tlb_all();
486}
487
488void __ref free_initmem(void)
489{
490	unsigned long init_begin = (unsigned long)__init_begin;
491	unsigned long init_end = (unsigned long)__init_end;
492	unsigned long kernel_end  = (unsigned long)&_end;
493
494	/* Remap kernel text and data, but do not touch init section yet. */
495	kernel_set_to_readonly = true;
496	map_pages(init_end, __pa(init_end), kernel_end - init_end,
497		  PAGE_KERNEL, 0);
498
499	/* The init text pages are marked R-X.  We have to
500	 * flush the icache and mark them RW-
501	 *
502	 * This is tricky, because map_pages is in the init section.
503	 * Do a dummy remap of the data section first (the data
504	 * section is already PAGE_KERNEL) to pull in the TLB entries
505 * for map_pages */
506	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
507		  PAGE_KERNEL_RWX, 1);
508	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
509	 * map_pages */
510	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
511		  PAGE_KERNEL, 1);
512
513	/* force the kernel to see the new TLB entries */
514	__flush_tlb_range(0, init_begin, kernel_end);
515
516	/* finally dump all the instructions which were cached, since the
517	 * pages are no longer executable */
518	flush_icache_range(init_begin, init_end);
519	
520	free_initmem_default(POISON_FREE_INITMEM);
521
522	/* set up a new LED state on systems shipped with an LED State panel */
523	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
524}
525
526
527#ifdef CONFIG_STRICT_KERNEL_RWX
528void mark_rodata_ro(void)
529{
530	/* rodata memory was already mapped with KERNEL_RO access rights by
531	 * pagetable_init() and map_pages(). No need to do additional stuff here */
532	unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;
533
534	pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
535}
536#endif
537
538
539/*
540 * Just an arbitrary offset to serve as a "hole" between mapping areas
541 * (between top of physical memory and a potential pcxl dma mapping
542 * area, and below the vmalloc mapping area).
543 *
544 * The current 32K value just means that there will be a 32K "hole"
545 * between mapping areas. That means that any out-of-bounds memory
546 * accesses will hopefully be caught. The vmalloc() routines leave
547 * a hole of 4kB between each vmalloced area for the same reason.
548 */
549
550 /* Leave room for gateway page expansion */
551#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
552#error KERNEL_MAP_START is in gateway reserved region
553#endif
554#define MAP_START (KERNEL_MAP_START)
555
556#define VM_MAP_OFFSET  (32*1024)
557#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
558				     & ~(VM_MAP_OFFSET-1)))
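/* Worked example: SET_MAP_OFFSET(0x12345) = (0x12345 + 0x8000) & ~0x7fff
 * = 0x18000, the next 32K boundary strictly above the input address. */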
559
560void *parisc_vmalloc_start __ro_after_init;
561EXPORT_SYMBOL(parisc_vmalloc_start);
562
563#ifdef CONFIG_PA11
564unsigned long pcxl_dma_start __ro_after_init;
565#endif
566
567void __init mem_init(void)
568{
569	/* Do sanity checks on IPC (compat) structures */
570	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
571#ifndef CONFIG_64BIT
572	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
573	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
574	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
575#endif
576#ifdef CONFIG_COMPAT
577	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
578	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
579	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
580	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
581#endif
582
583	/* Do sanity checks on page table constants */
584	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
585	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
586	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
587	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
588			> BITS_PER_LONG);
589
590	high_memory = __va((max_pfn << PAGE_SHIFT));
591	set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
592	memblock_free_all();
593
594#ifdef CONFIG_PA11
595	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
596		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
597		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
598						+ PCXL_DMA_MAP_SIZE);
599	} else
600#endif
601		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
602
603	mem_init_print_info(NULL);
604
605#if 0
606	/*
607	 * Do not expose the virtual kernel memory layout to userspace.
608	 * But keep code for debugging purposes.
609	 */
610	printk("virtual kernel memory layout:\n"
611	       "     vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
612	       "     fixmap  : 0x%px - 0x%px   (%4ld kB)\n"
613	       "     memory  : 0x%px - 0x%px   (%4ld MB)\n"
614	       "       .init : 0x%px - 0x%px   (%4ld kB)\n"
615	       "       .data : 0x%px - 0x%px   (%4ld kB)\n"
616	       "       .text : 0x%px - 0x%px   (%4ld kB)\n",
617
618	       (void*)VMALLOC_START, (void*)VMALLOC_END,
619	       (VMALLOC_END - VMALLOC_START) >> 20,
620
621	       (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
622	       (unsigned long)(FIXMAP_SIZE / 1024),
623
624	       __va(0), high_memory,
625	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
626
627	       __init_begin, __init_end,
628	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,
629
630	       _etext, _edata,
631	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,
632
633	       _text, _etext,
634	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
635#endif
636}
637
638unsigned long *empty_zero_page __ro_after_init;
639EXPORT_SYMBOL(empty_zero_page);
640
641/*
642 * pagetable_init() sets up the page tables
643 *
644 * Note that gateway_init() places the Linux gateway page at page 0.
645 * Since gateway pages cannot be dereferenced this has the desirable
646 * side effect of trapping those pesky NULL-reference errors in the
647 * kernel.
648 */
649static void __init pagetable_init(void)
650{
651	int range;
652
653	/* Map each physical memory range to its kernel vaddr */
654
655	for (range = 0; range < npmem_ranges; range++) {
656		unsigned long start_paddr;
657		unsigned long end_paddr;
658		unsigned long size;
659
660		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
661		size = pmem_ranges[range].pages << PAGE_SHIFT;
662		end_paddr = start_paddr + size;
663
664		map_pages((unsigned long)__va(start_paddr), start_paddr,
665			  size, PAGE_KERNEL, 0);
666	}
667
668#ifdef CONFIG_BLK_DEV_INITRD
669	if (initrd_end && initrd_end > mem_limit) {
670		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
671		map_pages(initrd_start, __pa(initrd_start),
672			  initrd_end - initrd_start, PAGE_KERNEL, 0);
673	}
674#endif
675
676	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
677	if (!empty_zero_page)
678		panic("zero page allocation failed.\n");
679
680}
681
682static void __init gateway_init(void)
683{
684	unsigned long linux_gateway_page_addr;
685	/* FIXME: This is 'const' in order to trick the compiler
686	   into not treating it as DP-relative data. */
687	extern void * const linux_gateway_page;
688
689	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;
690
691	/*
692	 * Setup Linux Gateway page.
693	 *
694	 * The Linux gateway page will reside in kernel space (on virtual
695	 * page 0), so it doesn't need to be aliased into user space.
696	 */
697
698	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
699		  PAGE_SIZE, PAGE_GATEWAY, 1);
700}
701
702static void __init parisc_bootmem_free(void)
703{
704	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
705	unsigned long holes_size[MAX_NR_ZONES] = { 0, };
706	unsigned long mem_start_pfn = ~0UL, mem_end_pfn = 0, mem_size_pfn = 0;
707	int i;
708
709	for (i = 0; i < npmem_ranges; i++) {
710		unsigned long start = pmem_ranges[i].start_pfn;
711		unsigned long size = pmem_ranges[i].pages;
712		unsigned long end = start + size;
713
714		if (mem_start_pfn > start)
715			mem_start_pfn = start;
716		if (mem_end_pfn < end)
717			mem_end_pfn = end;
718		mem_size_pfn += size;
719	}
720
721	zones_size[0] = mem_end_pfn - mem_start_pfn;
722	holes_size[0] = zones_size[0] - mem_size_pfn;
723
724	free_area_init_node(0, zones_size, mem_start_pfn, holes_size);
725}
726
727void __init paging_init(void)
728{
729	setup_bootmem();
730	pagetable_init();
731	gateway_init();
732	flush_cache_all_local(); /* start with known state */
733	flush_tlb_all_local(NULL);
734
735	/*
736	 * Mark all memblocks as present for sparsemem using
737	 * memory_present() and then initialize sparsemem.
738	 */
739	memblocks_present();
740	sparse_init();
741	parisc_bootmem_free();
742}
743
744#ifdef CONFIG_PA20
745
746/*
747 * Currently, all PA20 chips have 18 bit protection IDs, which is the
748 * limiting factor (space ids are 32 bits).
749 */
750
751#define NR_SPACE_IDS 262144
752
753#else
754
755/*
756 * Currently we have a one-to-one relationship between space IDs and
757 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
758 * support 15 bit protection IDs, so that is the limiting factor.
759 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
760 * probably not worth the effort for a special case here.
761 */
762
763#define NR_SPACE_IDS 32768
764
765#endif  /* !CONFIG_PA20 */
766
767#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
768#define SID_ARRAY_SIZE  (NR_SPACE_IDS / (8 * sizeof(long)))
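/* with 64-bit longs: 262144/64 = 4096 words on PA20, 32768/64 = 512 otherwise */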
769
770static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
771static unsigned long dirty_space_id[SID_ARRAY_SIZE];
772static unsigned long space_id_index;
773static unsigned long free_space_ids = NR_SPACE_IDS - 1;
774static unsigned long dirty_space_ids = 0;
775
776static DEFINE_SPINLOCK(sid_lock);
777
778unsigned long alloc_sid(void)
779{
780	unsigned long index;
781
782	spin_lock(&sid_lock);
783
784	if (free_space_ids == 0) {
785		if (dirty_space_ids != 0) {
786			spin_unlock(&sid_lock);
787			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
788			spin_lock(&sid_lock);
789		}
790		BUG_ON(free_space_ids == 0);
791	}
792
793	free_space_ids--;
794
795	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
796	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
797	space_id_index = index;
798
799	spin_unlock(&sid_lock);
800
801	return index << SPACEID_SHIFT;
802}
803
804void free_sid(unsigned long spaceid)
805{
806	unsigned long index = spaceid >> SPACEID_SHIFT;
807	unsigned long *dirty_space_offset;
808
809	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
810	index &= (BITS_PER_LONG - 1);
811
812	spin_lock(&sid_lock);
813
814	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */
815
816	*dirty_space_offset |= (1L << index);
817	dirty_space_ids++;
818
819	spin_unlock(&sid_lock);
820}
821
822
823#ifdef CONFIG_SMP
824static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
825{
826	int i;
827
828	/* NOTE: sid_lock must be held upon entry */
829
830	*ndirtyptr = dirty_space_ids;
831	if (dirty_space_ids != 0) {
832	    for (i = 0; i < SID_ARRAY_SIZE; i++) {
833		dirty_array[i] = dirty_space_id[i];
834		dirty_space_id[i] = 0;
835	    }
836	    dirty_space_ids = 0;
837	}
838
839	return;
840}
841
842static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
843{
844	int i;
845
846	/* NOTE: sid_lock must be held upon entry */
847
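	/* The XOR below clears exactly the freed IDs: every bit set in
	 * dirty_array is also set in space_id, so ^= flips them to zero. */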
848	if (ndirty != 0) {
849		for (i = 0; i < SID_ARRAY_SIZE; i++) {
850			space_id[i] ^= dirty_array[i];
851		}
852
853		free_space_ids += ndirty;
854		space_id_index = 0;
855	}
856}
857
858#else /* CONFIG_SMP */
859
860static void recycle_sids(void)
861{
862	int i;
863
864	/* NOTE: sid_lock must be held upon entry */
865
866	if (dirty_space_ids != 0) {
867		for (i = 0; i < SID_ARRAY_SIZE; i++) {
868			space_id[i] ^= dirty_space_id[i];
869			dirty_space_id[i] = 0;
870		}
871
872		free_space_ids += dirty_space_ids;
873		dirty_space_ids = 0;
874		space_id_index = 0;
875	}
876}
877#endif
878
879/*
880 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
881 * purged, we can safely reuse the space ids that were released but
882 * not flushed from the tlb.
883 */
884
885#ifdef CONFIG_SMP
886
887static unsigned long recycle_ndirty;
888static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
889static unsigned int recycle_inuse;
890
891void flush_tlb_all(void)
892{
893	int do_recycle;
894
895	__inc_irq_stat(irq_tlb_count);
896	do_recycle = 0;
897	spin_lock(&sid_lock);
898	if (dirty_space_ids > RECYCLE_THRESHOLD) {
899	    BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
900	    get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
901	    recycle_inuse++;
902	    do_recycle++;
903	}
904	spin_unlock(&sid_lock);
905	on_each_cpu(flush_tlb_all_local, NULL, 1);
906	if (do_recycle) {
907	    spin_lock(&sid_lock);
908	    recycle_sids(recycle_ndirty,recycle_dirty_array);
909	    recycle_inuse = 0;
910	    spin_unlock(&sid_lock);
911	}
912}
913#else
914void flush_tlb_all(void)
915{
916	__inc_irq_stat(irq_tlb_count);
917	spin_lock(&sid_lock);
918	flush_tlb_all_local(NULL);
919	recycle_sids();
920	spin_unlock(&sid_lock);
921}
922#endif
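
The "mem=" handling in mem_limit_func() above scans boot_command_line by hand
because the limit is needed before the __setup() callbacks run. Below is a
minimal userspace sketch of the same scanning logic, assuming a made-up sample
command line and a simplified parse_size() standing in for the kernel's
memparse() (uppercase K/M/G suffixes only); both are illustrative assumptions,
not kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long parse_size(char *cp, char **end)
{
	unsigned long val = strtoul(cp, end, 0);

	switch (**end) {
	case 'G': val <<= 10;	/* fall through */
	case 'M': val <<= 10;	/* fall through */
	case 'K': val <<= 10;
		  (*end)++;
		  break;
	}
	return val;
}

int main(void)
{
	char cmdline[] = "console=ttyS0 mem=512M quiet";
	char *cp = cmdline;
	unsigned long limit = ~0UL;	/* stands in for MAX_MEM */

	while (*cp) {
		if (memcmp(cp, "mem=", 4) == 0) {
			char *end;

			cp += 4;
			limit = parse_size(cp, &end);
			if (end != cp)	/* a number was consumed: done */
				break;
			cp = end;
		} else {
			/* skip to the next blank-separated option */
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}
	printf("mem limit: %lu MB\n", limit >> 20);
	return 0;
}

Compiled and run, the sketch prints "mem limit: 512 MB" for the sample line,
matching what the kernel loop derives from "mem=512M".
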
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/arch/parisc/mm/init.c
  4 *
  5 *  Copyright (C) 1995	Linus Torvalds
  6 *  Copyright 1999 SuSE GmbH
  7 *    changed by Philipp Rumpf
  8 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
  9 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 10 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 11 *
 12 */
 13
 14
 15#include <linux/module.h>
 16#include <linux/mm.h>
 17#include <linux/memblock.h>
 18#include <linux/gfp.h>
 19#include <linux/delay.h>
 20#include <linux/init.h>
 21#include <linux/initrd.h>
 22#include <linux/swap.h>
 23#include <linux/unistd.h>
 24#include <linux/nodemask.h>	/* for node_online_map */
 25#include <linux/pagemap.h>	/* for release_pages */
 26#include <linux/compat.h>
 27
 28#include <asm/pgalloc.h>
 29#include <asm/tlb.h>
 30#include <asm/pdc_chassis.h>
 31#include <asm/mmzone.h>
 32#include <asm/sections.h>
 33#include <asm/msgbuf.h>
 34#include <asm/sparsemem.h>
 35
 36extern int  data_start;
 37extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */
 38
 39#if CONFIG_PGTABLE_LEVELS == 3
 40/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 41 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 42 * guarantee that global objects will be laid out in memory in the same order
 43 * as the order of declaration, so put these in different sections and use
 44 * the linker script to order them. */
 45pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
 46#endif
 47
 48pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
 49pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
 50
 51static struct resource data_resource = {
 52	.name	= "Kernel data",
 53	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 54};
 55
 56static struct resource code_resource = {
 57	.name	= "Kernel code",
 58	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 59};
 60
 61static struct resource pdcdata_resource = {
 62	.name	= "PDC data (Page Zero)",
 63	.start	= 0,
 64	.end	= 0x9ff,
 65	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
 66};
 67
 68static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;
 69
 70/* The following array is initialized from the firmware specific
 71 * information retrieved in kernel/inventory.c.
 72 */
 73
 74physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
 75int npmem_ranges __initdata;
 76
 77#ifdef CONFIG_64BIT
 78#define MAX_MEM         (1UL << MAX_PHYSMEM_BITS)
 79#else /* !CONFIG_64BIT */
 80#define MAX_MEM         (3584U*1024U*1024U)
 81#endif /* !CONFIG_64BIT */
 82
 83static unsigned long mem_limit __read_mostly = MAX_MEM;
 84
 85static void __init mem_limit_func(void)
 86{
 87	char *cp, *end;
 88	unsigned long limit;
 89
 90	/* We need this before __setup() functions are called */
 91
 92	limit = MAX_MEM;
 93	for (cp = boot_command_line; *cp; ) {
 94		if (memcmp(cp, "mem=", 4) == 0) {
 95			cp += 4;
 96			limit = memparse(cp, &end);
 97			if (end != cp)
 98				break;
 99			cp = end;
100		} else {
101			while (*cp != ' ' && *cp)
102				++cp;
103			while (*cp == ' ')
104				++cp;
105		}
106	}
107
108	if (limit < mem_limit)
109		mem_limit = limit;
110}
111
112#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
113
114static void __init setup_bootmem(void)
115{
116	unsigned long mem_max;
117#ifndef CONFIG_SPARSEMEM
118	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
119	int npmem_holes;
120#endif
121	int i, sysram_resource_count;
122
123	disable_sr_hashing(); /* Turn off space register hashing */
124
125	/*
126	 * Sort the ranges. Since the number of ranges is typically
127	 * small, and performance is not an issue here, just do
128	 * a simple insertion sort.
129	 */
130
131	for (i = 1; i < npmem_ranges; i++) {
132		int j;
133
134		for (j = i; j > 0; j--) {
135			physmem_range_t tmp;
136
137			if (pmem_ranges[j-1].start_pfn <
138			    pmem_ranges[j].start_pfn) {
139
140				break;
141			}
142			tmp = pmem_ranges[j-1];
143			pmem_ranges[j-1] = pmem_ranges[j];
144			pmem_ranges[j] = tmp;
145		}
146	}
147
148#ifndef CONFIG_SPARSEMEM
149	/*
150	 * Throw out ranges that are too far apart (controlled by
151	 * MAX_GAP).
152	 */
153
154	for (i = 1; i < npmem_ranges; i++) {
155		if (pmem_ranges[i].start_pfn -
156			(pmem_ranges[i-1].start_pfn +
157			 pmem_ranges[i-1].pages) > MAX_GAP) {
158			npmem_ranges = i;
159			printk("Large gap in memory detected (%ld pages). "
160			       "Consider turning on CONFIG_SPARSEMEM\n",
161			       pmem_ranges[i].start_pfn -
162			       (pmem_ranges[i-1].start_pfn +
163			        pmem_ranges[i-1].pages));
164			break;
165		}
166	}
167#endif
168
169	/* Print the memory ranges */
170	pr_info("Memory Ranges:\n");
171
172	for (i = 0; i < npmem_ranges; i++) {
173		struct resource *res = &sysram_resources[i];
174		unsigned long start;
175		unsigned long size;
176
177		size = (pmem_ranges[i].pages << PAGE_SHIFT);
178		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
179		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
180			i, start, start + (size - 1), size >> 20);
181
182		/* request memory resource */
183		res->name = "System RAM";
184		res->start = start;
185		res->end = start + size - 1;
186		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
187		request_resource(&iomem_resource, res);
188	}
189
190	sysram_resource_count = npmem_ranges;
191
192	/*
193	 * For 32 bit kernels we limit the amount of memory we can
194	 * support, in order to preserve enough kernel address space
195	 * for other purposes. For 64 bit kernels we don't normally
196	 * limit the memory, but this mechanism can be used to
197	 * artificially limit the amount of memory (and it is written
198	 * to work with multiple memory ranges).
199	 */
200
201	mem_limit_func();       /* check for "mem=" argument */
202
203	mem_max = 0;
204	for (i = 0; i < npmem_ranges; i++) {
205		unsigned long rsize;
206
207		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
208		if ((mem_max + rsize) > mem_limit) {
209			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
210			if (mem_max == mem_limit)
211				npmem_ranges = i;
212			else {
213				pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
214						       - (mem_max >> PAGE_SHIFT);
215				npmem_ranges = i + 1;
216				mem_max = mem_limit;
217			}
218			break;
219		}
220		mem_max += rsize;
221	}
222
223	printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);
224
225#ifndef CONFIG_SPARSEMEM
226	/* Merge the ranges, keeping track of the holes */
227	{
228		unsigned long end_pfn;
229		unsigned long hole_pages;
230
231		npmem_holes = 0;
232		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
233		for (i = 1; i < npmem_ranges; i++) {
234
235			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
236			if (hole_pages) {
237				pmem_holes[npmem_holes].start_pfn = end_pfn;
238				pmem_holes[npmem_holes++].pages = hole_pages;
239				end_pfn += hole_pages;
240			}
241			end_pfn += pmem_ranges[i].pages;
242		}
243
244		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
245		npmem_ranges = 1;
246	}
247#endif
248
249	/*
250	 * Initialize and free the full range of memory in each range.
251	 */
252
253	max_pfn = 0;
254	for (i = 0; i < npmem_ranges; i++) {
255		unsigned long start_pfn;
256		unsigned long npages;
257		unsigned long start;
258		unsigned long size;
259
260		start_pfn = pmem_ranges[i].start_pfn;
261		npages = pmem_ranges[i].pages;
262
263		start = start_pfn << PAGE_SHIFT;
264		size = npages << PAGE_SHIFT;
265
266		/* add system RAM memblock */
267		memblock_add(start, size);
268
269		if ((start_pfn + npages) > max_pfn)
270			max_pfn = start_pfn + npages;
271	}
272
273	/*
274	 * We can't use memblock top-down allocations because we only
275	 * created the initial mapping up to KERNEL_INITIAL_SIZE in
276	 * the assembly bootup code.
277	 */
278	memblock_set_bottom_up(true);
279
280	/* IOMMU is always used to access "high mem" on those boxes
281	 * that can support enough mem that a PCI device couldn't
282	 * directly DMA to any physical addresses.
283	 * ISA DMA support will need to revisit this.
284	 */
285	max_low_pfn = max_pfn;
286
287	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
288
289#define PDC_CONSOLE_IO_IODC_SIZE 32768
290
291	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
292				PDC_CONSOLE_IO_IODC_SIZE));
293	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
294			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));
295
296#ifndef CONFIG_SPARSEMEM
297
298	/* reserve the holes */
299
300	for (i = 0; i < npmem_holes; i++) {
301		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
302				(pmem_holes[i].pages << PAGE_SHIFT));
303	}
304#endif
305
306#ifdef CONFIG_BLK_DEV_INITRD
307	if (initrd_start) {
308		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
309		if (__pa(initrd_start) < mem_max) {
310			unsigned long initrd_reserve;
311
312			if (__pa(initrd_end) > mem_max) {
313				initrd_reserve = mem_max - __pa(initrd_start);
314			} else {
315				initrd_reserve = initrd_end - initrd_start;
316			}
317			initrd_below_start_ok = 1;
318			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
319
320			memblock_reserve(__pa(initrd_start), initrd_reserve);
321		}
322	}
323#endif
324
325	data_resource.start =  virt_to_phys(&data_start);
326	data_resource.end = virt_to_phys(_end) - 1;
327	code_resource.start = virt_to_phys(_text);
328	code_resource.end = virt_to_phys(&data_start)-1;
329
330	/* We don't know which region the kernel will be in, so try
331	 * all of them.
332	 */
333	for (i = 0; i < sysram_resource_count; i++) {
334		struct resource *res = &sysram_resources[i];
335		request_resource(res, &code_resource);
336		request_resource(res, &data_resource);
337	}
338	request_resource(&sysram_resources[0], &pdcdata_resource);
339
340	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
341	pdc_pdt_init();
342
343	memblock_allow_resize();
344	memblock_dump_all();
345}
346
347static bool kernel_set_to_readonly;
348
349static void __init map_pages(unsigned long start_vaddr,
350			     unsigned long start_paddr, unsigned long size,
351			     pgprot_t pgprot, int force)
352{
353	pmd_t *pmd;
354	pte_t *pg_table;
355	unsigned long end_paddr;
356	unsigned long start_pmd;
357	unsigned long start_pte;
358	unsigned long tmp1;
359	unsigned long tmp2;
360	unsigned long address;
361	unsigned long vaddr;
362	unsigned long ro_start;
363	unsigned long ro_end;
364	unsigned long kernel_start, kernel_end;
365
366	ro_start = __pa((unsigned long)_text);
367	ro_end   = __pa((unsigned long)&data_start);
368	kernel_start = __pa((unsigned long)&__init_begin);
369	kernel_end  = __pa((unsigned long)&_end);
370
371	end_paddr = start_paddr + size;
372
373	/* for a 2-level configuration PTRS_PER_PMD is 1, so the mask below makes start_pmd 0 */
374	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
375	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
376
377	address = start_paddr;
378	vaddr = start_vaddr;
379	while (address < end_paddr) {
380		pgd_t *pgd = pgd_offset_k(vaddr);
381		p4d_t *p4d = p4d_offset(pgd, vaddr);
382		pud_t *pud = pud_offset(p4d, vaddr);
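		/* On parisc the p4d and pud levels are folded, so these
		 * offsets collapse back onto the pgd entry itself. */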
383
384#if CONFIG_PGTABLE_LEVELS == 3
385		if (pud_none(*pud)) {
386			pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
387					     PAGE_SIZE << PMD_ORDER);
388			if (!pmd)
389				panic("pmd allocation failed.\n");
390			pud_populate(NULL, pud, pmd);
391		}
392#endif
393
394		pmd = pmd_offset(pud, vaddr);
395		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
396			if (pmd_none(*pmd)) {
397				pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
398				if (!pg_table)
399					panic("page table allocation failed\n");
400				pmd_populate_kernel(NULL, pmd, pg_table);
401			}
402
403			pg_table = pte_offset_kernel(pmd, vaddr);
404			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
405				pte_t pte;
406				pgprot_t prot;
407				bool huge = false;
408
409				if (force) {
410					prot = pgprot;
411				} else if (address < kernel_start || address >= kernel_end) {
412					/* outside kernel memory */
413					prot = PAGE_KERNEL;
414				} else if (!kernel_set_to_readonly) {
415					/* still initializing, allow writing to RO memory */
416					prot = PAGE_KERNEL_RWX;
417					huge = true;
418				} else if (address >= ro_start) {
419					/* Code (ro) and Data areas */
420					prot = (address < ro_end) ?
421						PAGE_KERNEL_EXEC : PAGE_KERNEL;
422					huge = true;
423				} else {
424					prot = PAGE_KERNEL;
425				}
426
427				pte = __mk_pte(address, prot);
428				if (huge)
429					pte = pte_mkhuge(pte);
430
431				if (address >= end_paddr)
432					break;
433
434				set_pte(pg_table, pte);
435
436				address += PAGE_SIZE;
437				vaddr += PAGE_SIZE;
438			}
439			start_pte = 0;
440
441			if (address >= end_paddr)
442			    break;
443		}
444		start_pmd = 0;
445	}
446}
447
448void __init set_kernel_text_rw(int enable_read_write)
449{
450	unsigned long start = (unsigned long) __init_begin;
451	unsigned long end   = (unsigned long) &data_start;
452
453	map_pages(start, __pa(start), end-start,
454		PAGE_KERNEL_RWX, enable_read_write ? 1:0);
455
456	/* force the kernel to see the new page table entries */
457	flush_cache_all();
458	flush_tlb_all();
459}
460
461void __ref free_initmem(void)
462{
463	unsigned long init_begin = (unsigned long)__init_begin;
464	unsigned long init_end = (unsigned long)__init_end;
465	unsigned long kernel_end  = (unsigned long)&_end;
466
467	/* Remap kernel text and data, but do not touch init section yet. */
468	kernel_set_to_readonly = true;
469	map_pages(init_end, __pa(init_end), kernel_end - init_end,
470		  PAGE_KERNEL, 0);
471
472	/* The init text pages are marked R-X.  We have to
473	 * flush the icache and mark them RW-
474	 *
475	 * This is tricky, because map_pages is in the init section.
476	 * Do a dummy remap of the data section first (the data
477	 * section is already PAGE_KERNEL) to pull in the TLB entries
478	 * for map_pages */
479	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
480		  PAGE_KERNEL_RWX, 1);
481	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
482	 * map_pages */
483	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
484		  PAGE_KERNEL, 1);
485
486	/* force the kernel to see the new TLB entries */
487	__flush_tlb_range(0, init_begin, kernel_end);
488
489	/* finally dump all the instructions which were cached, since the
490	 * pages are no longer executable */
491	flush_icache_range(init_begin, init_end);
492	
493	free_initmem_default(POISON_FREE_INITMEM);
494
495	/* set up a new LED state on systems shipped with an LED State panel */
496	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
497}
498
499
500#ifdef CONFIG_STRICT_KERNEL_RWX
501void mark_rodata_ro(void)
502{
503	/* rodata memory was already mapped with KERNEL_RO access rights by
504	 * pagetable_init() and map_pages(). No need to do additional stuff here */
505	unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;
506
507	pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
508}
509#endif
510
511
512/*
513 * Just an arbitrary offset to serve as a "hole" between mapping areas
514 * (between top of physical memory and a potential pcxl dma mapping
515 * area, and below the vmalloc mapping area).
516 *
517 * The current 32K value just means that there will be a 32K "hole"
518 * between mapping areas. That means that any out-of-bounds memory
519 * accesses will hopefully be caught. The vmalloc() routines leave
520 * a hole of 4kB between each vmalloced area for the same reason.
521 */
522
523 /* Leave room for gateway page expansion */
524#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
525#error KERNEL_MAP_START is in gateway reserved region
526#endif
527#define MAP_START (KERNEL_MAP_START)
528
529#define VM_MAP_OFFSET  (32*1024)
530#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
531				     & ~(VM_MAP_OFFSET-1)))
532
533void *parisc_vmalloc_start __ro_after_init;
534EXPORT_SYMBOL(parisc_vmalloc_start);
535
536#ifdef CONFIG_PA11
537unsigned long pcxl_dma_start __ro_after_init;
538#endif
539
540void __init mem_init(void)
541{
542	/* Do sanity checks on IPC (compat) structures */
543	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
544#ifndef CONFIG_64BIT
545	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
546	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
547	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
548#endif
549#ifdef CONFIG_COMPAT
550	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
551	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
552	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
553	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
554#endif
555
556	/* Do sanity checks on page table constants */
557	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
558	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
559	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
560	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
561			> BITS_PER_LONG);
562
563	high_memory = __va((max_pfn << PAGE_SHIFT));
564	set_max_mapnr(max_low_pfn);
565	memblock_free_all();
566
567#ifdef CONFIG_PA11
568	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
569		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
570		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
571						+ PCXL_DMA_MAP_SIZE);
572	} else
573#endif
574		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
575
576	mem_init_print_info(NULL);
577
578#if 0
579	/*
580	 * Do not expose the virtual kernel memory layout to userspace.
581	 * But keep code for debugging purposes.
582	 */
583	printk("virtual kernel memory layout:\n"
584	       "     vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
585	       "     fixmap  : 0x%px - 0x%px   (%4ld kB)\n"
586	       "     memory  : 0x%px - 0x%px   (%4ld MB)\n"
587	       "       .init : 0x%px - 0x%px   (%4ld kB)\n"
588	       "       .data : 0x%px - 0x%px   (%4ld kB)\n"
589	       "       .text : 0x%px - 0x%px   (%4ld kB)\n",
590
591	       (void*)VMALLOC_START, (void*)VMALLOC_END,
592	       (VMALLOC_END - VMALLOC_START) >> 20,
593
594	       (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
595	       (unsigned long)(FIXMAP_SIZE / 1024),
596
597	       __va(0), high_memory,
598	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
599
600	       __init_begin, __init_end,
601	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,
602
603	       _etext, _edata,
604	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,
605
606	       _text, _etext,
607	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
608#endif
609}
610
611unsigned long *empty_zero_page __ro_after_init;
612EXPORT_SYMBOL(empty_zero_page);
613
614/*
615 * pagetable_init() sets up the page tables
616 *
617 * Note that gateway_init() places the Linux gateway page at page 0.
618 * Since gateway pages cannot be dereferenced this has the desirable
619 * side effect of trapping those pesky NULL-reference errors in the
620 * kernel.
621 */
622static void __init pagetable_init(void)
623{
624	int range;
625
626	/* Map each physical memory range to its kernel vaddr */
627
628	for (range = 0; range < npmem_ranges; range++) {
629		unsigned long start_paddr;
630		unsigned long end_paddr;
631		unsigned long size;
632
633		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
634		size = pmem_ranges[range].pages << PAGE_SHIFT;
635		end_paddr = start_paddr + size;
636
637		map_pages((unsigned long)__va(start_paddr), start_paddr,
638			  size, PAGE_KERNEL, 0);
639	}
640
641#ifdef CONFIG_BLK_DEV_INITRD
642	if (initrd_end && initrd_end > mem_limit) {
643		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
644		map_pages(initrd_start, __pa(initrd_start),
645			  initrd_end - initrd_start, PAGE_KERNEL, 0);
646	}
647#endif
648
649	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
650	if (!empty_zero_page)
651		panic("zero page allocation failed.\n");
652
653}
654
655static void __init gateway_init(void)
656{
657	unsigned long linux_gateway_page_addr;
658	/* FIXME: This is 'const' in order to trick the compiler
659	   into not treating it as DP-relative data. */
660	extern void * const linux_gateway_page;
661
662	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;
663
664	/*
665	 * Setup Linux Gateway page.
666	 *
667	 * The Linux gateway page will reside in kernel space (on virtual
668	 * page 0), so it doesn't need to be aliased into user space.
669	 */
670
671	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
672		  PAGE_SIZE, PAGE_GATEWAY, 1);
673}
674
675static void __init parisc_bootmem_free(void)
676{
677	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
678
679	max_zone_pfn[0] = memblock_end_of_DRAM();
680
681	free_area_init(max_zone_pfn);
682}
683
684void __init paging_init(void)
685{
686	setup_bootmem();
687	pagetable_init();
688	gateway_init();
689	flush_cache_all_local(); /* start with known state */
690	flush_tlb_all_local(NULL);
691
692	sparse_init();
693	parisc_bootmem_free();
694}
695
696#ifdef CONFIG_PA20
697
698/*
699 * Currently, all PA20 chips have 18 bit protection IDs, which is the
700 * limiting factor (space ids are 32 bits).
701 */
702
703#define NR_SPACE_IDS 262144
704
705#else
706
707/*
708 * Currently we have a one-to-one relationship between space IDs and
709 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
710 * support 15 bit protection IDs, so that is the limiting factor.
711 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
712 * probably not worth the effort for a special case here.
713 */
714
715#define NR_SPACE_IDS 32768
716
717#endif  /* !CONFIG_PA20 */
718
719#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
720#define SID_ARRAY_SIZE  (NR_SPACE_IDS / (8 * sizeof(long)))
721
722static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
723static unsigned long dirty_space_id[SID_ARRAY_SIZE];
724static unsigned long space_id_index;
725static unsigned long free_space_ids = NR_SPACE_IDS - 1;
726static unsigned long dirty_space_ids = 0;
727
728static DEFINE_SPINLOCK(sid_lock);
729
730unsigned long alloc_sid(void)
731{
732	unsigned long index;
733
734	spin_lock(&sid_lock);
735
736	if (free_space_ids == 0) {
737		if (dirty_space_ids != 0) {
738			spin_unlock(&sid_lock);
739			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
740			spin_lock(&sid_lock);
741		}
742		BUG_ON(free_space_ids == 0);
743	}
744
745	free_space_ids--;
746
747	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
748	space_id[BIT_WORD(index)] |= BIT_MASK(index);
749	space_id_index = index;
750
751	spin_unlock(&sid_lock);
752
753	return index << SPACEID_SHIFT;
754}
755
756void free_sid(unsigned long spaceid)
757{
758	unsigned long index = spaceid >> SPACEID_SHIFT;
759	unsigned long *dirty_space_offset, mask;
760
761	dirty_space_offset = &dirty_space_id[BIT_WORD(index)];
762	mask = BIT_MASK(index);
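	/* BIT_WORD(n) is n / BITS_PER_LONG and BIT_MASK(n) is
	 * 1UL << (n % BITS_PER_LONG), so the explicit low-bit masking
	 * that v5.4 did by hand is no longer needed here. */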
763
764	spin_lock(&sid_lock);
765
766	BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */
767
768	*dirty_space_offset |= mask;
769	dirty_space_ids++;
770
771	spin_unlock(&sid_lock);
772}
773
774
775#ifdef CONFIG_SMP
776static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
777{
778	int i;
779
780	/* NOTE: sid_lock must be held upon entry */
781
782	*ndirtyptr = dirty_space_ids;
783	if (dirty_space_ids != 0) {
784	    for (i = 0; i < SID_ARRAY_SIZE; i++) {
785		dirty_array[i] = dirty_space_id[i];
786		dirty_space_id[i] = 0;
787	    }
788	    dirty_space_ids = 0;
789	}
790
791	return;
792}
793
794static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
795{
796	int i;
797
798	/* NOTE: sid_lock must be held upon entry */
799
800	if (ndirty != 0) {
801		for (i = 0; i < SID_ARRAY_SIZE; i++) {
802			space_id[i] ^= dirty_array[i];
803		}
804
805		free_space_ids += ndirty;
806		space_id_index = 0;
807	}
808}
809
810#else /* CONFIG_SMP */
811
812static void recycle_sids(void)
813{
814	int i;
815
816	/* NOTE: sid_lock must be held upon entry */
817
818	if (dirty_space_ids != 0) {
819		for (i = 0; i < SID_ARRAY_SIZE; i++) {
820			space_id[i] ^= dirty_space_id[i];
821			dirty_space_id[i] = 0;
822		}
823
824		free_space_ids += dirty_space_ids;
825		dirty_space_ids = 0;
826		space_id_index = 0;
827	}
828}
829#endif
830
831/*
832 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
833 * purged, we can safely reuse the space ids that were released but
834 * not flushed from the tlb.
835 */
836
837#ifdef CONFIG_SMP
838
839static unsigned long recycle_ndirty;
840static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
841static unsigned int recycle_inuse;
842
843void flush_tlb_all(void)
844{
845	int do_recycle;
846
847	__inc_irq_stat(irq_tlb_count);
848	do_recycle = 0;
849	spin_lock(&sid_lock);
850	if (dirty_space_ids > RECYCLE_THRESHOLD) {
851	    BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
852	    get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
853	    recycle_inuse++;
854	    do_recycle++;
855	}
856	spin_unlock(&sid_lock);
857	on_each_cpu(flush_tlb_all_local, NULL, 1);
858	if (do_recycle) {
859	    spin_lock(&sid_lock);
860	    recycle_sids(recycle_ndirty,recycle_dirty_array);
861	    recycle_inuse = 0;
862	    spin_unlock(&sid_lock);
863	}
864}
865#else
866void flush_tlb_all(void)
867{
868	__inc_irq_stat(irq_tlb_count);
869	spin_lock(&sid_lock);
870	flush_tlb_all_local(NULL);
871	recycle_sids();
872	spin_unlock(&sid_lock);
873}
874#endif
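
The space-ID lifecycle that both listings end with (alloc_sid(), free_sid(),
recycle_sids()) is easiest to see in miniature. Below is a hedged userspace
model of the scheme, with illustrative sizes and names rather than the
kernel's: allocated IDs live in one bitmap, freed IDs are parked in a dirty
bitmap, and recycle(), standing in for the full TLB flush in flush_tlb_all(),
XORs the dirty bits back into the free pool.

#include <assert.h>
#include <stdio.h>

#define NIDS	128
#define WORDS	(NIDS / 64)

static unsigned long ids[WORDS] = { 1 };	/* ID 0 reserved, as in space_id[] */
static unsigned long dirty[WORDS];

static int alloc_id(void)
{
	for (int i = 0; i < NIDS; i++)
		if (!(ids[i / 64] & (1UL << (i % 64)))) {
			ids[i / 64] |= 1UL << (i % 64);
			return i;
		}
	return -1;	/* caller would have to recycle first */
}

static void free_id(int i)
{
	assert(!(dirty[i / 64] & (1UL << (i % 64))));	/* double-free check */
	dirty[i / 64] |= 1UL << (i % 64);
}

static void recycle(void)	/* models recycle_sids() after a TLB flush */
{
	for (int w = 0; w < WORDS; w++) {
		ids[w] ^= dirty[w];	/* dirty bits are a subset of set bits */
		dirty[w] = 0;
	}
}

int main(void)
{
	int a = alloc_id(), b = alloc_id();

	printf("allocated %d and %d\n", a, b);			/* 1 and 2 */
	free_id(a);
	printf("before recycle: next is %d\n", alloc_id());	/* 3: ID 1 still dirty */
	recycle();
	printf("after recycle:  next is %d\n", alloc_id());	/* 1 again */
	return 0;
}

Running it shows ID 1 handed out, freed, skipped while still dirty, and only
handed out again after recycle(), which mirrors why alloc_sid() has to
trigger flush_tlb_all() once free_space_ids reaches zero.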