v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  linux/arch/parisc/mm/init.c
   4 *
   5 *  Copyright (C) 1995	Linus Torvalds
   6 *  Copyright 1999 SuSE GmbH
   7 *    changed by Philipp Rumpf
   8 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
   9 *  Copyright 2004 Randolph Chung (tausq@debian.org)
  10 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
  11 *
  12 */
  13
  14
  15#include <linux/module.h>
  16#include <linux/mm.h>
  17#include <linux/memblock.h>
  18#include <linux/gfp.h>
  19#include <linux/delay.h>
  20#include <linux/init.h>
  21#include <linux/initrd.h>
  22#include <linux/swap.h>
  23#include <linux/unistd.h>
  24#include <linux/nodemask.h>	/* for node_online_map */
  25#include <linux/pagemap.h>	/* for release_pages */
  26#include <linux/compat.h>
  27#include <linux/execmem.h>
  28
  29#include <asm/pgalloc.h>
  30#include <asm/tlb.h>
  31#include <asm/pdc_chassis.h>
  32#include <asm/mmzone.h>
  33#include <asm/sections.h>
  34#include <asm/msgbuf.h>
  35#include <asm/sparsemem.h>
  36#include <asm/asm-offsets.h>
  37#include <asm/shmbuf.h>
  38
  39extern int  data_start;
  40extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */
  41
  42#if CONFIG_PGTABLE_LEVELS == 3
  43pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
  44#endif
  45
  46pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".data..vm0.pgd") __attribute__ ((aligned(PAGE_SIZE)));
  47pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE)));
  48
  49static struct resource data_resource = {
  50	.name	= "Kernel data",
  51	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  52};
  53
  54static struct resource code_resource = {
  55	.name	= "Kernel code",
  56	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  57};
  58
  59static struct resource pdcdata_resource = {
  60	.name	= "PDC data (Page Zero)",
  61	.start	= 0,
  62	.end	= 0x9ff,
  63	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
  64};
  65
  66static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;
  67
  68/* The following array is initialized from the firmware specific
  69 * information retrieved in kernel/inventory.c.
  70 */
  71
  72physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
  73int npmem_ranges __initdata;
  74
  75#ifdef CONFIG_64BIT
  76#define MAX_MEM         (1UL << MAX_PHYSMEM_BITS)
  77#else /* !CONFIG_64BIT */
  78#define MAX_MEM         (3584U*1024U*1024U)
  79#endif /* !CONFIG_64BIT */
  80
  81static unsigned long mem_limit __read_mostly = MAX_MEM;
  82
  83static void __init mem_limit_func(void)
  84{
  85	char *cp, *end;
  86	unsigned long limit;
  87
  88	/* We need this before __setup() functions are called */
  89
  90	limit = MAX_MEM;
  91	for (cp = boot_command_line; *cp; ) {
  92		if (memcmp(cp, "mem=", 4) == 0) {
  93			cp += 4;
  94			limit = memparse(cp, &end);
  95			if (end != cp)
  96				break;
  97			cp = end;
  98		} else {
  99			while (*cp != ' ' && *cp)
 100				++cp;
 101			while (*cp == ' ')
 102				++cp;
 103		}
 104	}
 105
 106	if (limit < mem_limit)
 107		mem_limit = limit;
 108}
 109
 110#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
 111
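/*
 * setup_bootmem(): sort and, if necessary, truncate the firmware-reported
 * physical memory ranges, register them with memblock as System RAM, and
 * reserve page zero (PDC data), the kernel image, any inter-range holes
 * (when SPARSEMEM is off) and the initrd.
 */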
 112static void __init setup_bootmem(void)
 113{
 114	unsigned long mem_max;
 115#ifndef CONFIG_SPARSEMEM
 116	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
 117	int npmem_holes;
 118#endif
 119	int i, sysram_resource_count;
 120
 121	disable_sr_hashing(); /* Turn off space register hashing */
 122
 123	/*
 124	 * Sort the ranges. Since the number of ranges is typically
 125	 * small, and performance is not an issue here, just do
 126	 * a simple insertion sort.
 127	 */
 128
 129	for (i = 1; i < npmem_ranges; i++) {
 130		int j;
 131
 132		for (j = i; j > 0; j--) {
 133			if (pmem_ranges[j-1].start_pfn <
 134			    pmem_ranges[j].start_pfn) {
 135
 136				break;
 137			}
 138			swap(pmem_ranges[j-1], pmem_ranges[j]);
 139		}
 140	}
 141
 142#ifndef CONFIG_SPARSEMEM
 143	/*
 144	 * Throw out ranges that are too far apart (controlled by
 145	 * MAX_GAP).
 146	 */
 147
 148	for (i = 1; i < npmem_ranges; i++) {
 149		if (pmem_ranges[i].start_pfn -
 150			(pmem_ranges[i-1].start_pfn +
 151			 pmem_ranges[i-1].pages) > MAX_GAP) {
 152			npmem_ranges = i;
 153			printk("Large gap in memory detected (%ld pages). "
 154			       "Consider turning on CONFIG_SPARSEMEM\n",
 155			       pmem_ranges[i].start_pfn -
 156			       (pmem_ranges[i-1].start_pfn +
 157			        pmem_ranges[i-1].pages));
 158			break;
 159		}
 160	}
 161#endif
 162
 163	/* Print the memory ranges */
 164	pr_info("Memory Ranges:\n");
 165
 166	for (i = 0; i < npmem_ranges; i++) {
 167		struct resource *res = &sysram_resources[i];
 168		unsigned long start;
 169		unsigned long size;
 170
 171		size = (pmem_ranges[i].pages << PAGE_SHIFT);
 172		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
 173		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
 174			i, start, start + (size - 1), size >> 20);
 175
 176		/* request memory resource */
 177		res->name = "System RAM";
 178		res->start = start;
 179		res->end = start + size - 1;
 180		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 181		request_resource(&iomem_resource, res);
 182	}
 183
 184	sysram_resource_count = npmem_ranges;
 185
 186	/*
 187	 * For 32 bit kernels we limit the amount of memory we can
 188	 * support, in order to preserve enough kernel address space
 189	 * for other purposes. For 64 bit kernels we don't normally
 190	 * limit the memory, but this mechanism can be used to
 191	 * artificially limit the amount of memory (and it is written
 192	 * to work with multiple memory ranges).
 193	 */
 194
 195	mem_limit_func();       /* check for "mem=" argument */
 196
 197	mem_max = 0;
 198	for (i = 0; i < npmem_ranges; i++) {
 199		unsigned long rsize;
 200
 201		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
 202		if ((mem_max + rsize) > mem_limit) {
 203			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
 204			if (mem_max == mem_limit)
 205				npmem_ranges = i;
 206			else {
 207				pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
 208						       - (mem_max >> PAGE_SHIFT);
 209				npmem_ranges = i + 1;
 210				mem_max = mem_limit;
 211			}
 212			break;
 213		}
 214		mem_max += rsize;
 215	}
 216
 217	printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);
 218
 219#ifndef CONFIG_SPARSEMEM
 220	/* Merge the ranges, keeping track of the holes */
 221	{
 222		unsigned long end_pfn;
 223		unsigned long hole_pages;
 224
 225		npmem_holes = 0;
 226		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
 227		for (i = 1; i < npmem_ranges; i++) {
 228
 229			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
 230			if (hole_pages) {
 231				pmem_holes[npmem_holes].start_pfn = end_pfn;
 232				pmem_holes[npmem_holes++].pages = hole_pages;
 233				end_pfn += hole_pages;
 234			}
 235			end_pfn += pmem_ranges[i].pages;
 236		}
 237
 238		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
 239		npmem_ranges = 1;
 240	}
 241#endif
 242
 243	/*
 244	 * Initialize and free the full range of memory in each range.
 245	 */
 246
 247	max_pfn = 0;
 248	for (i = 0; i < npmem_ranges; i++) {
 249		unsigned long start_pfn;
 250		unsigned long npages;
 251		unsigned long start;
 252		unsigned long size;
 253
 254		start_pfn = pmem_ranges[i].start_pfn;
 255		npages = pmem_ranges[i].pages;
 256
 257		start = start_pfn << PAGE_SHIFT;
 258		size = npages << PAGE_SHIFT;
 259
 260		/* add system RAM memblock */
 261		memblock_add(start, size);
 262
 263		if ((start_pfn + npages) > max_pfn)
 264			max_pfn = start_pfn + npages;
 265	}
 266
 267	/*
 268	 * We can't use memblock top-down allocations because we only
 269	 * created the initial mapping up to KERNEL_INITIAL_SIZE in
 270	 * the assembly bootup code.
 271	 */
 272	memblock_set_bottom_up(true);
 273
 274	/* IOMMU is always used to access "high mem" on those boxes
 275	 * that can support enough mem that a PCI device couldn't
 276	 * directly DMA to any physical addresses.
 277	 * ISA DMA support will need to revisit this.
 278	 */
 279	max_low_pfn = max_pfn;
 280
 281	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
 282
 283#define PDC_CONSOLE_IO_IODC_SIZE 32768
 284
 285	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
 286				PDC_CONSOLE_IO_IODC_SIZE));
 287	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
 288			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));
 289
 290#ifndef CONFIG_SPARSEMEM
 291
 292	/* reserve the holes */
 293
 294	for (i = 0; i < npmem_holes; i++) {
 295		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
 296				(pmem_holes[i].pages << PAGE_SHIFT));
 297	}
 298#endif
 299
 300#ifdef CONFIG_BLK_DEV_INITRD
 301	if (initrd_start) {
 302		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
 303		if (__pa(initrd_start) < mem_max) {
 304			unsigned long initrd_reserve;
 305
 306			if (__pa(initrd_end) > mem_max) {
 307				initrd_reserve = mem_max - __pa(initrd_start);
 308			} else {
 309				initrd_reserve = initrd_end - initrd_start;
 310			}
 311			initrd_below_start_ok = 1;
 312			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
 313
 314			memblock_reserve(__pa(initrd_start), initrd_reserve);
 315		}
 316	}
 317#endif
 318
 319	data_resource.start =  virt_to_phys(&data_start);
 320	data_resource.end = virt_to_phys(_end) - 1;
 321	code_resource.start = virt_to_phys(_text);
 322	code_resource.end = virt_to_phys(&data_start)-1;
 323
 324	/* We don't know which region the kernel will be in, so try
 325	 * all of them.
 326	 */
 327	for (i = 0; i < sysram_resource_count; i++) {
 328		struct resource *res = &sysram_resources[i];
 329		request_resource(res, &code_resource);
 330		request_resource(res, &data_resource);
 331	}
 332	request_resource(&sysram_resources[0], &pdcdata_resource);
 333
 334	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
 335	pdc_pdt_init();
 336
 337	memblock_allow_resize();
 338	memblock_dump_all();
 339}
 340
 341static bool kernel_set_to_readonly;
 342
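/*
 * map_pages() maps the physical range [start_paddr, start_paddr + size)
 * at start_vaddr in the kernel page tables, allocating missing pmd/pte
 * pages from memblock.  With force == 0 the protection passed in is
 * overridden for kernel text/data so those areas keep their usual
 * permissions and can be mapped with huge pages.
 */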
 343static void __ref map_pages(unsigned long start_vaddr,
 344			    unsigned long start_paddr, unsigned long size,
 345			    pgprot_t pgprot, int force)
 346{
 347	pmd_t *pmd;
 348	pte_t *pg_table;
 349	unsigned long end_paddr;
 350	unsigned long start_pmd;
 351	unsigned long start_pte;
 352	unsigned long tmp1;
 353	unsigned long tmp2;
 354	unsigned long address;
 355	unsigned long vaddr;
 356	unsigned long ro_start;
 357	unsigned long ro_end;
 358	unsigned long kernel_start, kernel_end;
 359
 360	ro_start = __pa((unsigned long)_text);
 361	ro_end   = __pa((unsigned long)&data_start);
 362	kernel_start = __pa((unsigned long)&__init_begin);
 363	kernel_end  = __pa((unsigned long)&_end);
 364
 365	end_paddr = start_paddr + size;
 366
 367	/* for 2-level configuration PTRS_PER_PMD is 0 so start_pmd will be 0 */
 368	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
 369	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 370
 371	address = start_paddr;
 372	vaddr = start_vaddr;
 373	while (address < end_paddr) {
 374		pgd_t *pgd = pgd_offset_k(vaddr);
 375		p4d_t *p4d = p4d_offset(pgd, vaddr);
 376		pud_t *pud = pud_offset(p4d, vaddr);
 377
 378#if CONFIG_PGTABLE_LEVELS == 3
 379		if (pud_none(*pud)) {
 380			pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
 381					     PAGE_SIZE << PMD_TABLE_ORDER);
 382			if (!pmd)
 383				panic("pmd allocation failed.\n");
 384			pud_populate(NULL, pud, pmd);
 385		}
 386#endif
 387
 388		pmd = pmd_offset(pud, vaddr);
 389		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
 390			if (pmd_none(*pmd)) {
 391				pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 392				if (!pg_table)
 393					panic("page table allocation failed\n");
 394				pmd_populate_kernel(NULL, pmd, pg_table);
 395			}
 396
 397			pg_table = pte_offset_kernel(pmd, vaddr);
 398			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
 399				pte_t pte;
 400				pgprot_t prot;
 401				bool huge = false;
 402
 403				if (force) {
 404					prot = pgprot;
 405				} else if (address < kernel_start || address >= kernel_end) {
 406					/* outside kernel memory */
 407					prot = PAGE_KERNEL;
 408				} else if (!kernel_set_to_readonly) {
 409					/* still initializing, allow writing to RO memory */
 410					prot = PAGE_KERNEL_RWX;
 411					huge = true;
 412				} else if (address >= ro_start) {
 413					/* Code (ro) and Data areas */
 414					prot = (address < ro_end) ?
 415						PAGE_KERNEL_EXEC : PAGE_KERNEL;
 416					huge = true;
 417				} else {
 418					prot = PAGE_KERNEL;
 419				}
 420
 421				pte = __mk_pte(address, prot);
 422				if (huge)
 423					pte = pte_mkhuge(pte);
 424
 425				if (address >= end_paddr)
 426					break;
 427
 428				set_pte(pg_table, pte);
 429
 430				address += PAGE_SIZE;
 431				vaddr += PAGE_SIZE;
 432			}
 433			start_pte = 0;
 434
 435			if (address >= end_paddr)
 436			    break;
 437		}
 438		start_pmd = 0;
 439	}
 440}
 441
 442void __init set_kernel_text_rw(int enable_read_write)
 443{
 444	unsigned long start = (unsigned long) __init_begin;
 445	unsigned long end   = (unsigned long) &data_start;
 446
 447	map_pages(start, __pa(start), end-start,
 448		PAGE_KERNEL_RWX, enable_read_write ? 1:0);
 449
 450	/* force the kernel to see the new page table entries */
 451	flush_cache_all();
 452	flush_tlb_all();
 453}
 454
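/*
 * free_initmem(): drop the executable mapping of the init section, flush
 * it from the instruction cache, poison it and hand its pages back to
 * the page allocator.
 */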
 455void free_initmem(void)
 456{
 457	unsigned long init_begin = (unsigned long)__init_begin;
 458	unsigned long init_end = (unsigned long)__init_end;
 459	unsigned long kernel_end  = (unsigned long)&_end;
 460
 461	/* Remap kernel text and data, but do not touch init section yet. */
 462	map_pages(init_end, __pa(init_end), kernel_end - init_end,
 463		  PAGE_KERNEL, 0);
 464
 465	/* The init text pages are marked R-X.  We have to
 466	 * flush the icache and mark them RW-
 467	 *
 468	 * Do a dummy remap of the data section first (the data
 469	 * section is already PAGE_KERNEL) to pull in the TLB entries
 470	 * for map_kernel */
 471	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
 472		  PAGE_KERNEL_RWX, 1);
 473	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
 474	 * map_pages */
 475	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
 476		  PAGE_KERNEL, 1);
 477
 478	/* force the kernel to see the new TLB entries */
 479	__flush_tlb_range(0, init_begin, kernel_end);
 480
 481	/* finally dump all the instructions which were cached, since the
 482	 * pages are no-longer executable */
 483	flush_icache_range(init_begin, init_end);
 484
 485	free_initmem_default(POISON_FREE_INITMEM);
 486
  487	/* set up a new LED state on systems shipped with an LED State panel */
 488	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
 489}
 490
 491
 492#ifdef CONFIG_STRICT_KERNEL_RWX
 493void mark_rodata_ro(void)
 494{
 495	unsigned long start = (unsigned long) &__start_rodata;
 496	unsigned long end = (unsigned long) &__end_rodata;
 497
 498	pr_info("Write protecting the kernel read-only data: %luk\n",
 499	       (end - start) >> 10);
 500
 501	kernel_set_to_readonly = true;
 502	map_pages(start, __pa(start), end - start, PAGE_KERNEL, 0);
 503
 504	/* force the kernel to see the new page table entries */
 505	flush_cache_all();
 506	flush_tlb_all();
 507}
 508#endif
 509
 510
 511/*
 512 * Just an arbitrary offset to serve as a "hole" between mapping areas
 513 * (between top of physical memory and a potential pcxl dma mapping
 514 * area, and below the vmalloc mapping area).
 515 *
 516 * The current 32K value just means that there will be a 32K "hole"
  517 * between mapping areas. That means that any out-of-bounds memory
  518 * accesses will hopefully be caught. The vmalloc() routines leave
 519 * a hole of 4kB between each vmalloced area for the same reason.
 520 */
 521
 522 /* Leave room for gateway page expansion */
 523#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
 524#error KERNEL_MAP_START is in gateway reserved region
 525#endif
 526#define MAP_START (KERNEL_MAP_START)
 527
 528#define VM_MAP_OFFSET  (32*1024)
 529#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
 530				     & ~(VM_MAP_OFFSET-1)))
 531
 532void *parisc_vmalloc_start __ro_after_init;
 533EXPORT_SYMBOL(parisc_vmalloc_start);
 534
 535void __init mem_init(void)
 536{
 537	/* Do sanity checks on IPC (compat) structures */
 538	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
 539#ifndef CONFIG_64BIT
 540	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
 541	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
 542	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
 543#endif
 544#ifdef CONFIG_COMPAT
 545	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
 546	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
 547	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
 548	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
 549#endif
 550
 551	/* Do sanity checks on page table constants */
 552	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
 553	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
 554	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
 555	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
 556			> BITS_PER_LONG);
 557#if CONFIG_PGTABLE_LEVELS == 3
 558	BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD);
 559#else
 560	BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD);
 561#endif
 562
 563#ifdef CONFIG_64BIT
 564	/* avoid ldil_%L() asm statements to sign-extend into upper 32-bits */
 565	BUILD_BUG_ON(__PAGE_OFFSET >= 0x80000000);
 566	BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000);
 567#endif
 568
 569	high_memory = __va((max_pfn << PAGE_SHIFT));
 570	set_max_mapnr(max_low_pfn);
 571	memblock_free_all();
 572
 573#ifdef CONFIG_PA11
 574	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
 575		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
 576		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
 577						+ PCXL_DMA_MAP_SIZE);
 578	} else
 579#endif
 580		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
 581
 582#if 0
 583	/*
 584	 * Do not expose the virtual kernel memory layout to userspace.
 585	 * But keep code for debugging purposes.
 586	 */
 587	printk("virtual kernel memory layout:\n"
 588	       "     vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
 589	       "     fixmap  : 0x%px - 0x%px   (%4ld kB)\n"
 590	       "     memory  : 0x%px - 0x%px   (%4ld MB)\n"
 591	       "       .init : 0x%px - 0x%px   (%4ld kB)\n"
 592	       "       .data : 0x%px - 0x%px   (%4ld kB)\n"
 593	       "       .text : 0x%px - 0x%px   (%4ld kB)\n",
 594
 595	       (void*)VMALLOC_START, (void*)VMALLOC_END,
 596	       (VMALLOC_END - VMALLOC_START) >> 20,
 597
 598	       (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
 599	       (unsigned long)(FIXMAP_SIZE / 1024),
 600
 601	       __va(0), high_memory,
 602	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
 603
 604	       __init_begin, __init_end,
 605	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,
 606
 607	       _etext, _edata,
 608	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,
 609
 610	       _text, _etext,
 611	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
 612#endif
 613}
 614
 615unsigned long *empty_zero_page __ro_after_init;
 616EXPORT_SYMBOL(empty_zero_page);
 617
 618/*
 619 * pagetable_init() sets up the page tables
 620 *
 621 * Note that gateway_init() places the Linux gateway page at page 0.
 622 * Since gateway pages cannot be dereferenced this has the desirable
 623 * side effect of trapping those pesky NULL-reference errors in the
 624 * kernel.
 625 */
 626static void __init pagetable_init(void)
 627{
 628	int range;
 629
 630	/* Map each physical memory range to its kernel vaddr */
 631
 632	for (range = 0; range < npmem_ranges; range++) {
 633		unsigned long start_paddr;
 634		unsigned long size;
 635
 636		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
 637		size = pmem_ranges[range].pages << PAGE_SHIFT;
 638
 639		map_pages((unsigned long)__va(start_paddr), start_paddr,
 640			  size, PAGE_KERNEL, 0);
 641	}
 642
 643#ifdef CONFIG_BLK_DEV_INITRD
 644	if (initrd_end && initrd_end > mem_limit) {
 645		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
 646		map_pages(initrd_start, __pa(initrd_start),
 647			  initrd_end - initrd_start, PAGE_KERNEL, 0);
 648	}
 649#endif
 650
 651	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 652	if (!empty_zero_page)
 653		panic("zero page allocation failed.\n");
 654
 655}
 656
 657static void __init gateway_init(void)
 658{
 659	unsigned long linux_gateway_page_addr;
 660	/* FIXME: This is 'const' in order to trick the compiler
 661	   into not treating it as DP-relative data. */
 662	extern void * const linux_gateway_page;
 663
 664	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;
 665
 666	/*
 667	 * Setup Linux Gateway page.
 668	 *
 669	 * The Linux gateway page will reside in kernel space (on virtual
 670	 * page 0), so it doesn't need to be aliased into user space.
 671	 */
 672
 673	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
 674		  PAGE_SIZE, PAGE_GATEWAY, 1);
 675}
 676
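/*
 * fixmap_init() pre-allocates the pmd/pte pages backing the fixmap
 * region so that fixmap entries can be installed later without having
 * to allocate page tables.
 */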
 677static void __init fixmap_init(void)
 678{
 679	unsigned long addr = FIXMAP_START;
 680	unsigned long end = FIXMAP_START + FIXMAP_SIZE;
 681	pgd_t *pgd = pgd_offset_k(addr);
 682	p4d_t *p4d = p4d_offset(pgd, addr);
 683	pud_t *pud = pud_offset(p4d, addr);
 684	pmd_t *pmd;
 685
 686	BUILD_BUG_ON(FIXMAP_SIZE > PMD_SIZE);
 687
 688#if CONFIG_PGTABLE_LEVELS == 3
 689	if (pud_none(*pud)) {
 690		pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
 691				     PAGE_SIZE << PMD_TABLE_ORDER);
 692		if (!pmd)
 693			panic("fixmap: pmd allocation failed.\n");
 694		pud_populate(NULL, pud, pmd);
 695	}
 696#endif
 697
 698	pmd = pmd_offset(pud, addr);
 699	do {
 700		pte_t *pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 701		if (!pte)
 702			panic("fixmap: pte allocation failed.\n");
 703
 704		pmd_populate_kernel(&init_mm, pmd, pte);
 705
 706		addr += PAGE_SIZE;
 707	} while (addr < end);
 708}
 709
 710static void __init parisc_bootmem_free(void)
 711{
 712	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
 713
 714	max_zone_pfn[0] = memblock_end_of_DRAM();
 715
 716	free_area_init(max_zone_pfn);
 717}
 718
 719void __init paging_init(void)
 720{
 721	setup_bootmem();
 722	pagetable_init();
 723	gateway_init();
 724	fixmap_init();
 725	flush_cache_all_local(); /* start with known state */
 726	flush_tlb_all_local(NULL);
 727
 728	sparse_init();
 729	parisc_bootmem_free();
 730}
 731
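/*
 * alloc_btlb() covers [start, end) with fixed block-TLB entries inserted
 * through PDC firmware, greedily using the largest naturally aligned
 * power-of-two size that still fits.  btlb_init_per_cpu() below uses it
 * to pin the kernel code and data segments on 32-bit machines.
 */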
 732static void alloc_btlb(unsigned long start, unsigned long end, int *slot,
 733			unsigned long entry_info)
 734{
 735	const int slot_max = btlb_info.fixed_range_info.num_comb;
 736	int min_num_pages = btlb_info.min_size;
 737	unsigned long size;
 738
 739	/* map at minimum 4 pages */
 740	if (min_num_pages < 4)
 741		min_num_pages = 4;
 742
 743	size = HUGEPAGE_SIZE;
 744	while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
 745		/* starting address must have same alignment as size! */
 746		/* if correctly aligned and fits in double size, increase */
 747		if (((start & (2 * size - 1)) == 0) &&
 748		    (end - start) >= (2 * size)) {
 749			size <<= 1;
 750			continue;
 751		}
 752		/* if current size alignment is too big, try smaller size */
 753		if ((start & (size - 1)) != 0) {
 754			size >>= 1;
 755			continue;
 756		}
 757		if ((end - start) >= size) {
 758			if ((size >> PAGE_SHIFT) >= min_num_pages)
 759				pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT,
 760					size >> PAGE_SHIFT, entry_info, *slot);
 761			(*slot)++;
 762			start += size;
 763			continue;
 764		}
 765		size /= 2;
 766		continue;
 767	}
 768}
 769
 770void btlb_init_per_cpu(void)
 771{
 772	unsigned long s, t, e;
 773	int slot;
 774
 775	/* BTLBs are not available on 64-bit CPUs */
 776	if (IS_ENABLED(CONFIG_PA20))
 777		return;
 778	else if (pdc_btlb_info(&btlb_info) < 0) {
 779		memset(&btlb_info, 0, sizeof btlb_info);
 780	}
 781
  782	/* insert BTLBs for code and data segments */
 783	s = (uintptr_t) dereference_function_descriptor(&_stext);
 784	e = (uintptr_t) dereference_function_descriptor(&_etext);
 785	t = (uintptr_t) dereference_function_descriptor(&_sdata);
 786	BUG_ON(t != e);
 787
 788	/* code segments */
 789	slot = 0;
 790	alloc_btlb(s, e, &slot, 0x13800000);
 791
 792	/* sanity check */
 793	t = (uintptr_t) dereference_function_descriptor(&_edata);
 794	e = (uintptr_t) dereference_function_descriptor(&__bss_start);
 795	BUG_ON(t != e);
 796
 797	/* data segments */
 798	s = (uintptr_t) dereference_function_descriptor(&_sdata);
 799	e = (uintptr_t) dereference_function_descriptor(&__bss_stop);
 800	alloc_btlb(s, e, &slot, 0x11800000);
 801}
 802
 803#ifdef CONFIG_PA20
 804
 805/*
 806 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 807 * limiting factor (space ids are 32 bits).
 808 */
 809
 810#define NR_SPACE_IDS 262144
 811
 812#else
 813
 814/*
 815 * Currently we have a one-to-one relationship between space IDs and
 816 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 817 * support 15 bit protection IDs, so that is the limiting factor.
 818 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 819 * probably not worth the effort for a special case here.
 820 */
 821
 822#define NR_SPACE_IDS 32768
 823
 824#endif  /* !CONFIG_PA20 */
 825
 826#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
 827#define SID_ARRAY_SIZE  (NR_SPACE_IDS / (8 * sizeof(long)))
 828
 829static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
 830static unsigned long dirty_space_id[SID_ARRAY_SIZE];
 831static unsigned long space_id_index;
 832static unsigned long free_space_ids = NR_SPACE_IDS - 1;
 833static unsigned long dirty_space_ids;
 834
 835static DEFINE_SPINLOCK(sid_lock);
 836
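/*
 * alloc_sid() hands out the next free space ID from the space_id[]
 * bitmap.  If none are free it calls flush_tlb_all(), which recycles
 * "dirty" IDs, i.e. IDs that were freed but not yet purged from the TLB.
 */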
 837unsigned long alloc_sid(void)
 838{
 839	unsigned long index;
 840
 841	spin_lock(&sid_lock);
 842
 843	if (free_space_ids == 0) {
 844		if (dirty_space_ids != 0) {
 845			spin_unlock(&sid_lock);
 846			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
 847			spin_lock(&sid_lock);
 848		}
 849		BUG_ON(free_space_ids == 0);
 850	}
 851
 852	free_space_ids--;
 853
 854	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
 855	space_id[BIT_WORD(index)] |= BIT_MASK(index);
 856	space_id_index = index;
 857
 858	spin_unlock(&sid_lock);
 859
 860	return index << SPACEID_SHIFT;
 861}
 862
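/*
 * free_sid() only marks the ID as dirty; its bit in space_id[] is
 * cleared, and the ID becomes reusable, once recycle_sids() runs after
 * a full TLB flush.
 */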
 863void free_sid(unsigned long spaceid)
 864{
 865	unsigned long index = spaceid >> SPACEID_SHIFT;
 866	unsigned long *dirty_space_offset, mask;
 867
 868	dirty_space_offset = &dirty_space_id[BIT_WORD(index)];
 869	mask = BIT_MASK(index);
 870
 871	spin_lock(&sid_lock);
 872
 873	BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */
 874
 875	*dirty_space_offset |= mask;
 876	dirty_space_ids++;
 877
 878	spin_unlock(&sid_lock);
 879}
 880
 881
 882#ifdef CONFIG_SMP
 883static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
 884{
 885	int i;
 886
 887	/* NOTE: sid_lock must be held upon entry */
 888
 889	*ndirtyptr = dirty_space_ids;
 890	if (dirty_space_ids != 0) {
 891	    for (i = 0; i < SID_ARRAY_SIZE; i++) {
 892		dirty_array[i] = dirty_space_id[i];
 893		dirty_space_id[i] = 0;
 894	    }
 895	    dirty_space_ids = 0;
 896	}
 897
 898	return;
 899}
 900
 901static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
 902{
 903	int i;
 904
 905	/* NOTE: sid_lock must be held upon entry */
 906
 907	if (ndirty != 0) {
 908		for (i = 0; i < SID_ARRAY_SIZE; i++) {
 909			space_id[i] ^= dirty_array[i];
 910		}
 911
 912		free_space_ids += ndirty;
 913		space_id_index = 0;
 914	}
 915}
 916
 917#else /* CONFIG_SMP */
 918
 919static void recycle_sids(void)
 920{
 921	int i;
 922
 923	/* NOTE: sid_lock must be held upon entry */
 924
 925	if (dirty_space_ids != 0) {
 926		for (i = 0; i < SID_ARRAY_SIZE; i++) {
 927			space_id[i] ^= dirty_space_id[i];
 928			dirty_space_id[i] = 0;
 929		}
 930
 931		free_space_ids += dirty_space_ids;
 932		dirty_space_ids = 0;
 933		space_id_index = 0;
 934	}
 935}
 936#endif
 937
 938/*
 939 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 940 * purged, we can safely reuse the space ids that were released but
 941 * not flushed from the tlb.
 942 */
 943
 944#ifdef CONFIG_SMP
 945
 946static unsigned long recycle_ndirty;
 947static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
 948static unsigned int recycle_inuse;
 949
 950void flush_tlb_all(void)
 951{
 952	int do_recycle;
 953
 954	do_recycle = 0;
 955	spin_lock(&sid_lock);
 956	__inc_irq_stat(irq_tlb_count);
 957	if (dirty_space_ids > RECYCLE_THRESHOLD) {
 958	    BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
 959	    get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
 960	    recycle_inuse++;
 961	    do_recycle++;
 962	}
 963	spin_unlock(&sid_lock);
 964	on_each_cpu(flush_tlb_all_local, NULL, 1);
 965	if (do_recycle) {
 966	    spin_lock(&sid_lock);
 967	    recycle_sids(recycle_ndirty,recycle_dirty_array);
 968	    recycle_inuse = 0;
 969	    spin_unlock(&sid_lock);
 970	}
 971}
 972#else
 973void flush_tlb_all(void)
 974{
 975	spin_lock(&sid_lock);
 976	__inc_irq_stat(irq_tlb_count);
 977	flush_tlb_all_local(NULL);
 978	recycle_sids();
 979	spin_unlock(&sid_lock);
 980}
 981#endif
 982
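/*
 * protection_map[] gives the parisc page protection for each combination
 * of VM_READ/VM_WRITE/VM_EXEC/VM_SHARED; DECLARE_VM_GET_PAGE_PROT below
 * generates vm_get_page_prot() from this table.
 */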
 983static const pgprot_t protection_map[16] = {
 984	[VM_NONE]					= PAGE_NONE,
 985	[VM_READ]					= PAGE_READONLY,
 986	[VM_WRITE]					= PAGE_NONE,
 987	[VM_WRITE | VM_READ]				= PAGE_READONLY,
 988	[VM_EXEC]					= PAGE_EXECREAD,
 989	[VM_EXEC | VM_READ]				= PAGE_EXECREAD,
 990	[VM_EXEC | VM_WRITE]				= PAGE_EXECREAD,
 991	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_EXECREAD,
 992	[VM_SHARED]					= PAGE_NONE,
 993	[VM_SHARED | VM_READ]				= PAGE_READONLY,
 994	[VM_SHARED | VM_WRITE]				= PAGE_WRITEONLY,
 995	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
 996	[VM_SHARED | VM_EXEC]				= PAGE_EXECREAD,
 997	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_EXECREAD,
 998	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
 999	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
1000};
1001DECLARE_VM_GET_PAGE_PROT
1002
1003#ifdef CONFIG_EXECMEM
1004static struct execmem_info execmem_info __ro_after_init;
1005
1006struct execmem_info __init *execmem_arch_setup(void)
1007{
1008	execmem_info = (struct execmem_info){
1009		.ranges = {
1010			[EXECMEM_DEFAULT] = {
1011				.start	= VMALLOC_START,
1012				.end	= VMALLOC_END,
1013				.pgprot	= PAGE_KERNEL_RWX,
1014				.alignment = 1,
1015			},
1016		},
1017	};
1018
1019	return &execmem_info;
1020}
1021#endif /* CONFIG_EXECMEM */
v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/arch/parisc/mm/init.c
  4 *
  5 *  Copyright (C) 1995	Linus Torvalds
  6 *  Copyright 1999 SuSE GmbH
  7 *    changed by Philipp Rumpf
  8 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
  9 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 10 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 11 *
 12 */
 13
 14
 15#include <linux/module.h>
 16#include <linux/mm.h>
 17#include <linux/bootmem.h>
 18#include <linux/memblock.h>
 19#include <linux/gfp.h>
 20#include <linux/delay.h>
 21#include <linux/init.h>
 22#include <linux/pci.h>		/* for hppa_dma_ops and pcxl_dma_ops */
 23#include <linux/initrd.h>
 24#include <linux/swap.h>
 25#include <linux/unistd.h>
 26#include <linux/nodemask.h>	/* for node_online_map */
 27#include <linux/pagemap.h>	/* for release_pages */
 28#include <linux/compat.h>
 29
 30#include <asm/pgalloc.h>
 31#include <asm/pgtable.h>
 32#include <asm/tlb.h>
 33#include <asm/pdc_chassis.h>
 34#include <asm/mmzone.h>
 35#include <asm/sections.h>
 36#include <asm/msgbuf.h>
 37
 38extern int  data_start;
 39extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */
 40
 41#if CONFIG_PGTABLE_LEVELS == 3
 42/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 43 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 44 * guarantee that global objects will be laid out in memory in the same order
 45 * as the order of declaration, so put these in different sections and use
 46 * the linker script to order them. */
 47pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
 48#endif
 49
 50pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
 51pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
 52
 53#ifdef CONFIG_DISCONTIGMEM
 54struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
 55signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
 56#endif
 57
 58static struct resource data_resource = {
 59	.name	= "Kernel data",
 60	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 61};
 62
 63static struct resource code_resource = {
 64	.name	= "Kernel code",
 65	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 66};
 67
 68static struct resource pdcdata_resource = {
 69	.name	= "PDC data (Page Zero)",
 70	.start	= 0,
 71	.end	= 0x9ff,
 72	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
 73};
 74
 75static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
 76
 77/* The following array is initialized from the firmware specific
 78 * information retrieved in kernel/inventory.c.
 79 */
 80
 81physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
 82int npmem_ranges __read_mostly;
 83
 84/*
 85 * get_memblock() allocates pages via memblock.
 86 * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it
 87 * doesn't allocate from bottom to top which is needed because we only created
 88 * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code.
 89 */
 90static void * __init get_memblock(unsigned long size)
 91{
 92	static phys_addr_t search_addr __initdata;
 93	phys_addr_t phys;
 94
 95	if (!search_addr)
 96		search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));
 97	search_addr = ALIGN(search_addr, size);
 98	while (!memblock_is_region_memory(search_addr, size) ||
 99		memblock_is_region_reserved(search_addr, size)) {
100		search_addr += size;
101	}
102	phys = search_addr;
103
104	if (phys)
105		memblock_reserve(phys, size);
106	else
107		panic("get_memblock() failed.\n");
108
109	memset(__va(phys), 0, size);
110
111	return __va(phys);
112}
113
114#ifdef CONFIG_64BIT
115#define MAX_MEM         (~0UL)
116#else /* !CONFIG_64BIT */
117#define MAX_MEM         (3584U*1024U*1024U)
118#endif /* !CONFIG_64BIT */
119
120static unsigned long mem_limit __read_mostly = MAX_MEM;
121
122static void __init mem_limit_func(void)
123{
124	char *cp, *end;
125	unsigned long limit;
126
127	/* We need this before __setup() functions are called */
128
129	limit = MAX_MEM;
130	for (cp = boot_command_line; *cp; ) {
131		if (memcmp(cp, "mem=", 4) == 0) {
132			cp += 4;
133			limit = memparse(cp, &end);
134			if (end != cp)
135				break;
136			cp = end;
137		} else {
138			while (*cp != ' ' && *cp)
139				++cp;
140			while (*cp == ' ')
141				++cp;
142		}
143	}
144
145	if (limit < mem_limit)
146		mem_limit = limit;
147}
148
149#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
150
151static void __init setup_bootmem(void)
152{
153	unsigned long mem_max;
154#ifndef CONFIG_DISCONTIGMEM
155	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
156	int npmem_holes;
157#endif
158	int i, sysram_resource_count;
159
160	disable_sr_hashing(); /* Turn off space register hashing */
161
162	/*
163	 * Sort the ranges. Since the number of ranges is typically
164	 * small, and performance is not an issue here, just do
165	 * a simple insertion sort.
166	 */
167
168	for (i = 1; i < npmem_ranges; i++) {
169		int j;
170
171		for (j = i; j > 0; j--) {
172			unsigned long tmp;
173
174			if (pmem_ranges[j-1].start_pfn <
175			    pmem_ranges[j].start_pfn) {
176
177				break;
178			}
179			tmp = pmem_ranges[j-1].start_pfn;
180			pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
181			pmem_ranges[j].start_pfn = tmp;
182			tmp = pmem_ranges[j-1].pages;
183			pmem_ranges[j-1].pages = pmem_ranges[j].pages;
184			pmem_ranges[j].pages = tmp;
185		}
186	}
187
188#ifndef CONFIG_DISCONTIGMEM
189	/*
190	 * Throw out ranges that are too far apart (controlled by
191	 * MAX_GAP).
192	 */
193
194	for (i = 1; i < npmem_ranges; i++) {
195		if (pmem_ranges[i].start_pfn -
196			(pmem_ranges[i-1].start_pfn +
197			 pmem_ranges[i-1].pages) > MAX_GAP) {
198			npmem_ranges = i;
199			printk("Large gap in memory detected (%ld pages). "
200			       "Consider turning on CONFIG_DISCONTIGMEM\n",
201			       pmem_ranges[i].start_pfn -
202			       (pmem_ranges[i-1].start_pfn +
203			        pmem_ranges[i-1].pages));
204			break;
205		}
206	}
207#endif
208
209	/* Print the memory ranges */
210	pr_info("Memory Ranges:\n");
211
212	for (i = 0; i < npmem_ranges; i++) {
213		struct resource *res = &sysram_resources[i];
214		unsigned long start;
215		unsigned long size;
216
217		size = (pmem_ranges[i].pages << PAGE_SHIFT);
218		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
219		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
220			i, start, start + (size - 1), size >> 20);
221
222		/* request memory resource */
223		res->name = "System RAM";
224		res->start = start;
225		res->end = start + size - 1;
226		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
227		request_resource(&iomem_resource, res);
228	}
229
230	sysram_resource_count = npmem_ranges;
231
232	/*
233	 * For 32 bit kernels we limit the amount of memory we can
234	 * support, in order to preserve enough kernel address space
235	 * for other purposes. For 64 bit kernels we don't normally
236	 * limit the memory, but this mechanism can be used to
237	 * artificially limit the amount of memory (and it is written
238	 * to work with multiple memory ranges).
239	 */
240
241	mem_limit_func();       /* check for "mem=" argument */
242
243	mem_max = 0;
244	for (i = 0; i < npmem_ranges; i++) {
245		unsigned long rsize;
246
247		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
248		if ((mem_max + rsize) > mem_limit) {
249			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
250			if (mem_max == mem_limit)
251				npmem_ranges = i;
252			else {
253				pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
254						       - (mem_max >> PAGE_SHIFT);
255				npmem_ranges = i + 1;
256				mem_max = mem_limit;
257			}
258			break;
259		}
260		mem_max += rsize;
261	}
262
263	printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);
264
265#ifndef CONFIG_DISCONTIGMEM
266	/* Merge the ranges, keeping track of the holes */
267
268	{
269		unsigned long end_pfn;
270		unsigned long hole_pages;
271
272		npmem_holes = 0;
273		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
274		for (i = 1; i < npmem_ranges; i++) {
275
276			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
277			if (hole_pages) {
278				pmem_holes[npmem_holes].start_pfn = end_pfn;
279				pmem_holes[npmem_holes++].pages = hole_pages;
280				end_pfn += hole_pages;
281			}
282			end_pfn += pmem_ranges[i].pages;
283		}
284
285		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
286		npmem_ranges = 1;
287	}
288#endif
289
290#ifdef CONFIG_DISCONTIGMEM
291	for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
292		memset(NODE_DATA(i), 0, sizeof(pg_data_t));
293	}
294	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
295
296	for (i = 0; i < npmem_ranges; i++) {
297		node_set_state(i, N_NORMAL_MEMORY);
298		node_set_online(i);
299	}
300#endif
301
302	/*
303	 * Initialize and free the full range of memory in each range.
304	 */
305
306	max_pfn = 0;
307	for (i = 0; i < npmem_ranges; i++) {
308		unsigned long start_pfn;
309		unsigned long npages;
310		unsigned long start;
311		unsigned long size;
312
313		start_pfn = pmem_ranges[i].start_pfn;
314		npages = pmem_ranges[i].pages;
315
316		start = start_pfn << PAGE_SHIFT;
317		size = npages << PAGE_SHIFT;
318
319		/* add system RAM memblock */
320		memblock_add(start, size);
321
322		if ((start_pfn + npages) > max_pfn)
323			max_pfn = start_pfn + npages;
324	}
325
326	/* IOMMU is always used to access "high mem" on those boxes
327	 * that can support enough mem that a PCI device couldn't
328	 * directly DMA to any physical addresses.
329	 * ISA DMA support will need to revisit this.
330	 */
331	max_low_pfn = max_pfn;
332
333	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
334
335#define PDC_CONSOLE_IO_IODC_SIZE 32768
336
337	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
338				PDC_CONSOLE_IO_IODC_SIZE));
339	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
340			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));
341
342#ifndef CONFIG_DISCONTIGMEM
343
344	/* reserve the holes */
345
346	for (i = 0; i < npmem_holes; i++) {
347		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
348				(pmem_holes[i].pages << PAGE_SHIFT));
349	}
350#endif
351
352#ifdef CONFIG_BLK_DEV_INITRD
353	if (initrd_start) {
354		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
355		if (__pa(initrd_start) < mem_max) {
356			unsigned long initrd_reserve;
357
358			if (__pa(initrd_end) > mem_max) {
359				initrd_reserve = mem_max - __pa(initrd_start);
360			} else {
361				initrd_reserve = initrd_end - initrd_start;
362			}
363			initrd_below_start_ok = 1;
364			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
365
366			memblock_reserve(__pa(initrd_start), initrd_reserve);
367		}
368	}
369#endif
370
371	data_resource.start =  virt_to_phys(&data_start);
372	data_resource.end = virt_to_phys(_end) - 1;
373	code_resource.start = virt_to_phys(_text);
374	code_resource.end = virt_to_phys(&data_start)-1;
375
376	/* We don't know which region the kernel will be in, so try
377	 * all of them.
378	 */
379	for (i = 0; i < sysram_resource_count; i++) {
380		struct resource *res = &sysram_resources[i];
381		request_resource(res, &code_resource);
382		request_resource(res, &data_resource);
383	}
384	request_resource(&sysram_resources[0], &pdcdata_resource);
385
386	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
387	pdc_pdt_init();
388}
389
390static int __init parisc_text_address(unsigned long vaddr)
391{
392	static unsigned long head_ptr __initdata;
393
394	if (!head_ptr)
395		head_ptr = PAGE_MASK & (unsigned long)
396			dereference_function_descriptor(&parisc_kernel_start);
397
398	return core_kernel_text(vaddr) || vaddr == head_ptr;
399}
400
401static void __init map_pages(unsigned long start_vaddr,
402			     unsigned long start_paddr, unsigned long size,
403			     pgprot_t pgprot, int force)
404{
405	pgd_t *pg_dir;
406	pmd_t *pmd;
407	pte_t *pg_table;
408	unsigned long end_paddr;
409	unsigned long start_pmd;
410	unsigned long start_pte;
411	unsigned long tmp1;
412	unsigned long tmp2;
413	unsigned long address;
414	unsigned long vaddr;
415	unsigned long ro_start;
416	unsigned long ro_end;
417	unsigned long kernel_end;
418
419	ro_start = __pa((unsigned long)_text);
420	ro_end   = __pa((unsigned long)&data_start);
421	kernel_end  = __pa((unsigned long)&_end);
422
423	end_paddr = start_paddr + size;
424
425	pg_dir = pgd_offset_k(start_vaddr);
426
427#if PTRS_PER_PMD == 1
428	start_pmd = 0;
429#else
430	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
431#endif
432	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
433
434	address = start_paddr;
435	vaddr = start_vaddr;
436	while (address < end_paddr) {
437#if PTRS_PER_PMD == 1
438		pmd = (pmd_t *)__pa(pg_dir);
439#else
440		pmd = (pmd_t *)pgd_address(*pg_dir);
441
442		/*
443		 * pmd is physical at this point
444		 */
445
446		if (!pmd) {
447			pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
448			pmd = (pmd_t *) __pa(pmd);
449		}
450
451		pgd_populate(NULL, pg_dir, __va(pmd));
452#endif
453		pg_dir++;
454
455		/* now change pmd to kernel virtual addresses */
456
457		pmd = (pmd_t *)__va(pmd) + start_pmd;
458		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
459
460			/*
461			 * pg_table is physical at this point
462			 */
463
464			pg_table = (pte_t *)pmd_address(*pmd);
465			if (!pg_table) {
466				pg_table = (pte_t *) get_memblock(PAGE_SIZE);
467				pg_table = (pte_t *) __pa(pg_table);
468			}
469
470			pmd_populate_kernel(NULL, pmd, __va(pg_table));
471
472			/* now change pg_table to kernel virtual addresses */
473
474			pg_table = (pte_t *) __va(pg_table) + start_pte;
475			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
476				pte_t pte;
477
478				if (force)
479					pte =  __mk_pte(address, pgprot);
480				else if (parisc_text_address(vaddr)) {
481					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
482					if (address >= ro_start && address < kernel_end)
483						pte = pte_mkhuge(pte);
484				}
485				else
486#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
487				if (address >= ro_start && address < ro_end) {
488					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
489					pte = pte_mkhuge(pte);
490				} else
491#endif
492				{
493					pte = __mk_pte(address, pgprot);
494					if (address >= ro_start && address < kernel_end)
495						pte = pte_mkhuge(pte);
496				}
497
498				if (address >= end_paddr) {
499					if (force)
500						break;
501					else
502						pte_val(pte) = 0;
503				}
504
505				set_pte(pg_table, pte);
506
507				address += PAGE_SIZE;
508				vaddr += PAGE_SIZE;
509			}
510			start_pte = 0;
511
512			if (address >= end_paddr)
513			    break;
514		}
515		start_pmd = 0;
516	}
517}
518
519void __ref free_initmem(void)
520{
521	unsigned long init_begin = (unsigned long)__init_begin;
522	unsigned long init_end = (unsigned long)__init_end;
523
524	/* The init text pages are marked R-X.  We have to
525	 * flush the icache and mark them RW-
526	 *
527	 * This is tricky, because map_pages is in the init section.
528	 * Do a dummy remap of the data section first (the data
529	 * section is already PAGE_KERNEL) to pull in the TLB entries
530	 * for map_kernel */
531	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
532		  PAGE_KERNEL_RWX, 1);
533	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
534	 * map_pages */
535	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
536		  PAGE_KERNEL, 1);
537
538	/* force the kernel to see the new TLB entries */
539	__flush_tlb_range(0, init_begin, init_end);
540
541	/* finally dump all the instructions which were cached, since the
542	 * pages are no-longer executable */
543	flush_icache_range(init_begin, init_end);
544	
545	free_initmem_default(POISON_FREE_INITMEM);
546
 547	/* set up a new LED state on systems shipped with an LED State panel */
548	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
549}
550
551
552#ifdef CONFIG_STRICT_KERNEL_RWX
553void mark_rodata_ro(void)
554{
555	/* rodata memory was already mapped with KERNEL_RO access rights by
556           pagetable_init() and map_pages(). No need to do additional stuff here */
557	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
558		(unsigned long)(__end_rodata - __start_rodata) >> 10);
559}
560#endif
561
562
563/*
564 * Just an arbitrary offset to serve as a "hole" between mapping areas
565 * (between top of physical memory and a potential pcxl dma mapping
566 * area, and below the vmalloc mapping area).
567 *
568 * The current 32K value just means that there will be a 32K "hole"
 569 * between mapping areas. That means that any out-of-bounds memory
 570 * accesses will hopefully be caught. The vmalloc() routines leave
571 * a hole of 4kB between each vmalloced area for the same reason.
572 */
573
574 /* Leave room for gateway page expansion */
575#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
576#error KERNEL_MAP_START is in gateway reserved region
577#endif
578#define MAP_START (KERNEL_MAP_START)
579
580#define VM_MAP_OFFSET  (32*1024)
581#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
582				     & ~(VM_MAP_OFFSET-1)))
583
584void *parisc_vmalloc_start __read_mostly;
585EXPORT_SYMBOL(parisc_vmalloc_start);
586
587#ifdef CONFIG_PA11
588unsigned long pcxl_dma_start __read_mostly;
589#endif
590
591void __init mem_init(void)
592{
593	/* Do sanity checks on IPC (compat) structures */
594	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
595#ifndef CONFIG_64BIT
596	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
597	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
598	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
599#endif
600#ifdef CONFIG_COMPAT
601	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
602	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
603	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
604	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
605#endif
606
607	/* Do sanity checks on page table constants */
608	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
609	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
610	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
611	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
612			> BITS_PER_LONG);
613
614	high_memory = __va((max_pfn << PAGE_SHIFT));
615	set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
616	free_all_bootmem();
617
618#ifdef CONFIG_PA11
619	if (hppa_dma_ops == &pcxl_dma_ops) {
620		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
621		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
622						+ PCXL_DMA_MAP_SIZE);
623	} else {
624		pcxl_dma_start = 0;
625		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
626	}
627#else
628	parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
629#endif
630
631	mem_init_print_info(NULL);
632
633#if 0
634	/*
635	 * Do not expose the virtual kernel memory layout to userspace.
636	 * But keep code for debugging purposes.
637	 */
638	printk("virtual kernel memory layout:\n"
639	       "    vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
640	       "    memory  : 0x%px - 0x%px   (%4ld MB)\n"
641	       "      .init : 0x%px - 0x%px   (%4ld kB)\n"
642	       "      .data : 0x%px - 0x%px   (%4ld kB)\n"
643	       "      .text : 0x%px - 0x%px   (%4ld kB)\n",
644
645	       (void*)VMALLOC_START, (void*)VMALLOC_END,
646	       (VMALLOC_END - VMALLOC_START) >> 20,
647
648	       __va(0), high_memory,
649	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
650
651	       __init_begin, __init_end,
652	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,
653
654	       _etext, _edata,
655	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,
656
657	       _text, _etext,
658	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
659#endif
660}
661
662unsigned long *empty_zero_page __read_mostly;
663EXPORT_SYMBOL(empty_zero_page);
664
665/*
666 * pagetable_init() sets up the page tables
667 *
668 * Note that gateway_init() places the Linux gateway page at page 0.
669 * Since gateway pages cannot be dereferenced this has the desirable
670 * side effect of trapping those pesky NULL-reference errors in the
671 * kernel.
672 */
673static void __init pagetable_init(void)
674{
675	int range;
676
677	/* Map each physical memory range to its kernel vaddr */
678
679	for (range = 0; range < npmem_ranges; range++) {
680		unsigned long start_paddr;
681		unsigned long end_paddr;
682		unsigned long size;
683
684		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
685		size = pmem_ranges[range].pages << PAGE_SHIFT;
686		end_paddr = start_paddr + size;
687
688		map_pages((unsigned long)__va(start_paddr), start_paddr,
689			  size, PAGE_KERNEL, 0);
690	}
691
692#ifdef CONFIG_BLK_DEV_INITRD
693	if (initrd_end && initrd_end > mem_limit) {
694		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
695		map_pages(initrd_start, __pa(initrd_start),
696			  initrd_end - initrd_start, PAGE_KERNEL, 0);
697	}
698#endif
699
700	empty_zero_page = get_memblock(PAGE_SIZE);
701}
702
703static void __init gateway_init(void)
704{
705	unsigned long linux_gateway_page_addr;
706	/* FIXME: This is 'const' in order to trick the compiler
707	   into not treating it as DP-relative data. */
708	extern void * const linux_gateway_page;
709
710	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;
711
712	/*
713	 * Setup Linux Gateway page.
714	 *
715	 * The Linux gateway page will reside in kernel space (on virtual
716	 * page 0), so it doesn't need to be aliased into user space.
717	 */
718
719	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
720		  PAGE_SIZE, PAGE_GATEWAY, 1);
721}
722
723void __init paging_init(void)
724{
725	int i;
726
727	setup_bootmem();
728	pagetable_init();
729	gateway_init();
730	flush_cache_all_local(); /* start with known state */
731	flush_tlb_all_local(NULL);
732
733	for (i = 0; i < npmem_ranges; i++) {
734		unsigned long zones_size[MAX_NR_ZONES] = { 0, };
735
736		zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;
737
738#ifdef CONFIG_DISCONTIGMEM
739		/* Need to initialize the pfnnid_map before we can initialize
740		   the zone */
741		{
742		    int j;
743		    for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
744			 j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
745			 j++) {
746			pfnnid_map[j] = i;
747		    }
748		}
749#endif
750
751		free_area_init_node(i, zones_size,
752				pmem_ranges[i].start_pfn, NULL);
753	}
754}
755
756#ifdef CONFIG_PA20
757
758/*
759 * Currently, all PA20 chips have 18 bit protection IDs, which is the
760 * limiting factor (space ids are 32 bits).
761 */
762
763#define NR_SPACE_IDS 262144
764
765#else
766
767/*
768 * Currently we have a one-to-one relationship between space IDs and
769 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
770 * support 15 bit protection IDs, so that is the limiting factor.
771 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
772 * probably not worth the effort for a special case here.
773 */
774
775#define NR_SPACE_IDS 32768
776
777#endif  /* !CONFIG_PA20 */
778
779#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
780#define SID_ARRAY_SIZE  (NR_SPACE_IDS / (8 * sizeof(long)))
781
782static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
783static unsigned long dirty_space_id[SID_ARRAY_SIZE];
784static unsigned long space_id_index;
785static unsigned long free_space_ids = NR_SPACE_IDS - 1;
786static unsigned long dirty_space_ids = 0;
787
788static DEFINE_SPINLOCK(sid_lock);
789
790unsigned long alloc_sid(void)
791{
792	unsigned long index;
793
794	spin_lock(&sid_lock);
795
796	if (free_space_ids == 0) {
797		if (dirty_space_ids != 0) {
798			spin_unlock(&sid_lock);
799			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
800			spin_lock(&sid_lock);
801		}
802		BUG_ON(free_space_ids == 0);
803	}
804
805	free_space_ids--;
806
807	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
808	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
809	space_id_index = index;
810
811	spin_unlock(&sid_lock);
812
813	return index << SPACEID_SHIFT;
814}
815
816void free_sid(unsigned long spaceid)
817{
818	unsigned long index = spaceid >> SPACEID_SHIFT;
819	unsigned long *dirty_space_offset;
820
821	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
822	index &= (BITS_PER_LONG - 1);
823
824	spin_lock(&sid_lock);
825
826	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */
827
828	*dirty_space_offset |= (1L << index);
829	dirty_space_ids++;
830
831	spin_unlock(&sid_lock);
832}
833
834
835#ifdef CONFIG_SMP
836static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
837{
838	int i;
839
840	/* NOTE: sid_lock must be held upon entry */
841
842	*ndirtyptr = dirty_space_ids;
843	if (dirty_space_ids != 0) {
844	    for (i = 0; i < SID_ARRAY_SIZE; i++) {
845		dirty_array[i] = dirty_space_id[i];
846		dirty_space_id[i] = 0;
847	    }
848	    dirty_space_ids = 0;
849	}
850
851	return;
852}
853
854static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
855{
856	int i;
857
858	/* NOTE: sid_lock must be held upon entry */
859
860	if (ndirty != 0) {
861		for (i = 0; i < SID_ARRAY_SIZE; i++) {
862			space_id[i] ^= dirty_array[i];
863		}
864
865		free_space_ids += ndirty;
866		space_id_index = 0;
867	}
868}
869
870#else /* CONFIG_SMP */
871
872static void recycle_sids(void)
873{
874	int i;
875
876	/* NOTE: sid_lock must be held upon entry */
877
878	if (dirty_space_ids != 0) {
879		for (i = 0; i < SID_ARRAY_SIZE; i++) {
880			space_id[i] ^= dirty_space_id[i];
881			dirty_space_id[i] = 0;
882		}
883
884		free_space_ids += dirty_space_ids;
885		dirty_space_ids = 0;
886		space_id_index = 0;
887	}
888}
889#endif
890
891/*
892 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
893 * purged, we can safely reuse the space ids that were released but
894 * not flushed from the tlb.
895 */
896
897#ifdef CONFIG_SMP
898
899static unsigned long recycle_ndirty;
900static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
901static unsigned int recycle_inuse;
902
903void flush_tlb_all(void)
904{
905	int do_recycle;
906
907	__inc_irq_stat(irq_tlb_count);
908	do_recycle = 0;
909	spin_lock(&sid_lock);
910	if (dirty_space_ids > RECYCLE_THRESHOLD) {
911	    BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
912	    get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
913	    recycle_inuse++;
914	    do_recycle++;
915	}
916	spin_unlock(&sid_lock);
917	on_each_cpu(flush_tlb_all_local, NULL, 1);
918	if (do_recycle) {
919	    spin_lock(&sid_lock);
920	    recycle_sids(recycle_ndirty,recycle_dirty_array);
921	    recycle_inuse = 0;
922	    spin_unlock(&sid_lock);
923	}
924}
925#else
926void flush_tlb_all(void)
927{
928	__inc_irq_stat(irq_tlb_count);
929	spin_lock(&sid_lock);
930	flush_tlb_all_local(NULL);
931	recycle_sids();
932	spin_unlock(&sid_lock);
933}
934#endif
935
936#ifdef CONFIG_BLK_DEV_INITRD
937void free_initrd_mem(unsigned long start, unsigned long end)
938{
939	free_reserved_area((void *)start, (void *)end, -1, "initrd");
940}
941#endif