Loading...
1/*
2 * S390 version
3 * Copyright IBM Corp. 1999
4 * Author(s): Hartmut Penner (hp@de.ibm.com)
5 *
6 * Derived from "arch/i386/mm/init.c"
7 * Copyright (C) 1995 Linus Torvalds
8 */
9
10#include <linux/signal.h>
11#include <linux/sched.h>
12#include <linux/kernel.h>
13#include <linux/errno.h>
14#include <linux/string.h>
15#include <linux/types.h>
16#include <linux/ptrace.h>
17#include <linux/mman.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/smp.h>
21#include <linux/init.h>
22#include <linux/pagemap.h>
23#include <linux/bootmem.h>
24#include <linux/memory.h>
25#include <linux/pfn.h>
26#include <linux/poison.h>
27#include <linux/initrd.h>
28#include <linux/export.h>
29#include <linux/gfp.h>
30#include <linux/memblock.h>
31#include <asm/processor.h>
32#include <asm/uaccess.h>
33#include <asm/pgtable.h>
34#include <asm/pgalloc.h>
35#include <asm/dma.h>
36#include <asm/lowcore.h>
37#include <asm/tlb.h>
38#include <asm/tlbflush.h>
39#include <asm/sections.h>
40#include <asm/ctl_reg.h>
41#include <asm/sclp.h>
42
43pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
44
45unsigned long empty_zero_page, zero_page_mask;
46EXPORT_SYMBOL(empty_zero_page);
47EXPORT_SYMBOL(zero_page_mask);
48
49static void __init setup_zero_pages(void)
50{
51 unsigned int order;
52 struct page *page;
53 int i;
54
55 /* Latest machines require a mapping granularity of 512KB */
56 order = 7;
57
58 /* Limit number of empty zero pages for small memory sizes */
59 while (order > 2 && (totalram_pages >> 10) < (1UL << order))
60 order--;
61
62 empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
63 if (!empty_zero_page)
64 panic("Out of memory in setup_zero_pages");
65
66 page = virt_to_page((void *) empty_zero_page);
67 split_page(page, order);
68 for (i = 1 << order; i > 0; i--) {
69 mark_page_reserved(page);
70 page++;
71 }
72
73 zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
74}
75
76/*
77 * paging_init() sets up the page tables
78 */
79void __init paging_init(void)
80{
81 unsigned long max_zone_pfns[MAX_NR_ZONES];
82 unsigned long pgd_type, asce_bits;
83
84 init_mm.pgd = swapper_pg_dir;
85 if (VMALLOC_END > (1UL << 42)) {
86 asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
87 pgd_type = _REGION2_ENTRY_EMPTY;
88 } else {
89 asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
90 pgd_type = _REGION3_ENTRY_EMPTY;
91 }
92 init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
93 S390_lowcore.kernel_asce = init_mm.context.asce;
94 clear_table((unsigned long *) init_mm.pgd, pgd_type,
95 sizeof(unsigned long)*2048);
96 vmem_map_init();
97
98 /* enable virtual mapping in kernel mode */
99 __ctl_load(S390_lowcore.kernel_asce, 1, 1);
100 __ctl_load(S390_lowcore.kernel_asce, 7, 7);
101 __ctl_load(S390_lowcore.kernel_asce, 13, 13);
102 __arch_local_irq_stosm(0x04);
103
104 sparse_memory_present_with_active_regions(MAX_NUMNODES);
105 sparse_init();
106 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
107 max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
108 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
109 free_area_init_nodes(max_zone_pfns);
110}
111
112void mark_rodata_ro(void)
113{
114 /* Text and rodata are already protected. Nothing to do here. */
115 pr_info("Write protecting the kernel read-only data: %luk\n",
116 ((unsigned long)&_eshared - (unsigned long)&_stext) >> 10);
117}
118
119void __init mem_init(void)
120{
121 if (MACHINE_HAS_TLB_LC)
122 cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
123 cpumask_set_cpu(0, mm_cpumask(&init_mm));
124 atomic_set(&init_mm.context.attach_count, 1);
125
126 set_max_mapnr(max_low_pfn);
127 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
128
129 /* Setup guest page hinting */
130 cmma_init();
131
132 /* this will put all low memory onto the freelists */
133 free_all_bootmem();
134 setup_zero_pages(); /* Setup zeroed pages. */
135
136 mem_init_print_info(NULL);
137}
138
139void free_initmem(void)
140{
141 free_initmem_default(POISON_FREE_INITMEM);
142}
143
144#ifdef CONFIG_BLK_DEV_INITRD
145void __init free_initrd_mem(unsigned long start, unsigned long end)
146{
147 free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
148 "initrd");
149}
150#endif
151
152#ifdef CONFIG_MEMORY_HOTPLUG
153int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
154{
155 unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
156 unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
157 unsigned long start_pfn = PFN_DOWN(start);
158 unsigned long size_pages = PFN_DOWN(size);
159 unsigned long nr_pages;
160 int rc, zone_enum;
161
162 rc = vmem_add_mapping(start, size);
163 if (rc)
164 return rc;
165
166 while (size_pages > 0) {
167 if (start_pfn < dma_end_pfn) {
168 nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
169 dma_end_pfn - start_pfn : size_pages;
170 zone_enum = ZONE_DMA;
171 } else if (start_pfn < normal_end_pfn) {
172 nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
173 normal_end_pfn - start_pfn : size_pages;
174 zone_enum = ZONE_NORMAL;
175 } else {
176 nr_pages = size_pages;
177 zone_enum = ZONE_MOVABLE;
178 }
179 rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
180 start_pfn, size_pages);
181 if (rc)
182 break;
183 start_pfn += nr_pages;
184 size_pages -= nr_pages;
185 }
186 if (rc)
187 vmem_remove_mapping(start, size);
188 return rc;
189}
190
191unsigned long memory_block_size_bytes(void)
192{
193 /*
194 * Make sure the memory block size is always greater
195 * or equal than the memory increment size.
196 */
197 return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
198}
199
200#ifdef CONFIG_MEMORY_HOTREMOVE
201int arch_remove_memory(u64 start, u64 size)
202{
203 /*
204 * There is no hardware or firmware interface which could trigger a
205 * hot memory remove on s390. So there is nothing that needs to be
206 * implemented.
207 */
208 return -EBUSY;
209}
210#endif
211#endif /* CONFIG_MEMORY_HOTPLUG */
1/*
2 * arch/s390/mm/init.c
3 *
4 * S390 version
5 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Hartmut Penner (hp@de.ibm.com)
7 *
8 * Derived from "arch/i386/mm/init.c"
9 * Copyright (C) 1995 Linus Torvalds
10 */
11
12#include <linux/signal.h>
13#include <linux/sched.h>
14#include <linux/kernel.h>
15#include <linux/errno.h>
16#include <linux/string.h>
17#include <linux/types.h>
18#include <linux/ptrace.h>
19#include <linux/mman.h>
20#include <linux/mm.h>
21#include <linux/swap.h>
22#include <linux/smp.h>
23#include <linux/init.h>
24#include <linux/pagemap.h>
25#include <linux/bootmem.h>
26#include <linux/pfn.h>
27#include <linux/poison.h>
28#include <linux/initrd.h>
29#include <linux/export.h>
30#include <linux/gfp.h>
31#include <asm/processor.h>
32#include <asm/uaccess.h>
33#include <asm/pgtable.h>
34#include <asm/pgalloc.h>
35#include <asm/dma.h>
36#include <asm/lowcore.h>
37#include <asm/tlb.h>
38#include <asm/tlbflush.h>
39#include <asm/sections.h>
40#include <asm/ctl_reg.h>
41
42pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
43
44unsigned long empty_zero_page, zero_page_mask;
45EXPORT_SYMBOL(empty_zero_page);
46
47static unsigned long setup_zero_pages(void)
48{
49 struct cpuid cpu_id;
50 unsigned int order;
51 unsigned long size;
52 struct page *page;
53 int i;
54
55 get_cpu_id(&cpu_id);
56 switch (cpu_id.machine) {
57 case 0x9672: /* g5 */
58 case 0x2064: /* z900 */
59 case 0x2066: /* z900 */
60 case 0x2084: /* z990 */
61 case 0x2086: /* z990 */
62 case 0x2094: /* z9-109 */
63 case 0x2096: /* z9-109 */
64 order = 0;
65 break;
66 case 0x2097: /* z10 */
67 case 0x2098: /* z10 */
68 default:
69 order = 2;
70 break;
71 }
72
73 empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
74 if (!empty_zero_page)
75 panic("Out of memory in setup_zero_pages");
76
77 page = virt_to_page((void *) empty_zero_page);
78 split_page(page, order);
79 for (i = 1 << order; i > 0; i--) {
80 SetPageReserved(page);
81 page++;
82 }
83
84 size = PAGE_SIZE << order;
85 zero_page_mask = (size - 1) & PAGE_MASK;
86
87 return 1UL << order;
88}
89
90/*
91 * paging_init() sets up the page tables
92 */
93void __init paging_init(void)
94{
95 unsigned long max_zone_pfns[MAX_NR_ZONES];
96 unsigned long pgd_type, asce_bits;
97
98 init_mm.pgd = swapper_pg_dir;
99#ifdef CONFIG_64BIT
100 if (VMALLOC_END > (1UL << 42)) {
101 asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
102 pgd_type = _REGION2_ENTRY_EMPTY;
103 } else {
104 asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
105 pgd_type = _REGION3_ENTRY_EMPTY;
106 }
107#else
108 asce_bits = _ASCE_TABLE_LENGTH;
109 pgd_type = _SEGMENT_ENTRY_EMPTY;
110#endif
111 S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
112 clear_table((unsigned long *) init_mm.pgd, pgd_type,
113 sizeof(unsigned long)*2048);
114 vmem_map_init();
115
116 /* enable virtual mapping in kernel mode */
117 __ctl_load(S390_lowcore.kernel_asce, 1, 1);
118 __ctl_load(S390_lowcore.kernel_asce, 7, 7);
119 __ctl_load(S390_lowcore.kernel_asce, 13, 13);
120 arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
121
122 atomic_set(&init_mm.context.attach_count, 1);
123
124 sparse_memory_present_with_active_regions(MAX_NUMNODES);
125 sparse_init();
126 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
127 max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
128 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
129 free_area_init_nodes(max_zone_pfns);
130 fault_init();
131}
132
133void __init mem_init(void)
134{
135 unsigned long codesize, reservedpages, datasize, initsize;
136
137 max_mapnr = num_physpages = max_low_pfn;
138 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
139
140 /* Setup guest page hinting */
141 cmma_init();
142
143 /* this will put all low memory onto the freelists */
144 totalram_pages += free_all_bootmem();
145 totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
146
147 reservedpages = 0;
148
149 codesize = (unsigned long) &_etext - (unsigned long) &_text;
150 datasize = (unsigned long) &_edata - (unsigned long) &_etext;
151 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
152 printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
153 nr_free_pages() << (PAGE_SHIFT-10),
154 max_mapnr << (PAGE_SHIFT-10),
155 codesize >> 10,
156 reservedpages << (PAGE_SHIFT-10),
157 datasize >>10,
158 initsize >> 10);
159 printk("Write protected kernel read-only data: %#lx - %#lx\n",
160 (unsigned long)&_stext,
161 PFN_ALIGN((unsigned long)&_eshared) - 1);
162}
163
164#ifdef CONFIG_DEBUG_PAGEALLOC
165void kernel_map_pages(struct page *page, int numpages, int enable)
166{
167 pgd_t *pgd;
168 pud_t *pud;
169 pmd_t *pmd;
170 pte_t *pte;
171 unsigned long address;
172 int i;
173
174 for (i = 0; i < numpages; i++) {
175 address = page_to_phys(page + i);
176 pgd = pgd_offset_k(address);
177 pud = pud_offset(pgd, address);
178 pmd = pmd_offset(pud, address);
179 pte = pte_offset_kernel(pmd, address);
180 if (!enable) {
181 __ptep_ipte(address, pte);
182 pte_val(*pte) = _PAGE_TYPE_EMPTY;
183 continue;
184 }
185 *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
186 /* Flush cpu write queue. */
187 mb();
188 }
189}
190#endif
191
192void free_init_pages(char *what, unsigned long begin, unsigned long end)
193{
194 unsigned long addr = begin;
195
196 if (begin >= end)
197 return;
198 for (; addr < end; addr += PAGE_SIZE) {
199 ClearPageReserved(virt_to_page(addr));
200 init_page_count(virt_to_page(addr));
201 memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
202 PAGE_SIZE);
203 free_page(addr);
204 totalram_pages++;
205 }
206 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
207}
208
209void free_initmem(void)
210{
211 free_init_pages("unused kernel memory",
212 (unsigned long)&__init_begin,
213 (unsigned long)&__init_end);
214}
215
216#ifdef CONFIG_BLK_DEV_INITRD
217void free_initrd_mem(unsigned long start, unsigned long end)
218{
219 free_init_pages("initrd memory", start, end);
220}
221#endif
222
223#ifdef CONFIG_MEMORY_HOTPLUG
224int arch_add_memory(int nid, u64 start, u64 size)
225{
226 unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
227 unsigned long start_pfn = PFN_DOWN(start);
228 unsigned long size_pages = PFN_DOWN(size);
229 struct zone *zone;
230 int rc;
231
232 rc = vmem_add_mapping(start, size);
233 if (rc)
234 return rc;
235 for_each_zone(zone) {
236 if (zone_idx(zone) != ZONE_MOVABLE) {
237 /* Add range within existing zone limits */
238 zone_start_pfn = zone->zone_start_pfn;
239 zone_end_pfn = zone->zone_start_pfn +
240 zone->spanned_pages;
241 } else {
242 /* Add remaining range to ZONE_MOVABLE */
243 zone_start_pfn = start_pfn;
244 zone_end_pfn = start_pfn + size_pages;
245 }
246 if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
247 continue;
248 nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
249 zone_end_pfn - start_pfn : size_pages;
250 rc = __add_pages(nid, zone, start_pfn, nr_pages);
251 if (rc)
252 break;
253 start_pfn += nr_pages;
254 size_pages -= nr_pages;
255 if (!size_pages)
256 break;
257 }
258 if (rc)
259 vmem_remove_mapping(start, size);
260 return rc;
261}
262#endif /* CONFIG_MEMORY_HOTPLUG */