// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/balloon.h>
#include <xen/page.h>
#include <xen/xen.h>

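/*
 * Unpopulated pages are kept on a singly linked LIFO stack, threaded
 * through each page's zone_device_data field; list_count tracks the
 * number of pages currently on the stack.
 */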
static DEFINE_MUTEX(list_lock);
static struct page *page_list;
static unsigned int list_count;

static struct resource *target_resource;

/*
 * If the architecture is not happy with the system "iomem_resource" being
 * used for the region allocation, it can provide its own view by creating
 * a dedicated Xen resource that describes unused regions of the guest
 * physical address space provided by the hypervisor.
 */
int __weak __init arch_xen_unpopulated_init(struct resource **res)
{
	*res = &iomem_resource;

	return 0;
}
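/*
 * A minimal sketch of an arch-specific override, assuming the hypervisor
 * has advertised a range of unused guest physical address space to the
 * kernel. The xen_resource name and the XEN_UNUSED_GFN_{START,END}
 * bounds are hypothetical, purely for illustration:
 *
 *	static struct resource xen_resource = {
 *		.name  = "Xen unused space",
 *		.start = XEN_UNUSED_GFN_START,
 *		.end   = XEN_UNUSED_GFN_END,
 *	};
 *
 *	int __init arch_xen_unpopulated_init(struct resource **res)
 *	{
 *		*res = &xen_resource;
 *		return 0;
 *	}
 */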

static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res, *tmp_res = NULL;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	struct range mhp_range;
	int ret;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

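	/*
	 * Memory is added in whole sections: both the size and the
	 * alignment passed to allocate_resource() below are rounded up to
	 * PAGES_PER_SECTION, and the region must fall within the
	 * hotplug-pluggable physical range.
	 */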
	mhp_range = mhp_get_pluggable_range(true);

	ret = allocate_resource(target_resource, res,
				alloc_pages * PAGE_SIZE, mhp_range.start, mhp_range.end,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}

	/*
	 * Reserve the region previously allocated from the Xen resource
	 * so that it cannot be reused by anyone else.
	 */
	if (target_resource != &iomem_resource) {
		tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
		if (!tmp_res) {
			ret = -ENOMEM;
			goto err_insert;
		}

		tmp_res->name = res->name;
		tmp_res->start = res->start;
		tmp_res->end = res->end;
		tmp_res->flags = res->flags;

		ret = request_resource(&iomem_resource, tmp_res);
		if (ret < 0) {
			pr_err("Cannot request resource %pR (%d)\n", tmp_res, ret);
			kfree(tmp_res);
			goto err_insert;
		}
	}

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap) {
		ret = -ENOMEM;
		goto err_pgmap;
	}

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory, so the p2m
	 * must contain invalid entries in order for the correct
	 * non-present PTEs to be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries are
	 * not restored, since this region is now known not to conflict
	 * with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

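	/* Push each freshly created page onto the free stack. */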
	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		pg->zone_device_data = page_list;
		page_list = pg;
		list_count++;
	}

	return 0;

err_memremap:
	kfree(pgmap);
err_pgmap:
	if (tmp_res) {
		release_resource(tmp_res);
		kfree(tmp_res);
	}
err_insert:
	release_resource(res);
err_resource:
	kfree(res);
	return ret;
}

/**
 * xen_alloc_unpopulated_pages - allocate unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 *
 * Return: 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	/*
	 * Fall back to the default behavior if we do not have a suitable
	 * resource to allocate the required region from; without one we
	 * cannot construct pages.
	 */
	if (!target_resource)
		return xen_alloc_ballooned_pages(nr_pages, pages);

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = page_list;

		BUG_ON(!pg);
		page_list = pg->zone_device_data;
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
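		/*
		 * On a PV guest each new pfn needs a slot in the p2m;
		 * xen_alloc_p2m_entry() pre-allocates the intermediate
		 * levels so a machine frame can be installed later.
		 */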
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

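				/* Roll back: return already-claimed pages to the free stack. */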
				for (j = 0; j <= i; j++) {
					pages[j]->zone_device_data = page_list;
					page_list = pages[j];
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);

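/*
 * Usage sketch for this pair of exports (hypothetical caller, e.g. a
 * backend needing scratch frames to map foreign grants into; the count
 * of 16 is arbitrary):
 *
 *	struct page *pages[16];
 *	int rc = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
 *
 *	if (rc)
 *		return rc;
 *	... map grant references onto pages[] ...
 *	xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
 */
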
/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	if (!target_resource) {
		xen_free_ballooned_pages(nr_pages, pages);
		return;
	}

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		pages[i]->zone_device_data = page_list;
		page_list = pages[i];
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);

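/*
 * Resolve the target resource at early_initcall time, before any users
 * of the allocator can run; if the arch hook fails, target_resource is
 * left NULL and the functions above transparently fall back to
 * ballooned pages.
 */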
static int __init unpopulated_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = arch_xen_unpopulated_init(&target_resource);
	if (ret) {
		pr_err("xen:unpopulated: Cannot initialize target resource\n");
		target_resource = NULL;
	}

	return ret;
}
early_initcall(unpopulated_init);