v3.15
 
/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

/*
 * balloon_devinfo_alloc - allocates a balloon device information descriptor.
 * @balloon_dev_descriptor: pointer to reference the balloon device which
 *                          this struct balloon_dev_info will be servicing.
 *
 * Driver must call it to properly allocate and initialize an instance of
 * struct balloon_dev_info which will be used to reference a balloon device
 * as well as to keep track of the balloon device page list.
 */
struct balloon_dev_info *balloon_devinfo_alloc(void *balloon_dev_descriptor)
{
	struct balloon_dev_info *b_dev_info;
	b_dev_info = kmalloc(sizeof(*b_dev_info), GFP_KERNEL);
	if (!b_dev_info)
		return ERR_PTR(-ENOMEM);

	b_dev_info->balloon_device = balloon_dev_descriptor;
	b_dev_info->mapping = NULL;
	b_dev_info->isolated_pages = 0;
	spin_lock_init(&b_dev_info->pages_lock);
	INIT_LIST_HEAD(&b_dev_info->pages);

	return b_dev_info;
}
EXPORT_SYMBOL_GPL(balloon_devinfo_alloc);
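
/*
 * Illustrative sketch (not part of this file): how a hypothetical balloon
 * driver of this era might allocate its descriptor at probe time, passing
 * its own private structure so compaction callbacks can find the driver
 * state later. 'struct my_balloon' and 'my_balloon_probe' are made-up
 * names for illustration only; the in-tree consumer of this API was
 * drivers/virtio/virtio_balloon.c.
 */
#if 0	/* example only */
struct my_balloon {
	struct balloon_dev_info *vb_dev_info;
};

static int my_balloon_probe(struct my_balloon *vb)
{
	vb->vb_dev_info = balloon_devinfo_alloc(vb);
	if (IS_ERR(vb->vb_dev_info))
		return PTR_ERR(vb->vb_dev_info);
	return 0;
}
#endif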

/*
 * balloon_page_enqueue - allocates a new page and inserts it into the balloon
 *			  page list.
 * @b_dev_info: balloon device descriptor where we will insert a new page
 *
 * Driver must call it to properly allocate and enlist a new balloon page
 * before definitively removing the page from the guest system.
 * This function returns the page address for the recently enqueued page or
 * NULL in case we fail to allocate a new page this turn.
 */
struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
{
	unsigned long flags;
	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
					__GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!page)
		return NULL;

	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point.
	 */
	BUG_ON(!trylock_page(page));
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(page, b_dev_info->mapping, &b_dev_info->pages);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	unlock_page(page);
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
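
/*
 * Illustrative sketch (not part of this file): a hypothetical inflate path.
 * Each successful balloon_page_enqueue() both allocates a page and links it
 * on b_dev_info->pages, so the driver only has to tell the host about it.
 * 'my_tell_host_about' is a made-up placeholder for the driver's notify
 * step, not a kernel API.
 */
#if 0	/* example only */
static size_t my_fill_balloon(struct balloon_dev_info *b_dev_info, size_t num)
{
	size_t filled;

	for (filled = 0; filled < num; filled++) {
		struct page *page = balloon_page_enqueue(b_dev_info);

		if (!page)
			break;	/* allocation failed this turn; retry later */
		my_tell_host_about(page_to_pfn(page));
	}
	return filled;
}
#endif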

/*
 * balloon_page_dequeue - removes a page from balloon's page list and returns
 *			  its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Driver must call it to properly de-allocate a previously enlisted balloon
 * page before definitively releasing it back to the guest system.
 * This function returns the page address for the recently dequeued page or
 * NULL in case we find the balloon's page list temporarily empty due to
 * compaction-isolated pages.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	struct page *page, *tmp;
	unsigned long flags;
	bool dequeued_page;

	dequeued_page = false;
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		/*
		 * Block others from accessing the 'page' while we get around
		 * to establishing additional references and preparing the
		 * 'page' to be released by the balloon driver.
		 */
		if (trylock_page(page)) {
			spin_lock_irqsave(&b_dev_info->pages_lock, flags);
			/*
			 * Raise the page refcount here to prevent any wrong
			 * attempt to isolate this page, in case of colliding
			 * with balloon_page_isolate() just after we release
			 * the page lock.
			 *
			 * balloon_page_free() will take care of dropping
			 * this extra refcount later.
			 */
			get_page(page);
			balloon_page_delete(page);
			spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
			unlock_page(page);
			dequeued_page = true;
			break;
		}
	}

	if (!dequeued_page) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there are no isolated pages, then something
		 * went out of track and some balloon pages are lost.
		 * BUG() here, otherwise the balloon driver may get stuck in
		 * an infinite loop while attempting to release all its pages.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
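
/*
 * Illustrative sketch (not part of this file): a hypothetical deflate path.
 * A NULL return does not necessarily mean the balloon is empty - compaction
 * may have transiently isolated every listed page - so callers track their
 * own page count instead of looping until NULL. Per the comment above,
 * balloon_page_free() drops the extra refcount taken in
 * balloon_page_dequeue() and frees the page. 'my_tell_host_release' is a
 * made-up placeholder for the driver's notify step.
 */
#if 0	/* example only */
static size_t my_leak_balloon(struct balloon_dev_info *b_dev_info, size_t num)
{
	size_t freed;

	for (freed = 0; freed < num; freed++) {
		struct page *page = balloon_page_dequeue(b_dev_info);

		if (!page)
			break;	/* list transiently empty; try again later */
		my_tell_host_release(page_to_pfn(page));
		balloon_page_free(page);
	}
	return freed;
}
#endif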

#ifdef CONFIG_BALLOON_COMPACTION
/*
 * balloon_mapping_alloc - allocates a special ->mapping for ballooned pages.
 * @b_dev_info: holds the balloon device information descriptor.
 * @a_ops: balloon_mapping address_space_operations descriptor.
 *
 * Driver must call it to properly allocate and initialize an instance of
 * struct address_space which will be used as the special page->mapping for
 * balloon device enlisted page instances.
 */
struct address_space *balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
				const struct address_space_operations *a_ops)
{
	struct address_space *mapping;

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return ERR_PTR(-ENOMEM);

	/*
	 * Give a clean 'zeroed' status to all elements of this special
	 * balloon page->mapping struct address_space instance.
	 */
	address_space_init_once(mapping);

	/*
	 * Set mapping->flags appropriately, to allow balloon pages
	 * ->mapping identification.
	 */
	mapping_set_balloon(mapping);
	mapping_set_gfp_mask(mapping, balloon_mapping_gfp_mask());

	/* balloon's page->mapping->a_ops callback descriptor */
	mapping->a_ops = a_ops;

	/*
	 * Establish a pointer reference back to the balloon device descriptor
	 * this particular page->mapping will be servicing.
	 * This is used by compaction / migration procedures to identify and
	 * access the balloon device pageset while isolating / migrating pages.
	 *
	 * As some balloon drivers can register multiple balloon devices
	 * for a single guest, this also helps compaction / migration to
	 * properly deal with multiple balloon pagesets, when required.
	 */
	mapping->private_data = b_dev_info;
	b_dev_info->mapping = mapping;

	return mapping;
}
EXPORT_SYMBOL_GPL(balloon_mapping_alloc);
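
/*
 * Illustrative sketch (not part of this file): wiring up the special
 * mapping. A hypothetical driver registers address_space_operations with
 * only a .migratepage callback; compaction then reaches the driver through
 * page->mapping. 'my_migratepage' and 'my_balloon_init_mapping' are
 * illustrative names, not a fixed kernel API.
 */
#if 0	/* example only */
static int my_migratepage(struct address_space *mapping,
			  struct page *newpage, struct page *page,
			  enum migrate_mode mode);

static const struct address_space_operations my_balloon_aops = {
	.migratepage = my_migratepage,
};

static int my_balloon_init_mapping(struct balloon_dev_info *b_dev_info)
{
	struct address_space *mapping;

	mapping = balloon_mapping_alloc(b_dev_info, &my_balloon_aops);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);
	return 0;
}
#endif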

static inline void __isolate_balloon_page(struct page *page)
{
	struct balloon_dev_info *b_dev_info = page->mapping->private_data;
	unsigned long flags;
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_del(&page->lru);
	b_dev_info->isolated_pages++;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

static inline void __putback_balloon_page(struct page *page)
{
	struct balloon_dev_info *b_dev_info = page->mapping->private_data;
	unsigned long flags;
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_add(&page->lru, &b_dev_info->pages);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

static inline int __migrate_balloon_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return page->mapping->a_ops->migratepage(mapping, newpage, page, mode);
}

/* __isolate_lru_page() counterpart for a ballooned page */
bool balloon_page_isolate(struct page *page)
{
	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a balloon page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (likely(get_page_unless_zero(page))) {
		/*
		 * As balloon pages are not isolated from LRU lists, concurrent
		 * compaction threads can race against page migration functions
		 * as well as race against the balloon driver releasing a page.
		 *
		 * In order to avoid having an already isolated balloon page
		 * being (wrongly) re-isolated while it is under migration,
		 * or to avoid attempting to isolate pages being released by
		 * the balloon driver, let's be sure we have the page lock
		 * before proceeding with the balloon page isolation steps.
		 */
		if (likely(trylock_page(page))) {
			/*
			 * A ballooned page, by default, has just one refcount.
			 * Prevent concurrent compaction threads from isolating
			 * an already isolated balloon page by refcount check.
			 */
			if (__is_movable_balloon_page(page) &&
			    page_count(page) == 2) {
				__isolate_balloon_page(page);
				unlock_page(page);
				return true;
			}
			unlock_page(page);
		}
		put_page(page);
	}
	return false;
}

/* putback_lru_page() counterpart for a ballooned page */
void balloon_page_putback(struct page *page)
{
	/*
	 * 'lock_page()' stabilizes the page and prevents races against
	 * concurrent isolation threads attempting to re-isolate it.
	 */
	lock_page(page);

	if (__is_movable_balloon_page(page)) {
		__putback_balloon_page(page);
		/* drop the extra ref count taken for page isolation */
		put_page(page);
	} else {
		WARN_ON(1);
		dump_page(page, "not movable balloon page");
	}
	unlock_page(page);
}
274
275/* move_to_new_page() counterpart for a ballooned page */
276int balloon_page_migrate(struct page *newpage,
277			 struct page *page, enum migrate_mode mode)
278{
279	struct address_space *mapping;
280	int rc = -EAGAIN;
281
282	/*
283	 * Block others from accessing the 'newpage' when we get around to
284	 * establishing additional references. We should be the only one
285	 * holding a reference to the 'newpage' at this point.
286	 */
287	BUG_ON(!trylock_page(newpage));
288
289	if (WARN_ON(!__is_movable_balloon_page(page))) {
290		dump_page(page, "not movable balloon page");
291		unlock_page(newpage);
292		return rc;
293	}
294
295	mapping = page->mapping;
296	if (mapping)
297		rc = __migrate_balloon_page(mapping, newpage, page, mode);
298
299	unlock_page(newpage);
300	return rc;
301}
302#endif /* CONFIG_BALLOON_COMPACTION */
v6.9.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
				     struct page *page)
{
	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point. If we are not, then
	 * memory corruption is possible and we should stop execution.
	 */
	BUG_ON(!trylock_page(page));
	balloon_page_insert(b_dev_info, page);
	unlock_page(page);
	__count_vm_event(BALLOON_INFLATE);
}

/**
 * balloon_page_list_enqueue() - inserts a list of pages into the balloon page
 *				 list.
 * @b_dev_info: balloon device descriptor where we will insert a new page
 * @pages: pages to enqueue - allocated using balloon_page_alloc.
 *
 * Driver must call this function to properly enqueue balloon pages before
 * definitively removing them from the guest system.
 *
 * Return: number of pages that were enqueued.
 */
size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
				 struct list_head *pages)
{
	struct page *page, *tmp;
	unsigned long flags;
	size_t n_pages = 0;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, pages, lru) {
		list_del(&page->lru);
		balloon_page_enqueue_one(b_dev_info, page);
		n_pages++;
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
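
/*
 * Illustrative sketch (not part of this file): batch inflate in the modern
 * API. Pages are allocated with balloon_page_alloc(), staged on a local
 * list with balloon_page_push(), and then enqueued in one shot under a
 * single pages_lock acquisition. 'my_tell_host_about' is a made-up
 * placeholder for the driver's notify step.
 */
#if 0	/* example only */
static size_t my_fill_balloon(struct balloon_dev_info *b_dev_info, size_t num)
{
	LIST_HEAD(pages);
	struct page *page;
	size_t i;

	for (i = 0; i < num; i++) {
		page = balloon_page_alloc();
		if (!page)
			break;
		balloon_page_push(&pages, page);
		my_tell_host_about(page_to_pfn(page));
	}
	return balloon_page_list_enqueue(b_dev_info, &pages);
}
#endif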

/**
 * balloon_page_list_dequeue() - removes pages from balloon's page list and
 *				 returns a list of the pages.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 * @pages: pointer to the list of pages that would be returned to the caller.
 * @n_req_pages: number of requested pages.
 *
 * Driver must call this function to properly de-allocate previously enlisted
 * balloon pages before definitively releasing them back to the guest system.
 * This function tries to remove @n_req_pages from the ballooned pages and
 * return them to the caller in the @pages list.
 *
 * Note that this function may fail to dequeue some pages even if the balloon
 * isn't empty - since the page list can be temporarily empty due to compaction
 * of isolated pages.
 *
 * Return: number of pages that were added to the @pages list.
 */
size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
				 struct list_head *pages, size_t n_req_pages)
{
	struct page *page, *tmp;
	unsigned long flags;
	size_t n_pages = 0;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		if (n_pages == n_req_pages)
			break;

		/*
		 * Block others from accessing the 'page' while we get around to
		 * establishing additional references and preparing the 'page'
		 * to be released by the balloon driver.
		 */
		if (!trylock_page(page))
			continue;

		if (IS_ENABLED(CONFIG_BALLOON_COMPACTION) &&
		    PageIsolated(page)) {
			/* raced with isolation */
			unlock_page(page);
			continue;
		}
		balloon_page_delete(page);
		__count_vm_event(BALLOON_DEFLATE);
		list_add(&page->lru, pages);
		unlock_page(page);
		n_pages++;
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
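
/*
 * Illustrative sketch (not part of this file): batch deflate. A short
 * count is normal when compaction has pages isolated, so the caller
 * retries with the remainder rather than treating it as an error. Names
 * prefixed 'my_' are illustrative placeholders.
 */
#if 0	/* example only */
static size_t my_leak_balloon(struct balloon_dev_info *b_dev_info, size_t num)
{
	LIST_HEAD(pages);
	struct page *page, *tmp;
	size_t n_pages;

	n_pages = balloon_page_list_dequeue(b_dev_info, &pages, num);
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		my_tell_host_release(page_to_pfn(page));
		list_del(&page->lru);
		__free_page(page);
	}
	return n_pages;
}
#endif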

/*
 * balloon_page_alloc - allocates a new page for insertion into the balloon
 *			page list.
 *
 * Driver must call this function to properly allocate a new balloon page.
 * Driver must call balloon_page_enqueue before definitively removing the page
 * from the guest system.
 *
 * Return: struct page for the allocated page or NULL on allocation failure.
 */
struct page *balloon_page_alloc(void)
{
	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
				       __GFP_NOMEMALLOC | __GFP_NORETRY |
				       __GFP_NOWARN);
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_alloc);

/*
 * balloon_page_enqueue - inserts a new page into the balloon page list.
 *
 * @b_dev_info: balloon device descriptor where we will insert a new page
 * @page: new page to enqueue - allocated using balloon_page_alloc.
 *
 * Drivers must call this function to properly enqueue a newly allocated balloon
 * page before definitively removing the page from the guest system.
 *
 * Drivers must not call balloon_page_enqueue on pages that have been pushed to
 * a list with balloon_page_push before removing them with balloon_page_pop. To
 * enqueue a list of pages, use balloon_page_list_enqueue instead.
 */
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
			  struct page *page)
{
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_enqueue_one(b_dev_info, page);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
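
/*
 * Illustrative sketch (not part of this file): the single-page inflate
 * pairing of balloon_page_alloc() and balloon_page_enqueue(). Per the
 * comment above, the page must go straight from alloc to enqueue here;
 * pages staged with balloon_page_push() belong to the list API instead.
 * 'my_inflate_one' and 'my_tell_host_about' are illustrative names.
 */
#if 0	/* example only */
static int my_inflate_one(struct balloon_dev_info *b_dev_info)
{
	struct page *page = balloon_page_alloc();

	if (!page)
		return -ENOMEM;
	balloon_page_enqueue(b_dev_info, page);
	my_tell_host_about(page_to_pfn(page));
	return 0;
}
#endif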

/*
 * balloon_page_dequeue - removes a page from balloon's page list and returns
 *			  its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Driver must call this function to properly dequeue a previously enqueued page
 * before definitively releasing it back to the guest system.
 *
 * Caller must perform its own accounting to ensure that this
 * function is called only if some pages are actually enqueued.
 *
 * Note that this function may fail to dequeue some pages even if there are
 * some enqueued pages - since the page list can be temporarily empty due to
 * the compaction of isolated pages.
 *
 * TODO: remove the caller accounting requirements, and allow caller to wait
 * until all pages can be dequeued.
 *
 * Return: struct page for the dequeued page, or NULL if no page was dequeued.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	unsigned long flags;
	LIST_HEAD(pages);
	int n_pages;

	n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1);

	if (n_pages != 1) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there are no isolated pages, then something
		 * went out of track and some balloon pages are lost.
		 * BUG() here, otherwise the balloon driver may get stuck in
		 * an infinite loop while attempting to release all its pages.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		return NULL;
	}
	return list_first_entry(&pages, struct page, lru);
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
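
/*
 * Illustrative sketch (not part of this file): deflating everything while
 * honoring the accounting rule above - the driver keeps its own count of
 * enqueued pages ('my_num_pages' is illustrative) and never calls
 * balloon_page_dequeue() more times than that, retrying on NULL returns
 * caused by transient isolation.
 */
#if 0	/* example only */
static void my_deflate_all(struct balloon_dev_info *b_dev_info,
			   size_t my_num_pages)
{
	while (my_num_pages) {
		struct page *page = balloon_page_dequeue(b_dev_info);

		if (!page) {
			cond_resched();	/* transiently isolated; retry */
			continue;
		}
		my_tell_host_release(page_to_pfn(page));
		__free_page(page);
		my_num_pages--;
	}
}
#endif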

#ifdef CONFIG_BALLOON_COMPACTION

static bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_del(&page->lru);
	b_dev_info->isolated_pages++;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	return true;
}

static void balloon_page_putback(struct page *page)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_add(&page->lru, &b_dev_info->pages);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

/* move_to_new_page() counterpart for a ballooned page */
static int balloon_page_migrate(struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct balloon_dev_info *balloon = balloon_page_device(page);

	/*
	 * We cannot easily support the no copy case here so ignore it as it
	 * is unlikely to be used with balloon pages. See include/linux/hmm.h
	 * for a user of the MIGRATE_SYNC_NO_COPY mode.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	return balloon->migratepage(balloon, newpage, page, mode);
}

const struct movable_operations balloon_mops = {
	.migrate_page = balloon_page_migrate,
	.isolate_page = balloon_page_isolate,
	.putback_page = balloon_page_putback,
};
EXPORT_SYMBOL_GPL(balloon_mops);
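
/*
 * Illustrative sketch (not part of this file): in the modern API all a
 * driver wires up is the migratepage callback on its balloon_dev_info;
 * balloon_page_insert() marks each enqueued page movable via &balloon_mops
 * behind the scenes. 'my_migratepage' and 'my_balloon_init' are
 * illustrative names.
 */
#if 0	/* example only */
static int my_migratepage(struct balloon_dev_info *balloon,
			  struct page *newpage, struct page *page,
			  enum migrate_mode mode);

static void my_balloon_init(struct balloon_dev_info *b_dev_info)
{
	balloon_devinfo_init(b_dev_info);
	b_dev_info->migratepage = my_migratepage;
}
#endif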

#endif /* CONFIG_BALLOON_COMPACTION */