mm/memremap.c (v6.2)
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
  3#include <linux/device.h>
  4#include <linux/io.h>
  5#include <linux/kasan.h>
  6#include <linux/memory_hotplug.h>
  7#include <linux/memremap.h>
  8#include <linux/pfn_t.h>
  9#include <linux/swap.h>
 10#include <linux/mmzone.h>
 11#include <linux/swapops.h>
 12#include <linux/types.h>
 13#include <linux/wait_bit.h>
 14#include <linux/xarray.h>
 15#include "internal.h"
 16
 17static DEFINE_XARRAY(pgmap_array);
 18
 19/*
 20 * The memremap() and memremap_pages() interfaces are alternately used
 21 * to map persistent memory namespaces. These interfaces place different
 22 * constraints on the alignment and size of the mapping (namespace).
 23 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 24 * only map subsections (2MB), and on at least one architecture (PowerPC)
 25 * the minimum mapping granularity of memremap_pages() is 16MB.
 26 *
 27 * The role of memremap_compat_align() is to communicate the minimum
 28 * arch supported alignment of a namespace such that it can freely
 29 * switch modes without violating the arch constraint. Namely, do not
 30 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 31 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 32 */
 33#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
 34unsigned long memremap_compat_align(void)
 35{
 36	return SUBSECTION_SIZE;
 37}
 38EXPORT_SYMBOL_GPL(memremap_compat_align);
 39#endif
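
/*
 * Editorial sketch, not part of the kernel file: one way a persistent
 * memory namespace driver could use memremap_compat_align() to reject a
 * region whose start or size would prevent switching between memremap()
 * and memremap_pages() modes later. The function name and the -EINVAL
 * policy are illustrative assumptions, not in-tree code.
 */
static int example_check_namespace_align(resource_size_t start,
					 resource_size_t size)
{
	unsigned long align = memremap_compat_align();

	if (!IS_ALIGNED(start, align) || !IS_ALIGNED(size, align))
		return -EINVAL;
	return 0;
}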
 40
 41#ifdef CONFIG_FS_DAX
 42DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
 43EXPORT_SYMBOL(devmap_managed_key);
 44
 45static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
 46{
 47	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
 48		static_branch_dec(&devmap_managed_key);
 49}
 50
 51static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
 52{
 53	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
 54		static_branch_inc(&devmap_managed_key);
 55}
 56#else
 57static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
 58{
 59}
 60static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
 61{
 62}
 63#endif /* CONFIG_FS_DAX */
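
/*
 * Editorial sketch: devmap_managed_key exists so the common page-put path
 * only pays for the FS_DAX special case while an fsdax pgmap is actually
 * live. A caller-side gate has roughly the shape below; treat it as an
 * approximation, not a copy of the wrapper in include/linux/mm.h.
 */
static inline bool example_put_devmap_managed(struct page *page, int refs)
{
	if (!static_branch_unlikely(&devmap_managed_key))
		return false;
	if (!is_zone_device_page(page))
		return false;
	return __put_devmap_managed_page_refs(page, refs);
}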
 64
 65static void pgmap_array_delete(struct range *range)
 66{
 67	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
 68			NULL, GFP_KERNEL);
 69	synchronize_rcu();
 70}
 71
 72static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
 73{
 74	struct range *range = &pgmap->ranges[range_id];
 75	unsigned long pfn = PHYS_PFN(range->start);
 76
 77	if (range_id)
 78		return pfn;
 79	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
 80}
 81
 82bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
 83{
 84	int i;
 85
 86	for (i = 0; i < pgmap->nr_range; i++) {
 87		struct range *range = &pgmap->ranges[i];
 88
 89		if (pfn >= PHYS_PFN(range->start) &&
 90		    pfn <= PHYS_PFN(range->end))
 91			return pfn >= pfn_first(pgmap, i);
 92	}
 93
 94	return false;
 95}
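
/*
 * Editorial sketch: pgmap_pfn_valid() returns true only for pfns inside
 * one of the pgmap's ranges and past any altmap reservation, so callers
 * can use it to guard pfn_to_page() when walking device memory. The
 * helper name below is illustrative.
 */
static struct page *example_devmap_page(struct dev_pagemap *pgmap,
					unsigned long pfn)
{
	if (!pgmap_pfn_valid(pgmap, pfn))
		return NULL;	/* pfn is reserved or outside this pgmap */
	return pfn_to_page(pfn);
}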
 96
 97static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
 98{
 99	const struct range *range = &pgmap->ranges[range_id];
100
101	return (range->start + range_len(range)) >> PAGE_SHIFT;
102}
103
104static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)
105{
106	return (pfn_end(pgmap, range_id) -
107		pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift;
108}
109
110static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
111{
112	struct range *range = &pgmap->ranges[range_id];
113	struct page *first_page;
114
115	/* make sure to access a memmap that was actually initialized */
116	first_page = pfn_to_page(pfn_first(pgmap, range_id));
117
118	/* pages are dead and unused, undo the arch mapping */
119	mem_hotplug_begin();
120	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
121				   PHYS_PFN(range_len(range)));
122	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
123		__remove_pages(PHYS_PFN(range->start),
124			       PHYS_PFN(range_len(range)), NULL);
125	} else {
126		arch_remove_memory(range->start, range_len(range),
127				pgmap_altmap(pgmap));
128		kasan_remove_zero_shadow(__va(range->start), range_len(range));
129	}
130	mem_hotplug_done();
131
132	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
133	pgmap_array_delete(range);
134}
135
136void memunmap_pages(struct dev_pagemap *pgmap)
137{
138	int i;
139
140	percpu_ref_kill(&pgmap->ref);
141	if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
142	    pgmap->type != MEMORY_DEVICE_COHERENT)
143		for (i = 0; i < pgmap->nr_range; i++)
144			percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));
145
146	wait_for_completion(&pgmap->done);
147
148	for (i = 0; i < pgmap->nr_range; i++)
149		pageunmap_range(pgmap, i);
150	percpu_ref_exit(&pgmap->ref);
151
152	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
153	devmap_managed_enable_put(pgmap);
154}
155EXPORT_SYMBOL_GPL(memunmap_pages);
156
157static void devm_memremap_pages_release(void *data)
158{
159	memunmap_pages(data);
160}
161
162static void dev_pagemap_percpu_release(struct percpu_ref *ref)
163{
164	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);
165
166	complete(&pgmap->done);
167}
168
169static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
170		int range_id, int nid)
171{
172	const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
173	struct range *range = &pgmap->ranges[range_id];
174	struct dev_pagemap *conflict_pgmap;
175	int error, is_ram;
176
177	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
178				"altmap not supported for multiple ranges\n"))
179		return -EINVAL;
180
181	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
182	if (conflict_pgmap) {
183		WARN(1, "Conflicting mapping in same section\n");
184		put_dev_pagemap(conflict_pgmap);
185		return -ENOMEM;
186	}
187
188	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
189	if (conflict_pgmap) {
190		WARN(1, "Conflicting mapping in same section\n");
191		put_dev_pagemap(conflict_pgmap);
192		return -ENOMEM;
193	}
194
195	is_ram = region_intersects(range->start, range_len(range),
196		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
197
198	if (is_ram != REGION_DISJOINT) {
199		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
200				is_ram == REGION_MIXED ? "mixed" : "ram",
201				range->start, range->end);
202		return -ENXIO;
203	}
204
205	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
206				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
207	if (error)
208		return error;
209
210	if (nid < 0)
211		nid = numa_mem_id();
212
213	error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
214			range_len(range));
215	if (error)
216		goto err_pfn_remap;
217
218	if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
219		error = -EINVAL;
220		goto err_kasan;
221	}
222
223	mem_hotplug_begin();
224
225	/*
226	 * For device private memory we call add_pages() as we only need to
 227	 * allocate and initialize struct page for the device memory. Moreover,
 228	 * the device memory is inaccessible, thus we do not want to
229	 * create a linear mapping for the memory like arch_add_memory()
230	 * would do.
231	 *
232	 * For all other device memory types, which are accessible by
233	 * the CPU, we do want the linear mapping and thus use
234	 * arch_add_memory().
235	 */
236	if (is_private) {
237		error = add_pages(nid, PHYS_PFN(range->start),
238				PHYS_PFN(range_len(range)), params);
239	} else {
240		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
241		if (error) {
242			mem_hotplug_done();
243			goto err_kasan;
244		}
245
246		error = arch_add_memory(nid, range->start, range_len(range),
247					params);
248	}
249
250	if (!error) {
251		struct zone *zone;
252
253		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
254		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
255				PHYS_PFN(range_len(range)), params->altmap,
256				MIGRATE_MOVABLE);
257	}
258
259	mem_hotplug_done();
260	if (error)
261		goto err_add_memory;
262
263	/*
264	 * Initialization of the pages has been deferred until now in order
265	 * to allow us to do the work while not holding the hotplug lock.
266	 */
267	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
268				PHYS_PFN(range->start),
269				PHYS_PFN(range_len(range)), pgmap);
270	if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
271	    pgmap->type != MEMORY_DEVICE_COHERENT)
272		percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id));
273	return 0;
274
275err_add_memory:
276	if (!is_private)
277		kasan_remove_zero_shadow(__va(range->start), range_len(range));
278err_kasan:
279	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
280err_pfn_remap:
281	pgmap_array_delete(range);
282	return error;
283}
284
285
286/*
 287 * Non-device-managed version of devm_memremap_pages(), undone by
 288 * memunmap_pages().  Please use devm_memremap_pages() if you have a struct
289 * device available.
290 */
291void *memremap_pages(struct dev_pagemap *pgmap, int nid)
292{
293	struct mhp_params params = {
294		.altmap = pgmap_altmap(pgmap),
295		.pgmap = pgmap,
296		.pgprot = PAGE_KERNEL,
297	};
298	const int nr_range = pgmap->nr_range;
299	int error, i;
300
301	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
302		return ERR_PTR(-EINVAL);
303
304	switch (pgmap->type) {
305	case MEMORY_DEVICE_PRIVATE:
306		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
307			WARN(1, "Device private memory not supported\n");
308			return ERR_PTR(-EINVAL);
309		}
310		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
311			WARN(1, "Missing migrate_to_ram method\n");
312			return ERR_PTR(-EINVAL);
313		}
314		if (!pgmap->ops->page_free) {
315			WARN(1, "Missing page_free method\n");
316			return ERR_PTR(-EINVAL);
317		}
318		if (!pgmap->owner) {
319			WARN(1, "Missing owner\n");
320			return ERR_PTR(-EINVAL);
321		}
322		break;
323	case MEMORY_DEVICE_COHERENT:
324		if (!pgmap->ops->page_free) {
325			WARN(1, "Missing page_free method\n");
326			return ERR_PTR(-EINVAL);
327		}
328		if (!pgmap->owner) {
329			WARN(1, "Missing owner\n");
330			return ERR_PTR(-EINVAL);
331		}
332		break;
333	case MEMORY_DEVICE_FS_DAX:
334		if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
335			WARN(1, "File system DAX not supported\n");
336			return ERR_PTR(-EINVAL);
337		}
338		params.pgprot = pgprot_decrypted(params.pgprot);
339		break;
340	case MEMORY_DEVICE_GENERIC:
341		break;
342	case MEMORY_DEVICE_PCI_P2PDMA:
343		params.pgprot = pgprot_noncached(params.pgprot);
344		break;
345	default:
346		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
347		break;
348	}
349
350	init_completion(&pgmap->done);
351	error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
352				GFP_KERNEL);
353	if (error)
354		return ERR_PTR(error);
355
356	devmap_managed_enable_get(pgmap);
357
358	/*
359	 * Clear the pgmap nr_range as it will be incremented for each
360	 * successfully processed range. This communicates how many
361	 * regions to unwind in the abort case.
362	 */
363	pgmap->nr_range = 0;
364	error = 0;
365	for (i = 0; i < nr_range; i++) {
366		error = pagemap_range(pgmap, &params, i, nid);
367		if (error)
368			break;
369		pgmap->nr_range++;
370	}
371
372	if (i < nr_range) {
373		memunmap_pages(pgmap);
374		pgmap->nr_range = nr_range;
375		return ERR_PTR(error);
376	}
377
378	return __va(pgmap->ranges[0].start);
379}
380EXPORT_SYMBOL_GPL(memremap_pages);
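
/*
 * Editorial sketch: minimal single-range use of memremap_pages() for a
 * MEMORY_DEVICE_GENERIC region, assuming the caller owns the physical
 * span [res_start, res_start + res_size) and a zeroed pgmap. A real
 * driver keeps the pgmap alive and calls memunmap_pages() on teardown;
 * the function and parameter names here are illustrative.
 */
static void *example_map_generic_range(struct dev_pagemap *pgmap,
				       u64 res_start, u64 res_size)
{
	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->nr_range = 1;
	pgmap->range.start = res_start;
	pgmap->range.end = res_start + res_size - 1;

	/* returns __va() of the first range, or ERR_PTR() on failure */
	return memremap_pages(pgmap, NUMA_NO_NODE);
}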
381
382/**
383 * devm_memremap_pages - remap and provide memmap backing for the given resource
384 * @dev: hosting device for @res
385 * @pgmap: pointer to a struct dev_pagemap
386 *
387 * Notes:
 388 * 1/ At a minimum the range and type members of @pgmap must be initialized
389 *    by the caller before passing it to this function
390 *
391 * 2/ The altmap field may optionally be initialized, in which case
392 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
393 *
 394 * 3/ The ref field may optionally be provided, in which case pgmap->ref must be
395 *    'live' on entry and will be killed and reaped at
396 *    devm_memremap_pages_release() time, or if this routine fails.
397 *
398 * 4/ range is expected to be a host memory range that could feasibly be
399 *    treated as a "System RAM" range, i.e. not a device mmio range, but
400 *    this is not enforced.
401 */
402void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
403{
404	int error;
405	void *ret;
406
407	ret = memremap_pages(pgmap, dev_to_node(dev));
408	if (IS_ERR(ret))
409		return ret;
410
411	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
412			pgmap);
413	if (error)
414		return ERR_PTR(error);
415	return ret;
416}
417EXPORT_SYMBOL_GPL(devm_memremap_pages);
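
/*
 * Editorial sketch: with the devm_ variant the unmap is registered as a
 * device action, so a probe path only needs to check the return value.
 * The probe function name is illustrative.
 */
static int example_probe_one(struct device *dev, struct dev_pagemap *pgmap)
{
	void *addr = devm_memremap_pages(dev, pgmap);

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	/* addr maps pgmap->ranges[0].start; freed automatically on unbind */
	return 0;
}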
418
419void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
420{
421	devm_release_action(dev, devm_memremap_pages_release, pgmap);
422}
423EXPORT_SYMBOL_GPL(devm_memunmap_pages);
424
425unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
426{
427	/* number of pfns from base where pfn_to_page() is valid */
428	if (altmap)
429		return altmap->reserve + altmap->free;
430	return 0;
431}
432
433void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
434{
435	altmap->alloc -= nr_pfns;
436}
437
438/**
439 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 440 * @pfn: page frame number to look up a page map for
441 * @pgmap: optional known pgmap that already has a reference
442 *
443 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
444 * is non-NULL but does not cover @pfn the reference to it will be released.
445 */
446struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
447		struct dev_pagemap *pgmap)
448{
449	resource_size_t phys = PFN_PHYS(pfn);
450
451	/*
452	 * In the cached case we're already holding a live reference.
453	 */
454	if (pgmap) {
455		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
456			return pgmap;
457		put_dev_pagemap(pgmap);
458	}
459
460	/* fall back to slow path lookup */
461	rcu_read_lock();
462	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
463	if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))
464		pgmap = NULL;
465	rcu_read_unlock();
466
467	return pgmap;
468}
469EXPORT_SYMBOL_GPL(get_dev_pagemap);
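
/*
 * Editorial sketch: callers that resolve many pfns usually cache the last
 * pgmap and pass it back in, so the RCU/xarray lookup is only taken when
 * crossing into a different range. This loop shows the shape of that
 * pattern; it is not copied from any in-tree user.
 */
static void example_walk_pfns(unsigned long start_pfn, unsigned long nr)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr; pfn++) {
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (!pgmap)
			continue;	/* pfn is not ZONE_DEVICE backed */
		/* ... operate on pfn_to_page(pfn) ... */
	}
	if (pgmap)
		put_dev_pagemap(pgmap);
}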
470
471void free_zone_device_page(struct page *page)
472{
473	if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))
474		return;
475
476	mem_cgroup_uncharge(page_folio(page));
477
478	/*
479	 * Note: we don't expect anonymous compound pages yet. Once supported
480	 * and we could PTE-map them similar to THP, we'd have to clear
481	 * PG_anon_exclusive on all tail pages.
482	 */
483	VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);
484	if (PageAnon(page))
485		__ClearPageAnonExclusive(page);
486
487	/*
488	 * When a device managed page is freed, the page->mapping field
489	 * may still contain a (stale) mapping value. For example, the
490	 * lower bits of page->mapping may still identify the page as an
491	 * anonymous page. Ultimately, this entire field is just stale
492	 * and wrong, and it will cause errors if not cleared.  One
493	 * example is:
494	 *
495	 *  migrate_vma_pages()
496	 *    migrate_vma_insert_page()
497	 *      page_add_new_anon_rmap()
498	 *        __page_set_anon_rmap()
499	 *          ...checks page->mapping, via PageAnon(page) call,
500	 *            and incorrectly concludes that the page is an
501	 *            anonymous page. Therefore, it incorrectly,
502	 *            silently fails to set up the new anon rmap.
503	 *
504	 * For other types of ZONE_DEVICE pages, migration is either
505	 * handled differently or not done at all, so there is no need
506	 * to clear page->mapping.
507	 */
508	page->mapping = NULL;
509	page->pgmap->ops->page_free(page);
510
511	if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
512	    page->pgmap->type != MEMORY_DEVICE_COHERENT)
513		/*
514		 * Reset the page count to 1 to prepare for handing out the page
515		 * again.
516		 */
517		set_page_count(page, 1);
518	else
519		put_dev_pagemap(page->pgmap);
520}
521
522void zone_device_page_init(struct page *page)
523{
524	/*
525	 * Drivers shouldn't be allocating pages after calling
526	 * memunmap_pages().
527	 */
528	WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref));
529	set_page_count(page, 1);
530	lock_page(page);
531}
532EXPORT_SYMBOL_GPL(zone_device_page_init);
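
/*
 * Editorial sketch: a device driver that has picked a free pfn from its
 * own allocator initializes the struct page with zone_device_page_init()
 * before handing it out (for example to migrate_vma). The allocator is a
 * placeholder; only the pfn-to-page step is shown.
 */
static struct page *example_alloc_device_page(unsigned long free_pfn)
{
	struct page *page = pfn_to_page(free_pfn);

	/* takes a pgmap reference, sets refcount to 1 and locks the page */
	zone_device_page_init(page);
	return page;
}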
533
534#ifdef CONFIG_FS_DAX
535bool __put_devmap_managed_page_refs(struct page *page, int refs)
536{
537	if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
538		return false;
539
540	/*
541	 * fsdax page refcounts are 1-based, rather than 0-based: if
542	 * refcount is 1, then the page is free and the refcount is
543	 * stable because nobody holds a reference on the page.
544	 */
545	if (page_ref_sub_return(page, refs) == 1)
546		wake_up_var(&page->_refcount);
547	return true;
548}
549EXPORT_SYMBOL(__put_devmap_managed_page_refs);
550#endif /* CONFIG_FS_DAX */
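
/*
 * Editorial sketch: the wake_up_var(&page->_refcount) above pairs with a
 * waiter, such as the fsdax truncate path, that sleeps until the 1-based
 * refcount drops back to 1. The in-tree waiter lives in the fs/dax code;
 * this only shows the shape of the wait side.
 */
static void example_wait_fsdax_page_idle(struct page *page)
{
	wait_var_event(&page->_refcount, page_ref_count(page) == 1);
}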
mm/memremap.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
  3#include <linux/device.h>
  4#include <linux/io.h>
  5#include <linux/kasan.h>
  6#include <linux/memory_hotplug.h>
  7#include <linux/memremap.h>
  8#include <linux/pfn_t.h>
  9#include <linux/swap.h>
 10#include <linux/mm.h>
 11#include <linux/mmzone.h>
 12#include <linux/swapops.h>
 13#include <linux/types.h>
 14#include <linux/wait_bit.h>
 15#include <linux/xarray.h>
 16#include "internal.h"
 17
 18static DEFINE_XARRAY(pgmap_array);
 19
 20/*
 21 * The memremap() and memremap_pages() interfaces are alternately used
 22 * to map persistent memory namespaces. These interfaces place different
 23 * constraints on the alignment and size of the mapping (namespace).
 24 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 25 * only map subsections (2MB), and on at least one architecture (PowerPC)
 26 * the minimum mapping granularity of memremap_pages() is 16MB.
 27 *
 28 * The role of memremap_compat_align() is to communicate the minimum
 29 * arch supported alignment of a namespace such that it can freely
 30 * switch modes without violating the arch constraint. Namely, do not
 31 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 32 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 33 */
 34#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
 35unsigned long memremap_compat_align(void)
 36{
 37	return SUBSECTION_SIZE;
 38}
 39EXPORT_SYMBOL_GPL(memremap_compat_align);
 40#endif
 41
 42#ifdef CONFIG_FS_DAX
 43DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
 44EXPORT_SYMBOL(devmap_managed_key);
 45
 46static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
 47{
 48	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
 49		static_branch_dec(&devmap_managed_key);
 50}
 51
 52static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
 53{
 54	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
 55		static_branch_inc(&devmap_managed_key);
 56}
 57#else
 58static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
 59{
 60}
 61static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
 62{
 63}
 64#endif /* CONFIG_FS_DAX */
 65
 66static void pgmap_array_delete(struct range *range)
 67{
 68	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
 69			NULL, GFP_KERNEL);
 70	synchronize_rcu();
 71}
 72
 73static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
 74{
 75	struct range *range = &pgmap->ranges[range_id];
 76	unsigned long pfn = PHYS_PFN(range->start);
 77
 78	if (range_id)
 79		return pfn;
 80	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
 81}
 82
 83bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
 84{
 85	int i;
 86
 87	for (i = 0; i < pgmap->nr_range; i++) {
 88		struct range *range = &pgmap->ranges[i];
 89
 90		if (pfn >= PHYS_PFN(range->start) &&
 91		    pfn <= PHYS_PFN(range->end))
 92			return pfn >= pfn_first(pgmap, i);
 93	}
 94
 95	return false;
 96}
 97
 98static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
 99{
100	const struct range *range = &pgmap->ranges[range_id];
101
102	return (range->start + range_len(range)) >> PAGE_SHIFT;
103}
104
105static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)
106{
107	return (pfn_end(pgmap, range_id) -
108		pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift;
109}
110
111static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
112{
113	struct range *range = &pgmap->ranges[range_id];
114	struct page *first_page;
115
116	/* make sure to access a memmap that was actually initialized */
117	first_page = pfn_to_page(pfn_first(pgmap, range_id));
118
119	/* pages are dead and unused, undo the arch mapping */
120	mem_hotplug_begin();
121	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
122				   PHYS_PFN(range_len(range)));
123	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
124		__remove_pages(PHYS_PFN(range->start),
125			       PHYS_PFN(range_len(range)), NULL);
126	} else {
127		arch_remove_memory(range->start, range_len(range),
128				pgmap_altmap(pgmap));
129		kasan_remove_zero_shadow(__va(range->start), range_len(range));
130	}
131	mem_hotplug_done();
132
133	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
134	pgmap_array_delete(range);
135}
136
137void memunmap_pages(struct dev_pagemap *pgmap)
138{
139	int i;
140
141	percpu_ref_kill(&pgmap->ref);
142	if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
143	    pgmap->type != MEMORY_DEVICE_COHERENT)
144		for (i = 0; i < pgmap->nr_range; i++)
145			percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));
146
147	wait_for_completion(&pgmap->done);
148
149	for (i = 0; i < pgmap->nr_range; i++)
150		pageunmap_range(pgmap, i);
151	percpu_ref_exit(&pgmap->ref);
152
153	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
154	devmap_managed_enable_put(pgmap);
155}
156EXPORT_SYMBOL_GPL(memunmap_pages);
157
158static void devm_memremap_pages_release(void *data)
159{
160	memunmap_pages(data);
161}
162
163static void dev_pagemap_percpu_release(struct percpu_ref *ref)
164{
165	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);
166
167	complete(&pgmap->done);
168}
169
170static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
171		int range_id, int nid)
172{
173	const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
174	struct range *range = &pgmap->ranges[range_id];
175	struct dev_pagemap *conflict_pgmap;
176	int error, is_ram;
177
178	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
179				"altmap not supported for multiple ranges\n"))
180		return -EINVAL;
181
182	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
183	if (conflict_pgmap) {
184		WARN(1, "Conflicting mapping in same section\n");
185		put_dev_pagemap(conflict_pgmap);
186		return -ENOMEM;
187	}
188
189	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
190	if (conflict_pgmap) {
191		WARN(1, "Conflicting mapping in same section\n");
192		put_dev_pagemap(conflict_pgmap);
193		return -ENOMEM;
194	}
195
196	is_ram = region_intersects(range->start, range_len(range),
197		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
198
199	if (is_ram != REGION_DISJOINT) {
200		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
201				is_ram == REGION_MIXED ? "mixed" : "ram",
202				range->start, range->end);
203		return -ENXIO;
204	}
205
206	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
207				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
208	if (error)
209		return error;
210
211	if (nid < 0)
212		nid = numa_mem_id();
213
214	error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
215			range_len(range));
216	if (error)
217		goto err_pfn_remap;
218
219	if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
220		error = -EINVAL;
221		goto err_kasan;
222	}
223
224	mem_hotplug_begin();
225
226	/*
227	 * For device private memory we call add_pages() as we only need to
 228	 * allocate and initialize struct page for the device memory. Moreover,
 229	 * the device memory is inaccessible, thus we do not want to
230	 * create a linear mapping for the memory like arch_add_memory()
231	 * would do.
232	 *
233	 * For all other device memory types, which are accessible by
234	 * the CPU, we do want the linear mapping and thus use
235	 * arch_add_memory().
236	 */
237	if (is_private) {
238		error = add_pages(nid, PHYS_PFN(range->start),
239				PHYS_PFN(range_len(range)), params);
240	} else {
241		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
242		if (error) {
243			mem_hotplug_done();
244			goto err_kasan;
245		}
246
247		error = arch_add_memory(nid, range->start, range_len(range),
248					params);
249	}
250
251	if (!error) {
252		struct zone *zone;
253
254		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
255		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
256				PHYS_PFN(range_len(range)), params->altmap,
257				MIGRATE_MOVABLE);
258	}
259
260	mem_hotplug_done();
261	if (error)
262		goto err_add_memory;
263
264	/*
265	 * Initialization of the pages has been deferred until now in order
266	 * to allow us to do the work while not holding the hotplug lock.
267	 */
268	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
269				PHYS_PFN(range->start),
270				PHYS_PFN(range_len(range)), pgmap);
271	if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
272	    pgmap->type != MEMORY_DEVICE_COHERENT)
273		percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id));
274	return 0;
275
276err_add_memory:
277	if (!is_private)
278		kasan_remove_zero_shadow(__va(range->start), range_len(range));
279err_kasan:
280	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
281err_pfn_remap:
282	pgmap_array_delete(range);
283	return error;
284}
285
286
287/*
 288 * Non-device-managed version of devm_memremap_pages(), undone by
 289 * memunmap_pages().  Please use devm_memremap_pages() if you have a struct
290 * device available.
291 */
292void *memremap_pages(struct dev_pagemap *pgmap, int nid)
293{
294	struct mhp_params params = {
295		.altmap = pgmap_altmap(pgmap),
296		.pgmap = pgmap,
297		.pgprot = PAGE_KERNEL,
298	};
299	const int nr_range = pgmap->nr_range;
300	int error, i;
301
302	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
303		return ERR_PTR(-EINVAL);
304
305	switch (pgmap->type) {
306	case MEMORY_DEVICE_PRIVATE:
307		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
308			WARN(1, "Device private memory not supported\n");
309			return ERR_PTR(-EINVAL);
310		}
311		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
312			WARN(1, "Missing migrate_to_ram method\n");
313			return ERR_PTR(-EINVAL);
314		}
315		if (!pgmap->ops->page_free) {
316			WARN(1, "Missing page_free method\n");
317			return ERR_PTR(-EINVAL);
318		}
319		if (!pgmap->owner) {
320			WARN(1, "Missing owner\n");
321			return ERR_PTR(-EINVAL);
322		}
323		break;
324	case MEMORY_DEVICE_COHERENT:
325		if (!pgmap->ops->page_free) {
326			WARN(1, "Missing page_free method\n");
327			return ERR_PTR(-EINVAL);
328		}
329		if (!pgmap->owner) {
330			WARN(1, "Missing owner\n");
331			return ERR_PTR(-EINVAL);
332		}
333		break;
334	case MEMORY_DEVICE_FS_DAX:
335		if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
336			WARN(1, "File system DAX not supported\n");
337			return ERR_PTR(-EINVAL);
338		}
339		params.pgprot = pgprot_decrypted(params.pgprot);
340		break;
341	case MEMORY_DEVICE_GENERIC:
342		break;
343	case MEMORY_DEVICE_PCI_P2PDMA:
344		params.pgprot = pgprot_noncached(params.pgprot);
345		break;
346	default:
347		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
348		break;
349	}
350
351	init_completion(&pgmap->done);
352	error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
353				GFP_KERNEL);
354	if (error)
355		return ERR_PTR(error);
356
357	devmap_managed_enable_get(pgmap);
358
359	/*
360	 * Clear the pgmap nr_range as it will be incremented for each
361	 * successfully processed range. This communicates how many
362	 * regions to unwind in the abort case.
363	 */
364	pgmap->nr_range = 0;
365	error = 0;
366	for (i = 0; i < nr_range; i++) {
367		error = pagemap_range(pgmap, &params, i, nid);
368		if (error)
369			break;
370		pgmap->nr_range++;
371	}
372
373	if (i < nr_range) {
374		memunmap_pages(pgmap);
375		pgmap->nr_range = nr_range;
376		return ERR_PTR(error);
377	}
378
379	return __va(pgmap->ranges[0].start);
380}
381EXPORT_SYMBOL_GPL(memremap_pages);
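
/*
 * Editorial sketch: the MEMORY_DEVICE_PRIVATE checks above mean a driver
 * must supply both callbacks and an owner token before calling
 * memremap_pages(). The ops and function names below are illustrative
 * placeholders, not in-tree symbols.
 */
static vm_fault_t example_migrate_to_ram(struct vm_fault *vmf)
{
	/* a real driver migrates the data back to system RAM here */
	return VM_FAULT_SIGBUS;
}

static void example_page_free(struct page *page)
{
	/* return the backing device memory to the driver's allocator */
}

static const struct dev_pagemap_ops example_devmem_ops = {
	.page_free	= example_page_free,
	.migrate_to_ram	= example_migrate_to_ram,
};
/* a driver would then set pgmap->ops = &example_devmem_ops and pgmap->owner */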
382
383/**
384 * devm_memremap_pages - remap and provide memmap backing for the given resource
385 * @dev: hosting device for @res
386 * @pgmap: pointer to a struct dev_pagemap
387 *
388 * Notes:
389 * 1/ At a minimum the range and type members of @pgmap must be initialized
390 *    by the caller before passing it to this function
391 *
392 * 2/ The altmap field may optionally be initialized, in which case
393 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
394 *
 395 * 3/ The ref field may optionally be provided, in which case pgmap->ref must be
396 *    'live' on entry and will be killed and reaped at
397 *    devm_memremap_pages_release() time, or if this routine fails.
398 *
399 * 4/ range is expected to be a host memory range that could feasibly be
400 *    treated as a "System RAM" range, i.e. not a device mmio range, but
401 *    this is not enforced.
402 */
403void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
404{
405	int error;
406	void *ret;
407
408	ret = memremap_pages(pgmap, dev_to_node(dev));
409	if (IS_ERR(ret))
410		return ret;
411
412	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
413			pgmap);
414	if (error)
415		return ERR_PTR(error);
416	return ret;
417}
418EXPORT_SYMBOL_GPL(devm_memremap_pages);
419
420void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
421{
422	devm_release_action(dev, devm_memremap_pages_release, pgmap);
423}
424EXPORT_SYMBOL_GPL(devm_memunmap_pages);
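
/*
 * Editorial sketch: devm_memunmap_pages() is for the rare driver that must
 * drop the mapping before its device goes away, e.g. on a manual unplug
 * path; otherwise the devm action added above handles teardown. The caller
 * name is illustrative.
 */
static void example_early_teardown(struct device *dev,
				   struct dev_pagemap *pgmap)
{
	devm_memunmap_pages(dev, pgmap);	/* runs memunmap_pages() now */
}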
425
426/**
427 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 428 * @pfn: page frame number to look up a page map for
429 * @pgmap: optional known pgmap that already has a reference
430 *
431 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
432 * is non-NULL but does not cover @pfn the reference to it will be released.
433 */
434struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
435		struct dev_pagemap *pgmap)
436{
437	resource_size_t phys = PFN_PHYS(pfn);
438
439	/*
440	 * In the cached case we're already holding a live reference.
441	 */
442	if (pgmap) {
443		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
444			return pgmap;
445		put_dev_pagemap(pgmap);
446	}
447
448	/* fall back to slow path lookup */
449	rcu_read_lock();
450	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
451	if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))
452		pgmap = NULL;
453	rcu_read_unlock();
454
455	return pgmap;
456}
457EXPORT_SYMBOL_GPL(get_dev_pagemap);
458
459void free_zone_device_page(struct page *page)
460{
461	if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))
462		return;
463
464	mem_cgroup_uncharge(page_folio(page));
465
466	/*
467	 * Note: we don't expect anonymous compound pages yet. Once supported
468	 * and we could PTE-map them similar to THP, we'd have to clear
469	 * PG_anon_exclusive on all tail pages.
470	 */
471	VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);
472	if (PageAnon(page))
473		__ClearPageAnonExclusive(page);
474
475	/*
476	 * When a device managed page is freed, the folio->mapping field
477	 * may still contain a (stale) mapping value. For example, the
478	 * lower bits of folio->mapping may still identify the folio as an
479	 * anonymous folio. Ultimately, this entire field is just stale
480	 * and wrong, and it will cause errors if not cleared.
481	 *
482	 * For other types of ZONE_DEVICE pages, migration is either
483	 * handled differently or not done at all, so there is no need
484	 * to clear page->mapping.
485	 */
486	page->mapping = NULL;
487	page->pgmap->ops->page_free(page);
488
489	if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
490	    page->pgmap->type != MEMORY_DEVICE_COHERENT)
491		/*
492		 * Reset the page count to 1 to prepare for handing out the page
493		 * again.
494		 */
495		set_page_count(page, 1);
496	else
497		put_dev_pagemap(page->pgmap);
498}
499
500void zone_device_page_init(struct page *page)
501{
502	/*
503	 * Drivers shouldn't be allocating pages after calling
504	 * memunmap_pages().
505	 */
506	WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref));
507	set_page_count(page, 1);
508	lock_page(page);
509}
510EXPORT_SYMBOL_GPL(zone_device_page_init);
511
512#ifdef CONFIG_FS_DAX
513bool __put_devmap_managed_page_refs(struct page *page, int refs)
514{
515	if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
516		return false;
517
518	/*
519	 * fsdax page refcounts are 1-based, rather than 0-based: if
520	 * refcount is 1, then the page is free and the refcount is
521	 * stable because nobody holds a reference on the page.
522	 */
523	if (page_ref_sub_return(page, refs) == 1)
524		wake_up_var(&page->_refcount);
525	return true;
526}
527EXPORT_SYMBOL(__put_devmap_managed_page_refs);
528#endif /* CONFIG_FS_DAX */