v6.8 (mm/memremap.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
  3#include <linux/device.h>
  4#include <linux/io.h>
  5#include <linux/kasan.h>
  6#include <linux/memory_hotplug.h>
  7#include <linux/memremap.h>
  8#include <linux/pfn_t.h>
  9#include <linux/swap.h>
 10#include <linux/mm.h>
 11#include <linux/mmzone.h>
 12#include <linux/swapops.h>
 13#include <linux/types.h>
 14#include <linux/wait_bit.h>
 15#include <linux/xarray.h>
 16#include "internal.h"
 17
 18static DEFINE_XARRAY(pgmap_array);
 19
 20/*
 21 * The memremap() and memremap_pages() interfaces are alternately used
 22 * to map persistent memory namespaces. These interfaces place different
 23 * constraints on the alignment and size of the mapping (namespace).
 24 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
  25 * only map subsections (2MB), and on at least one architecture (PowerPC)
 26 * the minimum mapping granularity of memremap_pages() is 16MB.
 27 *
 28 * The role of memremap_compat_align() is to communicate the minimum
 29 * arch supported alignment of a namespace such that it can freely
 30 * switch modes without violating the arch constraint. Namely, do not
 31 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 32 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 33 */
 34#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
 35unsigned long memremap_compat_align(void)
 36{
 37	return SUBSECTION_SIZE;
 38}
 39EXPORT_SYMBOL_GPL(memremap_compat_align);
 40#endif
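
The constraint spelled out in the comment above can be illustrated with a small, hedged sketch: a persistent-memory style driver refusing a namespace whose start or size is not aligned to memremap_compat_align(). The function and its arguments are hypothetical and not part of this file.

/*
 * Hedged sketch: refuse a namespace layout that could not later be switched
 * between memremap() and memremap_pages() modes. ndns_start and ndns_size
 * are hypothetical caller-supplied values.
 */
static int example_check_namespace_align(resource_size_t ndns_start,
					 resource_size_t ndns_size)
{
	unsigned long align = memremap_compat_align();

	if (!IS_ALIGNED(ndns_start, align) || !IS_ALIGNED(ndns_size, align))
		return -EINVAL;
	return 0;
}
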
 41
 42#ifdef CONFIG_FS_DAX
 43DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
 44EXPORT_SYMBOL(devmap_managed_key);
 45
 46static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
 47{
 48	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
 49		static_branch_dec(&devmap_managed_key);
 50}
 51
 52static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
 53{
 54	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
 55		static_branch_inc(&devmap_managed_key);
 56}
 57#else
 58static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
 59{
 60}
 61static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
 62{
 63}
 64#endif /* CONFIG_FS_DAX */
 65
 66static void pgmap_array_delete(struct range *range)
 67{
 68	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
 69			NULL, GFP_KERNEL);
 70	synchronize_rcu();
 71}
 72
 73static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
 74{
 75	struct range *range = &pgmap->ranges[range_id];
 76	unsigned long pfn = PHYS_PFN(range->start);
 77
 78	if (range_id)
 79		return pfn;
 80	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
 81}
 82
 83bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
 84{
 85	int i;
 86
 87	for (i = 0; i < pgmap->nr_range; i++) {
 88		struct range *range = &pgmap->ranges[i];
 89
 90		if (pfn >= PHYS_PFN(range->start) &&
 91		    pfn <= PHYS_PFN(range->end))
 92			return pfn >= pfn_first(pgmap, i);
 93	}
 94
 95	return false;
 96}
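
Because pfn_first() skips any pages consumed by a device altmap, pgmap_pfn_valid() only reports pfns whose struct page was actually initialized. A hedged sketch of how a caller might use it before touching the memmap; the helper name is hypothetical.

/* Hedged sketch: only dereference the memmap for pfns the pgmap really covers. */
static struct page *example_devmap_page(struct dev_pagemap *pgmap, unsigned long pfn)
{
	if (!pgmap_pfn_valid(pgmap, pfn))
		return NULL;	/* outside every range, or inside the altmap reserve */
	return pfn_to_page(pfn);
}
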
 97
 98static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
 99{
100	const struct range *range = &pgmap->ranges[range_id];
101
102	return (range->start + range_len(range)) >> PAGE_SHIFT;
103}
104
105static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)
106{
107	return (pfn_end(pgmap, range_id) -
108		pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift;
109}
110
111static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
112{
113	struct range *range = &pgmap->ranges[range_id];
114	struct page *first_page;
115
116	/* make sure to access a memmap that was actually initialized */
117	first_page = pfn_to_page(pfn_first(pgmap, range_id));
118
119	/* pages are dead and unused, undo the arch mapping */
120	mem_hotplug_begin();
121	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
122				   PHYS_PFN(range_len(range)));
123	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
124		__remove_pages(PHYS_PFN(range->start),
125			       PHYS_PFN(range_len(range)), NULL);
126	} else {
127		arch_remove_memory(range->start, range_len(range),
128				pgmap_altmap(pgmap));
129		kasan_remove_zero_shadow(__va(range->start), range_len(range));
130	}
131	mem_hotplug_done();
132
133	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
134	pgmap_array_delete(range);
135}
136
137void memunmap_pages(struct dev_pagemap *pgmap)
138{
139	int i;
140
141	percpu_ref_kill(&pgmap->ref);
142	if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
143	    pgmap->type != MEMORY_DEVICE_COHERENT)
144		for (i = 0; i < pgmap->nr_range; i++)
145			percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));
146
147	wait_for_completion(&pgmap->done);
148
149	for (i = 0; i < pgmap->nr_range; i++)
150		pageunmap_range(pgmap, i);
151	percpu_ref_exit(&pgmap->ref);
152
153	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
154	devmap_managed_enable_put(pgmap);
155}
156EXPORT_SYMBOL_GPL(memunmap_pages);
157
158static void devm_memremap_pages_release(void *data)
159{
160	memunmap_pages(data);
161}
162
163static void dev_pagemap_percpu_release(struct percpu_ref *ref)
164{
165	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);
166
167	complete(&pgmap->done);
168}
169
170static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
171		int range_id, int nid)
172{
173	const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
174	struct range *range = &pgmap->ranges[range_id];
175	struct dev_pagemap *conflict_pgmap;
176	int error, is_ram;
177
178	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
179				"altmap not supported for multiple ranges\n"))
180		return -EINVAL;
181
182	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
183	if (conflict_pgmap) {
184		WARN(1, "Conflicting mapping in same section\n");
185		put_dev_pagemap(conflict_pgmap);
186		return -ENOMEM;
187	}
188
189	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
190	if (conflict_pgmap) {
191		WARN(1, "Conflicting mapping in same section\n");
192		put_dev_pagemap(conflict_pgmap);
193		return -ENOMEM;
194	}
195
196	is_ram = region_intersects(range->start, range_len(range),
197		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
198
199	if (is_ram != REGION_DISJOINT) {
200		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
201				is_ram == REGION_MIXED ? "mixed" : "ram",
202				range->start, range->end);
203		return -ENXIO;
204	}
205
206	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
207				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
208	if (error)
209		return error;
210
211	if (nid < 0)
212		nid = numa_mem_id();
213
214	error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
215			range_len(range));
216	if (error)
217		goto err_pfn_remap;
218
219	if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
220		error = -EINVAL;
221		goto err_kasan;
222	}
223
224	mem_hotplug_begin();
225
226	/*
227	 * For device private memory we call add_pages() as we only need to
 228	 * allocate and initialize struct page for the device memory. Moreover,
 229	 * the device memory is inaccessible, thus we do not want to
230	 * create a linear mapping for the memory like arch_add_memory()
231	 * would do.
232	 *
233	 * For all other device memory types, which are accessible by
234	 * the CPU, we do want the linear mapping and thus use
235	 * arch_add_memory().
236	 */
237	if (is_private) {
238		error = add_pages(nid, PHYS_PFN(range->start),
239				PHYS_PFN(range_len(range)), params);
240	} else {
241		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
242		if (error) {
243			mem_hotplug_done();
244			goto err_kasan;
245		}
246
247		error = arch_add_memory(nid, range->start, range_len(range),
248					params);
249	}
250
251	if (!error) {
252		struct zone *zone;
253
254		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
255		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
256				PHYS_PFN(range_len(range)), params->altmap,
257				MIGRATE_MOVABLE);
258	}
259
260	mem_hotplug_done();
261	if (error)
262		goto err_add_memory;
263
264	/*
265	 * Initialization of the pages has been deferred until now in order
266	 * to allow us to do the work while not holding the hotplug lock.
267	 */
268	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
269				PHYS_PFN(range->start),
270				PHYS_PFN(range_len(range)), pgmap);
271	if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
272	    pgmap->type != MEMORY_DEVICE_COHERENT)
273		percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id));
274	return 0;
275
276err_add_memory:
277	if (!is_private)
278		kasan_remove_zero_shadow(__va(range->start), range_len(range));
279err_kasan:
280	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
281err_pfn_remap:
282	pgmap_array_delete(range);
283	return error;
284}
285
286
287/*
288 * Not device managed version of devm_memremap_pages, undone by
289 * memunmap_pages().  Please use devm_memremap_pages if you have a struct
290 * device available.
291 */
292void *memremap_pages(struct dev_pagemap *pgmap, int nid)
293{
294	struct mhp_params params = {
295		.altmap = pgmap_altmap(pgmap),
296		.pgmap = pgmap,
297		.pgprot = PAGE_KERNEL,
298	};
299	const int nr_range = pgmap->nr_range;
300	int error, i;
301
302	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
303		return ERR_PTR(-EINVAL);
304
305	switch (pgmap->type) {
306	case MEMORY_DEVICE_PRIVATE:
307		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
308			WARN(1, "Device private memory not supported\n");
309			return ERR_PTR(-EINVAL);
310		}
311		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
312			WARN(1, "Missing migrate_to_ram method\n");
313			return ERR_PTR(-EINVAL);
314		}
315		if (!pgmap->ops->page_free) {
316			WARN(1, "Missing page_free method\n");
317			return ERR_PTR(-EINVAL);
318		}
319		if (!pgmap->owner) {
320			WARN(1, "Missing owner\n");
321			return ERR_PTR(-EINVAL);
322		}
323		break;
324	case MEMORY_DEVICE_COHERENT:
325		if (!pgmap->ops->page_free) {
326			WARN(1, "Missing page_free method\n");
327			return ERR_PTR(-EINVAL);
328		}
329		if (!pgmap->owner) {
330			WARN(1, "Missing owner\n");
331			return ERR_PTR(-EINVAL);
332		}
333		break;
334	case MEMORY_DEVICE_FS_DAX:
335		if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
336			WARN(1, "File system DAX not supported\n");
337			return ERR_PTR(-EINVAL);
338		}
339		params.pgprot = pgprot_decrypted(params.pgprot);
340		break;
341	case MEMORY_DEVICE_GENERIC:
342		break;
343	case MEMORY_DEVICE_PCI_P2PDMA:
344		params.pgprot = pgprot_noncached(params.pgprot);
345		break;
346	default:
347		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
348		break;
349	}
350
351	init_completion(&pgmap->done);
352	error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
353				GFP_KERNEL);
354	if (error)
355		return ERR_PTR(error);
356
357	devmap_managed_enable_get(pgmap);
358
359	/*
360	 * Clear the pgmap nr_range as it will be incremented for each
361	 * successfully processed range. This communicates how many
362	 * regions to unwind in the abort case.
363	 */
364	pgmap->nr_range = 0;
365	error = 0;
366	for (i = 0; i < nr_range; i++) {
367		error = pagemap_range(pgmap, &params, i, nid);
368		if (error)
369			break;
370		pgmap->nr_range++;
371	}
372
373	if (i < nr_range) {
374		memunmap_pages(pgmap);
375		pgmap->nr_range = nr_range;
376		return ERR_PTR(error);
377	}
378
379	return __va(pgmap->ranges[0].start);
380}
381EXPORT_SYMBOL_GPL(memremap_pages);
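
The switch statement above enforces per-type requirements; for MEMORY_DEVICE_PRIVATE that means migrate_to_ram, page_free, and an owner. Below is a hedged sketch of the minimum a driver (for example an HMM-based GPU driver) would supply; all example_* identifiers are hypothetical, not kernel symbols.

static vm_fault_t example_migrate_to_ram(struct vm_fault *vmf)
{
	/* migrate the faulting device-private page back to system RAM */
	return 0;
}

static void example_page_free(struct page *page)
{
	/* hand the backing device memory back to the driver's allocator */
}

static const struct dev_pagemap_ops example_devmem_ops = {
	.migrate_to_ram	= example_migrate_to_ram,
	.page_free	= example_page_free,
};

static void *example_devmem_add(struct range *range, int nid)
{
	static struct dev_pagemap pgmap;	/* sketch only; normally lives in driver state */

	pgmap.type = MEMORY_DEVICE_PRIVATE;
	pgmap.range = *range;
	pgmap.nr_range = 1;
	pgmap.ops = &example_devmem_ops;
	pgmap.owner = &pgmap;			/* any stable, unique cookie */
	return memremap_pages(&pgmap, nid);	/* undone later via memunmap_pages(&pgmap) */
}
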
382
383/**
384 * devm_memremap_pages - remap and provide memmap backing for the given resource
385 * @dev: hosting device for @res
386 * @pgmap: pointer to a struct dev_pagemap
387 *
388 * Notes:
389 * 1/ At a minimum the range and type members of @pgmap must be initialized
390 *    by the caller before passing it to this function
391 *
392 * 2/ The altmap field may optionally be initialized, in which case
393 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
394 *
 395 * 3/ The ref field may optionally be provided, in which case pgmap->ref must be
396 *    'live' on entry and will be killed and reaped at
397 *    devm_memremap_pages_release() time, or if this routine fails.
398 *
399 * 4/ range is expected to be a host memory range that could feasibly be
400 *    treated as a "System RAM" range, i.e. not a device mmio range, but
401 *    this is not enforced.
402 */
403void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
404{
405	int error;
406	void *ret;
407
408	ret = memremap_pages(pgmap, dev_to_node(dev));
409	if (IS_ERR(ret))
410		return ret;
411
412	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
413			pgmap);
414	if (error)
415		return ERR_PTR(error);
416	return ret;
417}
418EXPORT_SYMBOL_GPL(devm_memremap_pages);
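
For the device-managed wrapper, the kernel-doc above asks only that the range and type members be filled in before the call; everything else may stay zeroed. A hedged minimal sketch follows, where dev, res, and pgmap are hypothetical driver state.

static void *example_devm_map(struct device *dev, struct resource *res,
			      struct dev_pagemap *pgmap)
{
	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range.start = res->start;
	pgmap->range.end = res->end;
	pgmap->nr_range = 1;
	/* torn down automatically on driver detach via devm_memremap_pages_release() */
	return devm_memremap_pages(dev, pgmap);
}
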
419
420void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
421{
422	devm_release_action(dev, devm_memremap_pages_release, pgmap);
423}
424EXPORT_SYMBOL_GPL(devm_memunmap_pages);
425
426/**
427 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
428 * @pfn: page frame number to lookup page_map
429 * @pgmap: optional known pgmap that already has a reference
430 *
431 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
432 * is non-NULL but does not cover @pfn the reference to it will be released.
433 */
434struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
435		struct dev_pagemap *pgmap)
436{
437	resource_size_t phys = PFN_PHYS(pfn);
438
439	/*
440	 * In the cached case we're already holding a live reference.
441	 */
442	if (pgmap) {
443		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
444			return pgmap;
445		put_dev_pagemap(pgmap);
446	}
447
448	/* fall back to slow path lookup */
449	rcu_read_lock();
450	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
451	if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))
452		pgmap = NULL;
453	rcu_read_unlock();
454
455	return pgmap;
456}
457EXPORT_SYMBOL_GPL(get_dev_pagemap);
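
The cached-reference behaviour described in the kernel-doc supports the usual pfn-walk pattern: pass the previous pgmap back in so a hit on the same range is cheap, and drop the last reference once the walk is done. A hedged sketch:

static void example_walk_pfns(unsigned long start_pfn, unsigned long nr)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr; pfn++) {
		pgmap = get_dev_pagemap(pfn, pgmap);	/* releases a stale cached ref */
		if (!pgmap)
			continue;	/* not ZONE_DEVICE, or the pgmap is shutting down */
		/* ... operate on pfn_to_page(pfn) ... */
	}
	if (pgmap)
		put_dev_pagemap(pgmap);
}
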
458
459void free_zone_device_page(struct page *page)
460{
461	if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))
462		return;
463
464	mem_cgroup_uncharge(page_folio(page));
465
466	/*
467	 * Note: we don't expect anonymous compound pages yet. Once supported
468	 * and we could PTE-map them similar to THP, we'd have to clear
469	 * PG_anon_exclusive on all tail pages.
470	 */
471	VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);
472	if (PageAnon(page))
473		__ClearPageAnonExclusive(page);
474
475	/*
476	 * When a device managed page is freed, the folio->mapping field
477	 * may still contain a (stale) mapping value. For example, the
478	 * lower bits of folio->mapping may still identify the folio as an
479	 * anonymous folio. Ultimately, this entire field is just stale
480	 * and wrong, and it will cause errors if not cleared.
481	 *
482	 * For other types of ZONE_DEVICE pages, migration is either
483	 * handled differently or not done at all, so there is no need
484	 * to clear page->mapping.
485	 */
486	page->mapping = NULL;
487	page->pgmap->ops->page_free(page);
488
489	if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
490	    page->pgmap->type != MEMORY_DEVICE_COHERENT)
491		/*
492		 * Reset the page count to 1 to prepare for handing out the page
493		 * again.
494		 */
495		set_page_count(page, 1);
496	else
497		put_dev_pagemap(page->pgmap);
498}
499
500void zone_device_page_init(struct page *page)
501{
502	/*
503	 * Drivers shouldn't be allocating pages after calling
504	 * memunmap_pages().
505	 */
506	WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref));
507	set_page_count(page, 1);
508	lock_page(page);
509}
510EXPORT_SYMBOL_GPL(zone_device_page_init);
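
Drivers hand out ZONE_DEVICE pages themselves (for example to back migrate_vma destinations) and are expected to run each page through zone_device_page_init() first. A hedged sketch; example_alloc_free_devmem_pfn() stands in for the driver's own free-page bookkeeping and is purely hypothetical.

static unsigned long example_alloc_free_devmem_pfn(void)
{
	return 0;	/* placeholder: a real driver tracks its unused device pfns */
}

static struct page *example_alloc_device_page(void)
{
	struct page *page = pfn_to_page(example_alloc_free_devmem_pfn());

	zone_device_page_init(page);	/* takes a pgmap ref, sets refcount to 1, locks the page */
	return page;			/* returned locked, as migrate_vma expects */
}
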
511
512#ifdef CONFIG_FS_DAX
513bool __put_devmap_managed_page_refs(struct page *page, int refs)
514{
515	if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
516		return false;
517
518	/*
519	 * fsdax page refcounts are 1-based, rather than 0-based: if
520	 * refcount is 1, then the page is free and the refcount is
521	 * stable because nobody holds a reference on the page.
522	 */
523	if (page_ref_sub_return(page, refs) == 1)
524		wake_up_var(&page->_refcount);
525	return true;
526}
527EXPORT_SYMBOL(__put_devmap_managed_page_refs);
528#endif /* CONFIG_FS_DAX */
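
The wake_up_var() above pairs with waiters in the fsdax path: before a filesystem reuses the blocks backing a DAX page it waits for the refcount to drop back to 1. A simplified, hedged sketch of the waiting side (real filesystems do this with interruptible wait variants and extra unlocking):

static void example_wait_for_fsdax_page_idle(struct page *page)
{
	/* 1 is the "free" value for fsdax pages, per the comment above */
	wait_var_event(&page->_refcount, page_ref_count(page) == 1);
}
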
v5.4 (mm/memremap.c)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
  3#include <linux/device.h>
  4#include <linux/io.h>
  5#include <linux/kasan.h>
  6#include <linux/memory_hotplug.h>
  7#include <linux/mm.h>
  8#include <linux/pfn_t.h>
  9#include <linux/swap.h>
 10#include <linux/swapops.h>
 11#include <linux/types.h>
 12#include <linux/wait_bit.h>
 13#include <linux/xarray.h>
 14
 15static DEFINE_XARRAY(pgmap_array);
 16
 17#ifdef CONFIG_DEV_PAGEMAP_OPS
 18DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
 19EXPORT_SYMBOL(devmap_managed_key);
 20static atomic_t devmap_managed_enable;
 21
 22static void devmap_managed_enable_put(void)
 23{
 24	if (atomic_dec_and_test(&devmap_managed_enable))
 25		static_branch_disable(&devmap_managed_key);
 26}
 27
 28static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
 29{
 30	if (!pgmap->ops || !pgmap->ops->page_free) {
 31		WARN(1, "Missing page_free method\n");
 32		return -EINVAL;
 33	}
 34
 35	if (atomic_inc_return(&devmap_managed_enable) == 1)
 36		static_branch_enable(&devmap_managed_key);
 37	return 0;
 38}
 39#else
 40static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
 41{
 42	return -EINVAL;
 43}
 44static void devmap_managed_enable_put(void)
 45{
 46}
 47#endif /* CONFIG_DEV_PAGEMAP_OPS */
 48
 49static void pgmap_array_delete(struct resource *res)
 50{
 51	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
 52			NULL, GFP_KERNEL);
 53	synchronize_rcu();
 54}
 55
 56static unsigned long pfn_first(struct dev_pagemap *pgmap)
 57{
 58	return PHYS_PFN(pgmap->res.start) +
 59		vmem_altmap_offset(pgmap_altmap(pgmap));
 60}
 61
 62static unsigned long pfn_end(struct dev_pagemap *pgmap)
 63{
 64	const struct resource *res = &pgmap->res;
 65
 66	return (res->start + resource_size(res)) >> PAGE_SHIFT;
 67}
 68
 69static unsigned long pfn_next(unsigned long pfn)
 70{
 71	if (pfn % 1024 == 0)
 72		cond_resched();
 73	return pfn + 1;
 74}
 75
 76#define for_each_device_pfn(pfn, map) \
 77	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
 78
 79static void dev_pagemap_kill(struct dev_pagemap *pgmap)
 80{
 81	if (pgmap->ops && pgmap->ops->kill)
 82		pgmap->ops->kill(pgmap);
 83	else
 84		percpu_ref_kill(pgmap->ref);
 85}
 86
 87static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
 88{
 89	if (pgmap->ops && pgmap->ops->cleanup) {
 90		pgmap->ops->cleanup(pgmap);
 91	} else {
 92		wait_for_completion(&pgmap->done);
 93		percpu_ref_exit(pgmap->ref);
 94	}
 95	/*
 96	 * Undo the pgmap ref assignment for the internal case as the
 97	 * caller may re-enable the same pgmap.
 98	 */
 99	if (pgmap->ref == &pgmap->internal_ref)
100		pgmap->ref = NULL;
101}
102
103void memunmap_pages(struct dev_pagemap *pgmap)
104{
105	struct resource *res = &pgmap->res;
106	struct page *first_page;
107	unsigned long pfn;
108	int nid;
109
110	dev_pagemap_kill(pgmap);
111	for_each_device_pfn(pfn, pgmap)
112		put_page(pfn_to_page(pfn));
113	dev_pagemap_cleanup(pgmap);
114
115	/* make sure to access a memmap that was actually initialized */
116	first_page = pfn_to_page(pfn_first(pgmap));
117
118	/* pages are dead and unused, undo the arch mapping */
119	nid = page_to_nid(first_page);
120
121	mem_hotplug_begin();
122	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
123		__remove_pages(page_zone(first_page), PHYS_PFN(res->start),
124			       PHYS_PFN(resource_size(res)), NULL);
125	} else {
126		arch_remove_memory(nid, res->start, resource_size(res),
127				pgmap_altmap(pgmap));
128		kasan_remove_zero_shadow(__va(res->start), resource_size(res));
129	}
130	mem_hotplug_done();
131
132	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
133	pgmap_array_delete(res);
134	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
135	devmap_managed_enable_put();
136}
137EXPORT_SYMBOL_GPL(memunmap_pages);
138
139static void devm_memremap_pages_release(void *data)
140{
141	memunmap_pages(data);
142}
143
144static void dev_pagemap_percpu_release(struct percpu_ref *ref)
145{
146	struct dev_pagemap *pgmap =
147		container_of(ref, struct dev_pagemap, internal_ref);
148
149	complete(&pgmap->done);
150}
151
152/*
153 * Not device managed version of dev_memremap_pages, undone by
154 * memunmap_pages().  Please use dev_memremap_pages if you have a struct
155 * device available.
156 */
157void *memremap_pages(struct dev_pagemap *pgmap, int nid)
158{
159	struct resource *res = &pgmap->res;
160	struct dev_pagemap *conflict_pgmap;
161	struct mhp_restrictions restrictions = {
162		/*
163		 * We do not want any optional features only our own memmap
164		 */
165		.altmap = pgmap_altmap(pgmap),
166	};
167	pgprot_t pgprot = PAGE_KERNEL;
168	int error, is_ram;
169	bool need_devmap_managed = true;
170
171	switch (pgmap->type) {
172	case MEMORY_DEVICE_PRIVATE:
173		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
174			WARN(1, "Device private memory not supported\n");
175			return ERR_PTR(-EINVAL);
176		}
177		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
178			WARN(1, "Missing migrate_to_ram method\n");
179			return ERR_PTR(-EINVAL);
180		}
181		break;
182	case MEMORY_DEVICE_FS_DAX:
183		if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
184		    IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
185			WARN(1, "File system DAX not supported\n");
186			return ERR_PTR(-EINVAL);
187		}
188		break;
189	case MEMORY_DEVICE_DEVDAX:
190	case MEMORY_DEVICE_PCI_P2PDMA:
191		need_devmap_managed = false;
192		break;
193	default:
194		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
195		break;
196	}
197
198	if (!pgmap->ref) {
199		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
200			return ERR_PTR(-EINVAL);
201
202		init_completion(&pgmap->done);
203		error = percpu_ref_init(&pgmap->internal_ref,
204				dev_pagemap_percpu_release, 0, GFP_KERNEL);
205		if (error)
206			return ERR_PTR(error);
207		pgmap->ref = &pgmap->internal_ref;
208	} else {
209		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
210			WARN(1, "Missing reference count teardown definition\n");
211			return ERR_PTR(-EINVAL);
212		}
213	}
214
215	if (need_devmap_managed) {
216		error = devmap_managed_enable_get(pgmap);
217		if (error)
218			return ERR_PTR(error);
219	}
220
221	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
222	if (conflict_pgmap) {
223		WARN(1, "Conflicting mapping in same section\n");
224		put_dev_pagemap(conflict_pgmap);
225		error = -ENOMEM;
226		goto err_array;
227	}
228
229	conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
230	if (conflict_pgmap) {
231		WARN(1, "Conflicting mapping in same section\n");
232		put_dev_pagemap(conflict_pgmap);
233		error = -ENOMEM;
234		goto err_array;
235	}
236
237	is_ram = region_intersects(res->start, resource_size(res),
238		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
239
240	if (is_ram != REGION_DISJOINT) {
241		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
242				is_ram == REGION_MIXED ? "mixed" : "ram", res);
243		error = -ENXIO;
244		goto err_array;
245	}
246
247	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
248				PHYS_PFN(res->end), pgmap, GFP_KERNEL));
249	if (error)
250		goto err_array;
251
252	if (nid < 0)
253		nid = numa_mem_id();
254
255	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0,
256			resource_size(res));
257	if (error)
258		goto err_pfn_remap;
259
260	mem_hotplug_begin();
261
262	/*
263	 * For device private memory we call add_pages() as we only need to
 264	 * allocate and initialize struct page for the device memory. Moreover,
 265	 * the device memory is inaccessible, thus we do not want to
266	 * create a linear mapping for the memory like arch_add_memory()
267	 * would do.
268	 *
269	 * For all other device memory types, which are accessible by
270	 * the CPU, we do want the linear mapping and thus use
271	 * arch_add_memory().
272	 */
273	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
274		error = add_pages(nid, PHYS_PFN(res->start),
275				PHYS_PFN(resource_size(res)), &restrictions);
276	} else {
277		error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
278		if (error) {
279			mem_hotplug_done();
280			goto err_kasan;
281		}
282
283		error = arch_add_memory(nid, res->start, resource_size(res),
284					&restrictions);
285	}
286
287	if (!error) {
288		struct zone *zone;
289
290		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
291		move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
292				PHYS_PFN(resource_size(res)), restrictions.altmap);
293	}
294
295	mem_hotplug_done();
296	if (error)
297		goto err_add_memory;
298
299	/*
300	 * Initialization of the pages has been deferred until now in order
301	 * to allow us to do the work while not holding the hotplug lock.
302	 */
303	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
304				PHYS_PFN(res->start),
305				PHYS_PFN(resource_size(res)), pgmap);
306	percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
307	return __va(res->start);
308
309 err_add_memory:
310	kasan_remove_zero_shadow(__va(res->start), resource_size(res));
311 err_kasan:
312	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
313 err_pfn_remap:
314	pgmap_array_delete(res);
315 err_array:
316	dev_pagemap_kill(pgmap);
317	dev_pagemap_cleanup(pgmap);
318	devmap_managed_enable_put();
319	return ERR_PTR(error);
320}
321EXPORT_SYMBOL_GPL(memremap_pages);
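
Unlike the v6.8 code above, this version lets a caller supply its own percpu_ref through pgmap->ref, in which case both the kill and cleanup ops become mandatory (checked in the else branch of memremap_pages() above). A hedged sketch of what such a caller might provide; the example_* names are hypothetical.

static void example_kill(struct dev_pagemap *pgmap)
{
	percpu_ref_kill(pgmap->ref);	/* stop new references from being taken */
}

static void example_cleanup(struct dev_pagemap *pgmap)
{
	/* wait until all outstanding page references are gone, then percpu_ref_exit() */
}

static const struct dev_pagemap_ops example_ops = {
	.kill		= example_kill,
	.cleanup	= example_cleanup,
};
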
322
323/**
324 * devm_memremap_pages - remap and provide memmap backing for the given resource
325 * @dev: hosting device for @res
326 * @pgmap: pointer to a struct dev_pagemap
327 *
328 * Notes:
329 * 1/ At a minimum the res and type members of @pgmap must be initialized
330 *    by the caller before passing it to this function
331 *
332 * 2/ The altmap field may optionally be initialized, in which case
333 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
334 *
 335 * 3/ The ref field may optionally be provided, in which case pgmap->ref must be
336 *    'live' on entry and will be killed and reaped at
337 *    devm_memremap_pages_release() time, or if this routine fails.
338 *
339 * 4/ res is expected to be a host memory range that could feasibly be
340 *    treated as a "System RAM" range, i.e. not a device mmio range, but
341 *    this is not enforced.
342 */
343void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
344{
345	int error;
346	void *ret;
347
348	ret = memremap_pages(pgmap, dev_to_node(dev));
349	if (IS_ERR(ret))
350		return ret;
351
352	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
353			pgmap);
354	if (error)
355		return ERR_PTR(error);
356	return ret;
357}
358EXPORT_SYMBOL_GPL(devm_memremap_pages);
359
360void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
361{
362	devm_release_action(dev, devm_memremap_pages_release, pgmap);
363}
364EXPORT_SYMBOL_GPL(devm_memunmap_pages);
365
366unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
367{
368	/* number of pfns from base where pfn_to_page() is valid */
369	if (altmap)
370		return altmap->reserve + altmap->free;
371	return 0;
372}
373
374void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
375{
376	altmap->alloc -= nr_pfns;
377}
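
vmem_altmap_offset() and vmem_altmap_free() support the altmap case, where the struct pages backing a pgmap are carved out of the device range itself rather than system RAM. Below is a hedged sketch of wiring one up; the helper and values are illustrative only, whereas real code (for example the nvdimm pfn setup) computes them from on-media metadata.

static void example_setup_altmap(struct dev_pagemap *pgmap,
				 const struct resource *res,
				 unsigned long reserved_pfns,
				 unsigned long free_pfns)
{
	struct vmem_altmap __altmap = {
		.base_pfn = PHYS_PFN(res->start),
		.reserve  = reserved_pfns,	/* leading pfns that are never handed out */
	};

	/* base_pfn/reserve are const members, so copy the whole struct in */
	memcpy(&pgmap->altmap, &__altmap, sizeof(__altmap));
	pgmap->altmap.free = free_pfns;		/* pfns available to back struct pages */
	pgmap->flags |= PGMAP_ALTMAP_VALID;
}
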
378
379/**
380 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
381 * @pfn: page frame number to lookup page_map
382 * @pgmap: optional known pgmap that already has a reference
383 *
384 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
385 * is non-NULL but does not cover @pfn the reference to it will be released.
386 */
387struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
388		struct dev_pagemap *pgmap)
389{
390	resource_size_t phys = PFN_PHYS(pfn);
391
392	/*
393	 * In the cached case we're already holding a live reference.
394	 */
395	if (pgmap) {
396		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
397			return pgmap;
398		put_dev_pagemap(pgmap);
399	}
400
401	/* fall back to slow path lookup */
402	rcu_read_lock();
403	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
404	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
405		pgmap = NULL;
406	rcu_read_unlock();
407
408	return pgmap;
409}
410EXPORT_SYMBOL_GPL(get_dev_pagemap);
411
412#ifdef CONFIG_DEV_PAGEMAP_OPS
413void __put_devmap_managed_page(struct page *page)
414{
415	int count = page_ref_dec_return(page);
416
417	/*
418	 * If refcount is 1 then page is freed and refcount is stable as nobody
419	 * holds a reference on the page.
420	 */
421	if (count == 1) {
422		/* Clear Active bit in case of parallel mark_page_accessed */
423		__ClearPageActive(page);
424		__ClearPageWaiters(page);
425
426		mem_cgroup_uncharge(page);
427
428		/*
429		 * When a device_private page is freed, the page->mapping field
430		 * may still contain a (stale) mapping value. For example, the
431		 * lower bits of page->mapping may still identify the page as
432		 * an anonymous page. Ultimately, this entire field is just
433		 * stale and wrong, and it will cause errors if not cleared.
434		 * One example is:
435		 *
436		 *  migrate_vma_pages()
437		 *    migrate_vma_insert_page()
438		 *      page_add_new_anon_rmap()
439		 *        __page_set_anon_rmap()
440		 *          ...checks page->mapping, via PageAnon(page) call,
441		 *            and incorrectly concludes that the page is an
442		 *            anonymous page. Therefore, it incorrectly,
443		 *            silently fails to set up the new anon rmap.
444		 *
445		 * For other types of ZONE_DEVICE pages, migration is either
446		 * handled differently or not done at all, so there is no need
447		 * to clear page->mapping.
448		 */
449		if (is_device_private_page(page))
450			page->mapping = NULL;
451
452		page->pgmap->ops->page_free(page);
453	} else if (!count)
454		__put_page(page);
455}
456EXPORT_SYMBOL(__put_devmap_managed_page);
457#endif /* CONFIG_DEV_PAGEMAP_OPS */