Linux kernel source listing: tools/testing/nvdimm/pmem-dax.c
(captured from an online kernel cross-reference viewer; site navigation removed)
First copy below is from kernel v5.14.15.
 1// SPDX-License-Identifier: GPL-2.0-only
 2/*
 3 * Copyright (c) 2014-2016, Intel Corporation.
 
 
 
 
 
 
 
 
 
 4 */
 5#include "test/nfit_test.h"
 6#include <linux/blkdev.h>
 7#include <pmem.h>
 8#include <nd.h>
 9
10long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
11		long nr_pages, void **kaddr, pfn_t *pfn)
12{
13	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
14
15	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
16					PFN_PHYS(nr_pages))))
17		return -EIO;
18
19	/*
20	 * Limit dax to a single page at a time given vmalloc()-backed
21	 * in the nfit_test case.
22	 */
23	if (get_nfit_res(pmem->phys_addr + offset)) {
24		struct page *page;
25
26		if (kaddr)
27			*kaddr = pmem->virt_addr + offset;
28		page = vmalloc_to_page(pmem->virt_addr + offset);
29		if (pfn)
30			*pfn = page_to_pfn_t(page);
31		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
32				__func__, pmem, pgoff, page_to_pfn(page));
33
34		return 1;
35	}
36
37	if (kaddr)
38		*kaddr = pmem->virt_addr + offset;
39	if (pfn)
40		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
41
42	/*
43	 * If badblocks are present, limit known good range to the
44	 * requested range.
45	 */
46	if (unlikely(pmem->bb.count))
47		return nr_pages;
48	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
49}
Older copy of the same file, from kernel v4.17, for comparison:
 1/*
 2 * Copyright (c) 2014-2016, Intel Corporation.
 3 *
 4 * This program is free software; you can redistribute it and/or modify it
 5 * under the terms and conditions of the GNU General Public License,
 6 * version 2, as published by the Free Software Foundation.
 7 *
 8 * This program is distributed in the hope it will be useful, but WITHOUT
 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11 * more details.
12 */
13#include "test/nfit_test.h"
14#include <linux/blkdev.h>
15#include <pmem.h>
16#include <nd.h>
17
18long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
19		long nr_pages, void **kaddr, pfn_t *pfn)
20{
21	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
22
23	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
24					PFN_PHYS(nr_pages))))
25		return -EIO;
26
27	/*
28	 * Limit dax to a single page at a time given vmalloc()-backed
29	 * in the nfit_test case.
30	 */
31	if (get_nfit_res(pmem->phys_addr + offset)) {
32		struct page *page;
33
34		*kaddr = pmem->virt_addr + offset;
 
35		page = vmalloc_to_page(pmem->virt_addr + offset);
36		*pfn = page_to_pfn_t(page);
 
37		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
38				__func__, pmem, pgoff, page_to_pfn(page));
39
40		return 1;
41	}
42
43	*kaddr = pmem->virt_addr + offset;
44	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
 
 
45
46	/*
47	 * If badblocks are present, limit known good range to the
48	 * requested range.
49	 */
50	if (unlikely(pmem->bb.count))
51		return nr_pages;
52	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
53}