// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, Intel Corporation.
 */
#include "test/nfit_test.h"
#include <linux/blkdev.h>
#include <linux/dax.h>
#include <pmem.h>
#include <nd.h>

11long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
12 long nr_pages, enum dax_access_mode mode, void **kaddr,
13 pfn_t *pfn)
14{
15 resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
16
17 if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
18 PFN_PHYS(nr_pages))))
19 return -EIO;
20
21 /*
22 * Limit dax to a single page at a time given vmalloc()-backed
23 * in the nfit_test case.
24 */
25 if (get_nfit_res(pmem->phys_addr + offset)) {
26 struct page *page;
27
28 if (kaddr)
29 *kaddr = pmem->virt_addr + offset;
30 page = vmalloc_to_page(pmem->virt_addr + offset);
31 if (pfn)
32 *pfn = page_to_pfn_t(page);
33 pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
34 __func__, pmem, pgoff, page_to_pfn(page));
35
36 return 1;
37 }
38
39 if (kaddr)
40 *kaddr = pmem->virt_addr + offset;
41 if (pfn)
42 *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
43
44 /*
45 * If badblocks are present, limit known good range to the
46 * requested range.
47 */
48 if (unlikely(pmem->bb.count))
49 return nr_pages;
50 return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
51}
/*
 * Copyright (c) 2014-2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include "test/nfit_test.h"
#include <linux/blkdev.h>
#include <pmem.h>
#include <nd.h>

18long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
19 long nr_pages, void **kaddr, pfn_t *pfn)
20{
21 resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
22
23 if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
24 PFN_PHYS(nr_pages))))
25 return -EIO;
26
27 /*
28 * Limit dax to a single page at a time given vmalloc()-backed
29 * in the nfit_test case.
30 */
31 if (get_nfit_res(pmem->phys_addr + offset)) {
32 struct page *page;
33
34 *kaddr = pmem->virt_addr + offset;
35 page = vmalloc_to_page(pmem->virt_addr + offset);
36 *pfn = page_to_pfn_t(page);
37 pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
38 __func__, pmem, pgoff, page_to_pfn(page));
39
40 return 1;
41 }
42
43 *kaddr = pmem->virt_addr + offset;
44 *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
45
46 /*
47 * If badblocks are present, limit known good range to the
48 * requested range.
49 */
50 if (unlikely(pmem->bb.count))
51 return nr_pages;
52 return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
53}