// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, Intel Corporation.
 */
#include "test/nfit_test.h"
#include <linux/blkdev.h>
#include <linux/dax.h>
#include <pmem.h>
#include <nd.h>

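/*
 * nfit_test's override of the pmem driver's DAX entry point: translate
 * @nr_pages starting at page offset @pgoff into a kernel virtual
 * address and pfn. Returns the number of contiguous pages available at
 * the translated address, or -EIO if the range intersects a bad block.
 */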
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

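	/*
	 * Reject the request if any part of it overlaps a known bad
	 * block; is_bad_pmem() takes its offset in 512-byte sectors.
	 */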
	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	/*
	 * Limit dax to a single page at a time: in the nfit_test case
	 * the memory is vmalloc()-backed, so it is only virtually
	 * contiguous and each page must be translated individually.
	 */
	if (get_nfit_res(pmem->phys_addr + offset)) {
		struct page *page;

		if (kaddr)
			*kaddr = pmem->virt_addr + offset;
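		/*
		 * A vmalloc() mapping is not physically contiguous, so
		 * look the pfn up page-by-page instead of deriving it
		 * from the physical base address.
		 */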
		page = vmalloc_to_page(pmem->virt_addr + offset);
		if (pfn)
			*pfn = page_to_pfn_t(page);
		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
				__func__, pmem, pgoff, page_to_pfn(page));

		return 1;
	}

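	/* Real, physically contiguous pmem: translate the range directly. */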
	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit the known-good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
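	/* Otherwise the rest of the device, past @pgoff, is known good. */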
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
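
/*
 * For reference, a minimal sketch of how a dax_direct_access() caller
 * consumes the value returned above (illustrative only; dax_dev, pgoff,
 * nr_pages, and src are assumed to be set up by the caller):
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long avail = dax_direct_access(dax_dev, pgoff, nr_pages,
 *			DAX_ACCESS, &kaddr, &pfn);
 *
 *	if (avail < 0)
 *		return avail;
 *	avail = min_t(long, avail, nr_pages);
 *	memcpy_flushcache(kaddr, src, avail * PAGE_SIZE);
 */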