// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, Intel Corporation.
 */
#include "test/nfit_test.h"
#include <linux/blkdev.h>
#include <linux/dax.h>
#include <pmem.h>
#include <nd.h>

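/*
 * __pmem_direct_access() - nfit_test-aware DAX address translation.
 * Resolve @pgoff within @pmem to a kernel virtual address (@kaddr) and
 * a @pfn. Ranges emulated by nfit_test are vmalloc()-backed and are
 * therefore mapped one page at a time; everything else falls through to
 * the device's direct mapping. Returns the number of contiguous pages
 * that may be accessed, or -EIO if the requested range overlaps a known
 * badblock.
 */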
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

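	/* badblocks are tracked at 512-byte sector granularity */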
	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	/*
	 * Limit dax to a single page at a time, since the memory is
	 * vmalloc()-backed in the nfit_test case.
	 */
	if (get_nfit_res(pmem->phys_addr + offset)) {
		struct page *page;

		if (kaddr)
			*kaddr = pmem->virt_addr + offset;
		page = vmalloc_to_page(pmem->virt_addr + offset);
		if (pfn)
			*pfn = page_to_pfn_t(page);
		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
				__func__, pmem, pgoff, page_to_pfn(page));

		return 1;
	}

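	/* Not an nfit_test-emulated range: use the device's direct mapping */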
	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit the known-good range to the
	 * requested range; otherwise report the full remaining capacity.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}