blob: d18e5c332cb9f443b2279d0545779b67dcb7ec19 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
H. Peter Anvin1965aae2008-10-22 22:26:29 -07002#ifndef _ASM_X86_PAGE_H
3#define _ASM_X86_PAGE_H
Jeremy Fitzhardinge83a51012008-01-30 13:32:41 +01004
Ingo Molnar56cefce2009-02-13 13:23:02 +01005#include <linux/types.h>
6
Jeremy Fitzhardinge11b7c7d2008-01-30 13:32:44 +01007#ifdef __KERNEL__
8
Jeremy Fitzhardinge51c78eb2009-02-08 22:52:14 -08009#include <asm/page_types.h>
Jeremy Fitzhardinge83a51012008-01-30 13:32:41 +010010
11#ifdef CONFIG_X86_64
Jeremy Fitzhardinge11b7c7d2008-01-30 13:32:44 +010012#include <asm/page_64.h>
13#else
14#include <asm/page_32.h>
Jeremy Fitzhardinge83a51012008-01-30 13:32:41 +010015#endif /* CONFIG_X86_64 */
16
Jeremy Fitzhardinge345b9042008-01-30 13:32:42 +010017#ifndef __ASSEMBLY__
Thomas Gleixner5f5192b2008-01-30 13:34:06 +010018
Jeremy Fitzhardinge345b9042008-01-30 13:32:42 +010019struct page;
20
Yinghai Lu0e691cf2013-01-24 12:20:05 -080021#include <linux/range.h>
22extern struct range pfn_mapped[];
23extern int nr_pfn_mapped;
24
/*
 * Clear a page that is (or will be) mapped into user space.
 *
 * @page:  kernel virtual address of the page to clear
 * @vaddr: user virtual address the page maps to (unused on x86)
 * @pg:    struct page backing the mapping (unused on x86)
 *
 * x86 needs no per-mapping cache maintenance here, so this is a plain
 * clear_page(); @vaddr and @pg exist only to satisfy the generic
 * arch interface.
 */
static inline void clear_user_page(void *page, unsigned long vaddr,
				   struct page *pg)
{
	clear_page(page);
}
30
/*
 * Copy a page that is (or will be) mapped into user space.
 *
 * @to:     kernel virtual address of the destination page
 * @from:   kernel virtual address of the source page
 * @vaddr:  user virtual address of the destination (unused on x86)
 * @topage: struct page backing the destination (unused on x86)
 *
 * As with clear_user_page(), x86 needs no per-mapping cache work, so
 * this degenerates to copy_page(); the extra parameters only satisfy
 * the generic arch interface.
 */
static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *topage)
{
	copy_page(to, from);
}
36
/*
 * Allocate an order-0, pre-zeroed, movable highmem folio for @vaddr in
 * @vma.  Thin wrapper around vma_alloc_folio() with
 * GFP_HIGHUSER_MOVABLE | __GFP_ZERO.
 */
#define vma_alloc_zeroed_movable_folio(vma, vaddr) \
	vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false)
Jeremy Fitzhardinge345b9042008-01-30 13:32:42 +010039
/*
 * Physical <-> virtual address conversion for the kernel direct map.
 * page_32.h / page_64.h may provide their own __pa/__va, hence the
 * #ifndef guards.
 */
#ifndef __pa
#define __pa(x)		__phys_addr((unsigned long)(x))
#endif

/* Like __pa() but skips the CONFIG_DEBUG_VIRTUAL sanity checking. */
#define __pa_nodebug(x)	__phys_addr_nodebug((unsigned long)(x))
/* __pa_symbol should be used for C visible symbols.
   This seems to be the official gcc blessed way to do such arithmetic. */
/*
 * We need __phys_reloc_hide() here because gcc may assume that there is no
 * overflow during __pa() calculation and can optimize it unexpectedly.
 * Newer versions of gcc provide -fno-strict-overflow switch to handle this
 * case properly. Once all supported versions of gcc understand it, we can
 * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
 */
#define __pa_symbol(x) \
	__phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))

#ifndef __va
#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif

/* Boot-time aliases: identical to __va()/__pa() on x86. */
#define __boot_va(x)		__va(x)
#define __boot_pa(x)		__pa(x)

/*
 * virt_to_page(kaddr) returns a valid pointer if and only if
 * virt_addr_valid(kaddr) returns true.
 */
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
extern bool __virt_addr_valid(unsigned long kaddr);
#define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))
Jeremy Fitzhardinge98fd5ae2008-01-30 13:32:43 +010072
Adrian Hunter1fb85d02022-01-31 09:24:50 +020073static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
74{
75 return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
76}
77
78static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
79{
80 return __canonical_address(vaddr, vaddr_bits) == vaddr;
81}
82
Jeremy Fitzhardinge345b9042008-01-30 13:32:42 +010083#endif /* __ASSEMBLY__ */
84
Jeremy Fitzhardingee62f4472008-01-30 13:32:44 +010085#include <asm-generic/memory_model.h>
Arnd Bergmann5b17e1c2009-05-13 22:56:30 +000086#include <asm-generic/getorder.h>
Jeremy Fitzhardingee62f4472008-01-30 13:32:44 +010087
Kirill A. Shutemovfd8526a2013-11-19 15:17:50 +020088#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
Jeremy Fitzhardinge345b9042008-01-30 13:32:42 +010089
Jeremy Fitzhardinge11b7c7d2008-01-30 13:32:44 +010090#endif /* __KERNEL__ */
H. Peter Anvin1965aae2008-10-22 22:26:29 -070091#endif /* _ASM_X86_PAGE_H */