// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 * Author: Muchun Song <songmuchun@bytedance.com>
 */
#ifndef _LINUX_HUGETLB_VMEMMAP_H
#define _LINUX_HUGETLB_VMEMMAP_H
#include <linux/hugetlb.h>

/*
 * Reserve one vmemmap page; all vmemmap addresses are mapped to it. See
 * Documentation/mm/vmemmap_dedup.rst.
 */
#define HUGETLB_VMEMMAP_RESERVE_SIZE	PAGE_SIZE
#define HUGETLB_VMEMMAP_RESERVE_PAGES	(HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))
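/*
 * Illustrative example (not enforced by this header; assumes 4 KiB base
 * pages and a 64-byte struct page): the single reserved vmemmap page then
 * covers HUGETLB_VMEMMAP_RESERVE_PAGES = 4096 / 64 = 64 struct pages.
 */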

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
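/*
 * hugetlb_vmemmap_restore_folio*() re-allocate and restore the vmemmap
 * pages previously discarded for a folio; hugetlb_vmemmap_optimize_folio*()
 * discard the optimizable vmemmap pages.  The list variants operate on
 * every folio on @folio_list; restore moves folios whose vmemmap is no
 * longer optimized onto @non_hvo_folios.  See mm/hugetlb_vmemmap.c for the
 * kernel-doc of each helper.
 */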
int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio);
long hugetlb_vmemmap_restore_folios(const struct hstate *h,
				    struct list_head *folio_list,
				    struct list_head *non_hvo_folios);
void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio);
void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);

static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
{
        return pages_per_huge_page(h) * sizeof(struct page);
}

/*
 * Return how much of the vmemmap associated with a HugeTLB page can be
 * optimized and freed back to the buddy allocator.
 */
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
        int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;

        /*
         * HVO relies on sizeof(struct page) being a power of 2 so that
         * struct pages never straddle a vmemmap page boundary; otherwise
         * nothing can be optimized.
         */
        if (!is_power_of_2(sizeof(struct page)))
                return 0;
        return size > 0 ? size : 0;
}
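/*
 * Worked example (illustrative only; assumes 4 KiB base pages and a
 * 64-byte struct page): a 2 MiB HugeTLB page has 512 struct pages, i.e.
 * 512 * 64 = 32 KiB (8 pages) of vmemmap.  With one page reserved,
 * hugetlb_vmemmap_optimizable_size() reports 28 KiB as freeable to the
 * buddy allocator.
 */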
#else
static inline int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
{
        return 0;
}

static inline long hugetlb_vmemmap_restore_folios(const struct hstate *h,
				    struct list_head *folio_list,
				    struct list_head *non_hvo_folios)
{
        list_splice_init(folio_list, non_hvo_folios);
        return 0;
}

static inline void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
{
}

static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
{
}

static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
        return 0;
}
#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */

static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
{
        return hugetlb_vmemmap_optimizable_size(h) != 0;
}
#endif /* _LINUX_HUGETLB_VMEMMAP_H */