blob: 2fcae92d33590c80f5e6ee949973a0ff441be345 [file] [log] [blame]
Muchun Songf41f2ed2021-06-30 18:47:13 -07001// SPDX-License-Identifier: GPL-2.0
2/*
Muchun Songdff03382022-06-28 17:22:30 +08003 * HugeTLB Vmemmap Optimization (HVO)
Muchun Songf41f2ed2021-06-30 18:47:13 -07004 *
Muchun Songdff03382022-06-28 17:22:30 +08005 * Copyright (c) 2020, ByteDance. All rights reserved.
Muchun Songf41f2ed2021-06-30 18:47:13 -07006 *
7 * Author: Muchun Song <songmuchun@bytedance.com>
8 */
9#ifndef _LINUX_HUGETLB_VMEMMAP_H
10#define _LINUX_HUGETLB_VMEMMAP_H
11#include <linux/hugetlb.h>
12
Muchun Songb65d4ad2021-06-30 18:47:17 -070013/*
Muchun Song62138342022-06-28 17:22:33 +080014 * Reserve one vmemmap page, all vmemmap addresses are mapped to it. See
Vegard Nossume5b16c82023-10-22 20:56:19 +020015 * Documentation/mm/vmemmap_dedup.rst.
Muchun Songb65d4ad2021-06-30 18:47:17 -070016 */
Muchun Song62138342022-06-28 17:22:33 +080017#define HUGETLB_VMEMMAP_RESERVE_SIZE PAGE_SIZE
Usama Ariffde1c4e2023-09-13 11:54:01 +010018#define HUGETLB_VMEMMAP_RESERVE_PAGES (HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))
19
20#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
Usama Arifc5ad3232023-10-11 15:45:57 +010021int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio);
Mike Kravetzcfb8c752023-10-18 19:31:06 -070022long hugetlb_vmemmap_restore_folios(const struct hstate *h,
23 struct list_head *folio_list,
24 struct list_head *non_hvo_folios);
Usama Arifc5ad3232023-10-11 15:45:57 +010025void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio);
Mike Kravetz79359d62023-10-18 19:31:05 -070026void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
Muchun Song62138342022-06-28 17:22:33 +080027
28static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
Muchun Songb65d4ad2021-06-30 18:47:17 -070029{
Muchun Song62138342022-06-28 17:22:33 +080030 return pages_per_huge_page(h) * sizeof(struct page);
31}
32
33/*
34 * Return how many vmemmap size associated with a HugeTLB page that can be
35 * optimized and can be freed to the buddy allocator.
36 */
37static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
38{
39 int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;
40
41 if (!is_power_of_2(sizeof(struct page)))
42 return 0;
43 return size > 0 ? size : 0;
Muchun Songb65d4ad2021-06-30 18:47:17 -070044}
Muchun Songf41f2ed2021-06-30 18:47:13 -070045#else
/*
 * Stub for !CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP: no folio ever has its
 * vmemmap optimized, so there is nothing to restore; report success.
 */
static inline int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
{
	return 0;
}
50
/*
 * Stub for !CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP: no folio ever has its
 * vmemmap optimized, so move every entry of @folio_list onto
 * @non_hvo_folios and report that zero folios needed restoring.
 *
 * Must be "static inline" like every other stub in this #else branch: a
 * plain "static" definition in a header emits a separate out-of-line copy
 * (and a -Wunused-function warning) in every translation unit that
 * includes this header without calling it.
 */
static inline long hugetlb_vmemmap_restore_folios(const struct hstate *h,
					struct list_head *folio_list,
					struct list_head *non_hvo_folios)
{
	list_splice_init(folio_list, non_hvo_folios);
	return 0;
}
58
/*
 * Stub for !CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP: vmemmap optimization is
 * compiled out, so this is a no-op.
 */
static inline void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
{
}
Muchun Songb65d4ad2021-06-30 18:47:17 -070062
/*
 * Stub for !CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP: vmemmap optimization is
 * compiled out, so the whole list is left untouched.
 */
static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
{
}
66
/*
 * Stub for !CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP: no vmemmap can ever be
 * freed, so the optimizable size is always 0 (which also makes
 * hugetlb_vmemmap_optimizable() below return false).
 */
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	return 0;
}
Muchun Song47010c02022-04-28 23:16:15 -070071#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
Muchun Song62138342022-06-28 17:22:33 +080072
73static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
74{
75 return hugetlb_vmemmap_optimizable_size(h) != 0;
76}
Muchun Songf41f2ed2021-06-30 18:47:13 -070077#endif /* _LINUX_HUGETLB_VMEMMAP_H */