/*
 * hugetlbpage-backed filesystem. Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

/*
 * Mask used when checking the page offset value passed in via system
 * calls. This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value. The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))

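/*
 * Worked example (illustrative): on a 64-bit arch with 4 KiB pages,
 * PAGE_SHIFT = 12 and BITS_PER_LONG = 64, so PGOFF_LOFFT_MAX sets the
 * top 13 bits (bits 51..63). A byte offset is pgoff << PAGE_SHIFT and
 * the largest positive loff_t is 2^63 - 1, so pgoff must fit in
 * 63 - 12 = 51 bits; any of the masked bits being set would spill into
 * the sign bit once shifted.
 */
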
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range. If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * that the is_vm_hugetlb_page() tests below unmap_region() go
	 * the right way when do_mmap() unwinds (this may be important
	 * on powerpc and ia64).
	 */
	vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
	vma->vm_ops = &hugetlb_vm_ops;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/*
	 * The page-based offset in vm_pgoff could be large enough to
	 * overflow a loff_t when converted to a byte offset. This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long). So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}
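
/*
 * Illustrative userspace usage (an assumption for documentation, not
 * kernel code; "/dev/hugepages" is the conventional hugetlbfs mount):
 * mapping 4 MB of a hugetlbfs file on a system with 2 MB huge pages.
 *
 *	int fd = open("/dev/hugepages/example", O_CREAT | O_RDWR, 0600);
 *	void *p = mmap(NULL, 4UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The offset must be huge page aligned (checked above) and the length
 * a multiple of the huge page size (checked in
 * generic_hugetlb_get_unmapped_area() below), else mmap() fails with
 * -EINVAL.
 */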

/*
 * Called under mmap_write_lock(mm).
 */

static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = arch_get_mmap_end(addr, len, flags);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = arch_get_mmap_end(addr, len, flags);
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (mmap_end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. This provides functionality similar to filemap_read().
 */
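/*
 * Worked example (illustrative): with 2 MB huge pages
 * (huge_page_shift = 21), a read at ki_pos = 3 MB computes index = 1
 * and offset = 1 MB, so at most nr = 2 MB - 1 MB = 1 MB is copied from
 * the huge page at index 1 before index and offset advance.
 */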
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			if (PageHWPoison(page)) {
				put_page(page);
				retval = -EIO;
				break;
			}

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = copy_page_to_iter(page, offset, nr, to);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void hugetlb_delete_from_page_cache(struct folio *folio)
{
	folio_clear_dirty(folio);
	folio_clear_uptodate(folio);
	filemap_remove_folio(folio);
}

/*
 * Called with i_mmap_rwsem held for inode based vma maps. This makes
 * sure vma (and vm_mm) will not go away. We also hold the hugetlb fault
 * mutex for the page in the mapping. So, we cannot race with the page
 * being faulted into the vma.
 */
static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	pte_t *ptep, pte;

	ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
	if (!ptep)
		return false;

	pte = huge_ptep_get(ptep);
	if (huge_pte_none(pte) || !pte_present(pte))
		return false;

	if (pte_page(pte) == page)
		return true;

	return false;
}

/*
 * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
 * No, because the interval tree returns us only those vmas
 * which overlap the truncated area starting at pgoff,
 * and no vma on a 32-bit arch can span beyond 4GB.
 */
static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
{
	unsigned long offset = 0;

	if (vma->vm_pgoff < start)
		offset = (start - vma->vm_pgoff) << PAGE_SHIFT;

	return vma->vm_start + offset;
}

static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
{
	unsigned long t_end;

	if (!end)
		return vma->vm_end;

	t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
	if (t_end > vma->vm_end)
		t_end = vma->vm_end;
	return t_end;
}

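/*
 * Worked example (illustrative, hypothetical values): a vma with
 * vm_start = 0x400000 and vm_pgoff = 512 maps the file from page
 * offset 512. For a truncation starting at file page 1024,
 * vma_offset_start() returns 0x400000 + ((1024 - 512) << PAGE_SHIFT),
 * i.e. the virtual address within the vma where unmapping must begin.
 */
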
/*
 * Called with hugetlb fault mutex held. Therefore, no more mappings to
 * this folio can be created while executing the routine.
 */
static void hugetlb_unmap_file_folio(struct hstate *h,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index)
{
	struct rb_root_cached *root = &mapping->i_mmap;
	struct hugetlb_vma_lock *vma_lock;
	struct page *page = &folio->page;
	struct vm_area_struct *vma;
	unsigned long v_start;
	unsigned long v_end;
	pgoff_t start, end;

	start = index * pages_per_huge_page(h);
	end = (index + 1) * pages_per_huge_page(h);

	i_mmap_lock_write(mapping);
retry:
	vma_lock = NULL;
	vma_interval_tree_foreach(vma, root, start, end - 1) {
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		if (!hugetlb_vma_maps_page(vma, v_start, page))
			continue;

		if (!hugetlb_vma_trylock_write(vma)) {
			vma_lock = vma->vm_private_data;
			/*
			 * If we cannot get the vma lock, we need to drop
			 * i_mmap_rwsem and take the locks in order. First,
			 * take a ref on the vma_lock structure so that
			 * we can be guaranteed it will not go away when
			 * dropping i_mmap_rwsem.
			 */
			kref_get(&vma_lock->refs);
			break;
		}

		unmap_hugepage_range(vma, v_start, v_end, NULL,
				     ZAP_FLAG_DROP_MARKER);
		hugetlb_vma_unlock_write(vma);
	}

	i_mmap_unlock_write(mapping);

	if (vma_lock) {
		/*
		 * Wait on vma_lock. We know it is still valid as we have
		 * a reference. We must 'open code' vma locking as we do
		 * not know if vma_lock is still attached to vma.
		 */
		down_write(&vma_lock->rw_sema);
		i_mmap_lock_write(mapping);

		vma = vma_lock->vma;
		if (!vma) {
			/*
			 * If the lock is no longer attached to the vma, then
			 * just unlock, drop our reference and retry looking
			 * for other vmas.
			 */
			up_write(&vma_lock->rw_sema);
			kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
			goto retry;
		}

		/*
		 * vma_lock is still attached to vma. Check to see if vma
		 * still maps page and if so, unmap.
		 */
		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);
		if (hugetlb_vma_maps_page(vma, v_start, page))
			unmap_hugepage_range(vma, v_start, v_end, NULL,
					     ZAP_FLAG_DROP_MARKER);

		kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
		hugetlb_vma_unlock_write(vma);

		goto retry;
	}
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
		      zap_flags_t zap_flags)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after start should be
	 * unmapped. Note, end is exclusive, whereas the interval tree takes
	 * an inclusive "last".
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
		unsigned long v_start;
		unsigned long v_end;

		if (!hugetlb_vma_trylock_write(vma))
			continue;

		v_start = vma_offset_start(vma, start);
		v_end = vma_offset_end(vma, end);

		unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);

		/*
		 * Note that the vma lock only exists for shared/non-private
		 * vmas. Therefore, the lock is not held when calling
		 * unmap_hugepage_range() for private vmas.
		 */
		hugetlb_vma_unlock_write(vma);
	}
}

/*
 * Called with hugetlb fault mutex held.
 * Returns true if page was actually removed, false otherwise.
 */
static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
					struct address_space *mapping,
					struct folio *folio, pgoff_t index,
					bool truncate_op)
{
	bool ret = false;

	/*
	 * If folio is mapped, it was faulted in after being
	 * unmapped in caller. Unmap (again) while holding
	 * the fault mutex. The mutex will prevent faults
	 * until we finish removing the folio.
	 */
	if (unlikely(folio_mapped(folio)))
		hugetlb_unmap_file_folio(h, mapping, folio, index);

	folio_lock(folio);
	/*
	 * We must remove the folio from the page cache before removing
	 * the region/reserve map (hugetlb_unreserve_pages). In
	 * rare out of memory conditions, removal of the region/reserve
	 * map could fail. Correspondingly, the subpool and global
	 * reserve usage count may need to be adjusted.
	 */
	VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
	hugetlb_delete_from_page_cache(folio);
	ret = true;
	if (!truncate_op) {
		if (unlikely(hugetlb_unreserve_pages(inode, index,
							index + 1, 1)))
			hugetlb_fix_reserve_counts(inode);
	}

	folio_unlock(folio);
	return ret;
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch. There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *	maps and global counts. Page faults can race with truncation.
 *	During faults, hugetlb_no_page() checks i_size before page allocation,
 *	and again after obtaining the page table lock. It will 'back out'
 *	allocations in the truncated range.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserve map
 *	deleted. Region/reserve map entries for ranges without associated
 *	pages are not modified. Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX, this routine still performs a hole punch operation.
 */
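/*
 * For reference, the callers in this file are hugetlbfs_evict_inode()
 * and hugetlb_vmtruncate(), which pass LLONG_MAX as lend (truncation),
 * and hugetlbfs_punch_hole(), which passes (hole_start, hole_end).
 */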
| 614 | static void remove_inode_hugepages(struct inode *inode, loff_t lstart, |
| 615 | loff_t lend) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 616 | { |
Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 617 | struct hstate *h = hstate_inode(inode); |
David Gibson | b45b5bd | 2006-03-22 00:08:55 -0800 | [diff] [blame] | 618 | struct address_space *mapping = &inode->i_data; |
Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 619 | const pgoff_t start = lstart >> huge_page_shift(h); |
Mike Kravetz | b5cec28 | 2015-09-08 15:01:41 -0700 | [diff] [blame] | 620 | const pgoff_t end = lend >> huge_page_shift(h); |
Matthew Wilcox (Oracle) | 1508062 | 2022-06-04 16:39:04 -0400 | [diff] [blame] | 621 | struct folio_batch fbatch; |
Jan Kara | d72dc8a | 2017-09-06 16:21:18 -0700 | [diff] [blame] | 622 | pgoff_t next, index; |
Chen, Kenneth W | a43a8c3 | 2006-06-23 02:03:15 -0700 | [diff] [blame] | 623 | int i, freed = 0; |
Mike Kravetz | b5cec28 | 2015-09-08 15:01:41 -0700 | [diff] [blame] | 624 | bool truncate_op = (lend == LLONG_MAX); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 625 | |
Matthew Wilcox (Oracle) | 1508062 | 2022-06-04 16:39:04 -0400 | [diff] [blame] | 626 | folio_batch_init(&fbatch); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 627 | next = start; |
Matthew Wilcox (Oracle) | 1508062 | 2022-06-04 16:39:04 -0400 | [diff] [blame] | 628 | while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) { |
| 629 | for (i = 0; i < folio_batch_count(&fbatch); ++i) { |
| 630 | struct folio *folio = fbatch.folios[i]; |
Miaohe Lin | d4241a0 | 2021-05-04 18:33:34 -0700 | [diff] [blame] | 631 | u32 hash = 0; |
Mike Kravetz | b5cec28 | 2015-09-08 15:01:41 -0700 | [diff] [blame] | 632 | |
Matthew Wilcox (Oracle) | 1508062 | 2022-06-04 16:39:04 -0400 | [diff] [blame] | 633 | index = folio->index; |
Mike Kravetz | 188a397 | 2022-09-14 15:18:02 -0700 | [diff] [blame] | 634 | hash = hugetlb_fault_mutex_hash(mapping, index); |
| 635 | mutex_lock(&hugetlb_fault_mutex_table[hash]); |
Mike Kravetz | e7c5809 | 2019-01-08 15:23:32 -0800 | [diff] [blame] | 636 | |
Mike Kravetz | 4aae8d1 | 2016-01-15 16:57:40 -0800 | [diff] [blame] | 637 | /* |
Mike Kravetz | c862722 | 2022-09-14 15:18:05 -0700 | [diff] [blame] | 638 | * Remove folio that was part of folio_batch. |
Mike Kravetz | 4aae8d1 | 2016-01-15 16:57:40 -0800 | [diff] [blame] | 639 | */ |
Mike Kravetz | c862722 | 2022-09-14 15:18:05 -0700 | [diff] [blame] | 640 | if (remove_inode_single_folio(h, inode, mapping, folio, |
| 641 | index, truncate_op)) |
| 642 | freed++; |
Mike Kravetz | e7c5809 | 2019-01-08 15:23:32 -0800 | [diff] [blame] | 643 | |
Mike Kravetz | 188a397 | 2022-09-14 15:18:02 -0700 | [diff] [blame] | 644 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 645 | } |
Matthew Wilcox (Oracle) | 1508062 | 2022-06-04 16:39:04 -0400 | [diff] [blame] | 646 | folio_batch_release(&fbatch); |
Mike Kravetz | 1817889 | 2015-11-20 15:57:13 -0800 | [diff] [blame] | 647 | cond_resched(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 648 | } |
Mike Kravetz | b5cec28 | 2015-09-08 15:01:41 -0700 | [diff] [blame] | 649 | |
| 650 | if (truncate_op) |
| 651 | (void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 652 | } |

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time. If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static void hugetlbfs_zero_partial_page(struct hstate *h,
					struct address_space *mapping,
					loff_t start,
					loff_t end)
{
	pgoff_t idx = start >> huge_page_shift(h);
	struct folio *folio;

	folio = filemap_lock_folio(mapping, idx);
	if (IS_ERR(folio))
		return;

	start = start & ~huge_page_mask(h);
	end = end & ~huge_page_mask(h);
	if (!end)
		end = huge_page_size(h);

	folio_zero_segment(folio, (size_t)start, (size_t)end);

	folio_unlock(folio);
	folio_put(folio);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * hole_start and hole_end indicate the full pages within the hole.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

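	/*
	 * Worked example (illustrative): with 2 MB huge pages, punching
	 * offset = 1 MB, len = 6 MB gives hole_start = 2 MB and
	 * hole_end = 6 MB. Full pages in [2 MB, 6 MB) are removed below;
	 * the partial ranges [1 MB, 2 MB) and [6 MB, 7 MB) are zeroed in
	 * place via hugetlbfs_zero_partial_page().
	 */
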
	inode_lock(inode);

	/* protected by i_rwsem */
	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
		inode_unlock(inode);
		return -EPERM;
	}

	i_mmap_lock_write(mapping);

	/* If range starts before first full page, zero partial page. */
	if (offset < hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				offset, min(offset + len, hole_start));

	/* Unmap users of full pages in the hole. */
	if (hole_end > hole_start) {
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
					      hole_start >> PAGE_SHIFT,
					      hole_end >> PAGE_SHIFT, 0);
	}

	/* If range extends beyond last full page, zero partial page. */
	if ((offset + len) > hole_end && (offset + len) > hole_start)
		hugetlbfs_zero_partial_page(h, mapping,
				hole_end, offset + len);

	i_mmap_unlock_write(mapping);

	/* Remove full pages from the file. */
	if (hole_end > hole_start)
		remove_inode_hugepages(inode, hole_start, hole_end);

	inode_unlock(inode);

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

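	/*
	 * Worked example (illustrative): with 2 MB huge pages
	 * (hpage_shift = 21), offset = 3 MB and len = 3 MB give
	 * start = 1 and end = 3, so the huge pages at indices 1 and 2,
	 * covering file range [2 MB, 6 MB), are preallocated.
	 */
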
	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines. If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct folio *folio;
		unsigned long addr;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		folio = filemap_get_folio(mapping, index);
		if (!IS_ERR(folio)) {
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			continue;
		}

		/*
		 * Allocate folio without setting the avoid_reserve argument.
		 * There certainly are no reserves associated with the
		 * pseudo_vma. However, there could be shared mappings with
		 * reserves for the file at the inode level. If we fallocate
		 * folios in these areas, we need to consume the reserves
		 * to keep reservation accounting consistent.
		 */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);
		folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(folio)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(folio);
			goto out;
		}
		clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
		__folio_mark_uptodate(folio);
		error = hugetlb_add_to_page_cache(folio, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, folio);
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		folio_set_hugetlb_migratable(folio);
		/*
		 * folio_unlock because locked by hugetlb_add_to_page_cache()
		 * folio_put() due to reference from alloc_hugetlb_folio()
		 */
		folio_unlock(folio);
		folio_put(folio);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

static int hugetlbfs_setattr(struct mnt_idmap *idmap,
			     struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_rwsem */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		hugetlb_vmtruncate(inode, newsize);
	}

	setattr_copy(&nop_mnt_idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
| 926 | |
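Since ATTR_SIZE is rejected unless the new size is a multiple of the huge page size, ftruncate() on a hugetlbfs file only accepts aligned lengths. A sketch under the same /dev/hugepages assumption:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        size_t hpage = 2UL << 20;
        int fd = open("/dev/hugepages/demo", O_CREAT | O_RDWR, 0600);

        if (fd < 0)
                return 1;
        if (ftruncate(fd, 2 * hpage) == 0)      /* aligned: succeeds */
                puts("grew to two huge pages");
        if (ftruncate(fd, hpage + 4096) < 0)    /* unaligned: -EINVAL */
                printf("unaligned size rejected: errno=%d (EINVAL=%d)\n",
                       errno, EINVAL);
        close(fd);
        return 0;
}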
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 927 | static struct inode *hugetlbfs_get_root(struct super_block *sb, |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 928 | struct hugetlbfs_fs_context *ctx) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 | { |
| 930 | struct inode *inode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 931 | |
| 932 | inode = new_inode(sb); |
| 933 | if (inode) { |
Christoph Hellwig | 85fe402 | 2010-10-23 11:19:54 -0400 | [diff] [blame] | 934 | inode->i_ino = get_next_ino(); |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 935 | inode->i_mode = S_IFDIR | ctx->mode; |
| 936 | inode->i_uid = ctx->uid; |
| 937 | inode->i_gid = ctx->gid; |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 938 | inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 939 | inode->i_op = &hugetlbfs_dir_inode_operations; |
| 940 | inode->i_fop = &simple_dir_operations; |
| 941 | /* directory inodes start off with i_nlink == 2 (for "." entry) */ |
| 942 | inc_nlink(inode); |
Aneesh Kumar K.V | 65ed760 | 2012-04-25 16:01:50 -0700 | [diff] [blame] | 943 | lockdep_annotate_inode_mutex_key(inode); |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 944 | } |
| 945 | return inode; |
| 946 | } |
| 947 | |
Michal Hocko | b610ded | 2013-08-13 16:00:55 -0700 | [diff] [blame] | 948 | /* |
Davidlohr Bueso | c8c06ef | 2014-12-12 16:54:24 -0800 | [diff] [blame] | 949 | * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never |
Michal Hocko | b610ded | 2013-08-13 16:00:55 -0700 | [diff] [blame] | 950 | * be taken from reclaim -- unlike regular filesystems. This needs an |
Kirill A. Shutemov | 88f306b | 2016-01-15 16:57:31 -0800 | [diff] [blame] | 951 | * annotation because huge_pmd_share() does an allocation under hugetlb's |
Davidlohr Bueso | c8c06ef | 2014-12-12 16:54:24 -0800 | [diff] [blame] | 952 | * i_mmap_rwsem. |
Michal Hocko | b610ded | 2013-08-13 16:00:55 -0700 | [diff] [blame] | 953 | */ |
Davidlohr Bueso | c8c06ef | 2014-12-12 16:54:24 -0800 | [diff] [blame] | 954 | static struct lock_class_key hugetlbfs_i_mmap_rwsem_key; |
Michal Hocko | b610ded | 2013-08-13 16:00:55 -0700 | [diff] [blame] | 955 | |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 956 | static struct inode *hugetlbfs_get_inode(struct super_block *sb, |
| 957 | struct inode *dir, |
Al Viro | 18df225 | 2011-07-24 23:17:40 -0400 | [diff] [blame] | 958 | umode_t mode, dev_t dev) |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 959 | { |
| 960 | struct inode *inode; |
Mike Kravetz | 58b6e5e | 2019-04-05 18:39:06 -0700 | [diff] [blame] | 961 | struct resv_map *resv_map = NULL; |
Joonsoo Kim | 9119a41 | 2014-04-03 14:47:25 -0700 | [diff] [blame] | 962 | |
Mike Kravetz | 58b6e5e | 2019-04-05 18:39:06 -0700 | [diff] [blame] | 963 | /* |
| 964 | * Reserve maps are only needed for inodes that can have associated |
| 965 | * page allocations. |
| 966 | */ |
| 967 | if (S_ISREG(mode) || S_ISLNK(mode)) { |
| 968 | resv_map = resv_map_alloc(); |
| 969 | if (!resv_map) |
| 970 | return NULL; |
| 971 | } |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 972 | |
| 973 | inode = new_inode(sb); |
| 974 | if (inode) { |
Marc-André Lureau | ff62a34 | 2018-01-31 16:19:25 -0800 | [diff] [blame] | 975 | struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); |
| 976 | |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 977 | inode->i_ino = get_next_ino(); |
Christian Brauner | f2d4014 | 2023-01-13 12:49:25 +0100 | [diff] [blame] | 978 | inode_init_owner(&nop_mnt_idmap, inode, dir, mode); |
Davidlohr Bueso | c8c06ef | 2014-12-12 16:54:24 -0800 | [diff] [blame] | 979 | lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, |
| 980 | &hugetlbfs_i_mmap_rwsem_key); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 981 | inode->i_mapping->a_ops = &hugetlbfs_aops; |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 982 | inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); |
Joonsoo Kim | 9119a41 | 2014-04-03 14:47:25 -0700 | [diff] [blame] | 983 | inode->i_mapping->private_data = resv_map; |
Marc-André Lureau | ff62a34 | 2018-01-31 16:19:25 -0800 | [diff] [blame] | 984 | info->seals = F_SEAL_SEAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 985 | switch (mode & S_IFMT) { |
| 986 | default: |
| 987 | init_special_inode(inode, mode, dev); |
| 988 | break; |
| 989 | case S_IFREG: |
| 990 | inode->i_op = &hugetlbfs_inode_operations; |
| 991 | inode->i_fop = &hugetlbfs_file_operations; |
| 992 | break; |
| 993 | case S_IFDIR: |
| 994 | inode->i_op = &hugetlbfs_dir_inode_operations; |
| 995 | inode->i_fop = &simple_dir_operations; |
| 996 | |
| 997 | /* directory inodes start off with i_nlink == 2 (for "." entry) */ |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 998 | inc_nlink(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 999 | break; |
| 1000 | case S_IFLNK: |
| 1001 | inode->i_op = &page_symlink_inode_operations; |
Al Viro | 21fc61c | 2015-11-17 01:07:57 -0500 | [diff] [blame] | 1002 | inode_nohighmem(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1003 | break; |
| 1004 | } |
Josh Boyer | e096d0c | 2011-08-25 07:48:12 -0400 | [diff] [blame] | 1005 | lockdep_annotate_inode_mutex_key(inode); |
Mike Kravetz | 58b6e5e | 2019-04-05 18:39:06 -0700 | [diff] [blame] | 1006 | } else { |
| 1007 | if (resv_map) |
| 1008 | kref_put(&resv_map->refs, resv_map_release); |
| 1009 | } |
Joonsoo Kim | 9119a41 | 2014-04-03 14:47:25 -0700 | [diff] [blame] | 1010 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1011 | return inode; |
| 1012 | } |
| 1013 | |
| 1014 | /* |
| 1015 |  * File creation. Allocate an inode, and we're done. |
| 1016 | */ |
Christian Brauner | 5ebb29b | 2023-01-13 12:49:16 +0100 | [diff] [blame] | 1017 | static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir, |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 1018 | struct dentry *dentry, umode_t mode, dev_t dev) |
Piotr Sarna | 1ab5b82 | 2019-11-30 17:56:43 -0800 | [diff] [blame] | 1019 | { |
Al Viro | 19ee534 | 2022-09-24 06:59:59 +0200 | [diff] [blame] | 1020 | struct inode *inode; |
| 1021 | |
| 1022 | inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev); |
| 1023 | if (!inode) |
| 1024 | return -ENOSPC; |
| 1025 | dir->i_ctime = dir->i_mtime = current_time(dir); |
| 1026 | d_instantiate(dentry, inode); |
| 1027 | dget(dentry); /* Extra count - pin the dentry in core */ |
| 1028 | return 0; |
Piotr Sarna | 1ab5b82 | 2019-11-30 17:56:43 -0800 | [diff] [blame] | 1029 | } |
| 1030 | |
Christian Brauner | c54bd91 | 2023-01-13 12:49:15 +0100 | [diff] [blame] | 1031 | static int hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 1032 | struct dentry *dentry, umode_t mode) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1033 | { |
Christian Brauner | 5ebb29b | 2023-01-13 12:49:16 +0100 | [diff] [blame] | 1034 | int retval = hugetlbfs_mknod(&nop_mnt_idmap, dir, dentry, |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 1035 | mode | S_IFDIR, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1036 | if (!retval) |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 1037 | inc_nlink(dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1038 | return retval; |
| 1039 | } |
| 1040 | |
Christian Brauner | 6c960e6 | 2023-01-13 12:49:13 +0100 | [diff] [blame] | 1041 | static int hugetlbfs_create(struct mnt_idmap *idmap, |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 1042 | struct inode *dir, struct dentry *dentry, |
| 1043 | umode_t mode, bool excl) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1044 | { |
Christian Brauner | 5ebb29b | 2023-01-13 12:49:16 +0100 | [diff] [blame] | 1045 | return hugetlbfs_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFREG, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1046 | } |
| 1047 | |
Christian Brauner | 011e2b7 | 2023-01-13 12:49:18 +0100 | [diff] [blame] | 1048 | static int hugetlbfs_tmpfile(struct mnt_idmap *idmap, |
Miklos Szeredi | 863f144 | 2022-09-24 07:00:00 +0200 | [diff] [blame] | 1049 | struct inode *dir, struct file *file, |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 1050 | umode_t mode) |
Piotr Sarna | 1ab5b82 | 2019-11-30 17:56:43 -0800 | [diff] [blame] | 1051 | { |
Al Viro | 19ee534 | 2022-09-24 06:59:59 +0200 | [diff] [blame] | 1052 | struct inode *inode; |
| 1053 | |
| 1054 | inode = hugetlbfs_get_inode(dir->i_sb, dir, mode | S_IFREG, 0); |
| 1055 | if (!inode) |
| 1056 | return -ENOSPC; |
| 1057 | dir->i_ctime = dir->i_mtime = current_time(dir); |
Miklos Szeredi | 863f144 | 2022-09-24 07:00:00 +0200 | [diff] [blame] | 1058 | d_tmpfile(file, inode); |
| 1059 | return finish_open_simple(file, 0); |
Piotr Sarna | 1ab5b82 | 2019-11-30 17:56:43 -0800 | [diff] [blame] | 1060 | } |
| 1061 | |
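The tmpfile hook above is what backs O_TMPFILE on a hugetlbfs directory: the inode never gets a name, so its huge pages are released when the last reference is dropped. A sketch, same mount assumption:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t len = 2UL << 20;
        int fd = open("/dev/hugepages", O_TMPFILE | O_RDWR, 0600);

        if (fd < 0)
                return 1;
        if (ftruncate(fd, len) < 0)
                return 1;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;
        /* ... use p ... */
        munmap(p, len);
        close(fd);      /* unlinked inode: huge pages freed here */
        return 0;
}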
Christian Brauner | 7a77db9 | 2023-01-13 12:49:14 +0100 | [diff] [blame] | 1062 | static int hugetlbfs_symlink(struct mnt_idmap *idmap, |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 1063 | struct inode *dir, struct dentry *dentry, |
| 1064 | const char *symname) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1065 | { |
| 1066 | struct inode *inode; |
| 1067 | int error = -ENOSPC; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1068 | |
Al Viro | 7d54fa6 | 2011-07-24 20:20:48 -0400 | [diff] [blame] | 1069 | inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1070 | if (inode) { |
| 1071 | int l = strlen(symname)+1; |
| 1072 | error = page_symlink(inode, symname, l); |
| 1073 | if (!error) { |
| 1074 | d_instantiate(dentry, inode); |
| 1075 | dget(dentry); |
| 1076 | } else |
| 1077 | iput(inode); |
| 1078 | } |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 1079 | dir->i_ctime = dir->i_mtime = current_time(dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1080 | |
| 1081 | return error; |
| 1082 | } |
| 1083 | |
Matthew Wilcox (Oracle) | b890ec2 | 2022-06-06 10:47:21 -0400 | [diff] [blame] | 1084 | #ifdef CONFIG_MIGRATION |
| 1085 | static int hugetlbfs_migrate_folio(struct address_space *mapping, |
| 1086 | struct folio *dst, struct folio *src, |
Mel Gorman | a6bc32b | 2012-01-12 17:19:43 -0800 | [diff] [blame] | 1087 | enum migrate_mode mode) |
Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1088 | { |
| 1089 | int rc; |
| 1090 | |
Matthew Wilcox (Oracle) | b890ec2 | 2022-06-06 10:47:21 -0400 | [diff] [blame] | 1091 | rc = migrate_huge_page_move_mapping(mapping, dst, src); |
Rafael Aquini | 78bd520 | 2012-12-11 16:02:31 -0800 | [diff] [blame] | 1092 | if (rc != MIGRATEPAGE_SUCCESS) |
Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1093 | return rc; |
Mike Kravetz | cb6acd0 | 2019-02-28 16:22:02 -0800 | [diff] [blame] | 1094 | |
Sidhartha Kumar | 149562f | 2022-09-22 10:42:05 -0500 | [diff] [blame] | 1095 | if (hugetlb_folio_subpool(src)) { |
| 1096 | hugetlb_set_folio_subpool(dst, |
| 1097 | hugetlb_folio_subpool(src)); |
| 1098 | hugetlb_set_folio_subpool(src, NULL); |
Mike Kravetz | cb6acd0 | 2019-02-28 16:22:02 -0800 | [diff] [blame] | 1099 | } |
| 1100 | |
Jérôme Glisse | 2916ecc | 2017-09-08 16:12:06 -0700 | [diff] [blame] | 1101 | if (mode != MIGRATE_SYNC_NO_COPY) |
Matthew Wilcox (Oracle) | b890ec2 | 2022-06-06 10:47:21 -0400 | [diff] [blame] | 1102 | folio_migrate_copy(dst, src); |
Jérôme Glisse | 2916ecc | 2017-09-08 16:12:06 -0700 | [diff] [blame] | 1103 | else |
Matthew Wilcox (Oracle) | b890ec2 | 2022-06-06 10:47:21 -0400 | [diff] [blame] | 1104 | folio_migrate_flags(dst, src); |
Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1105 | |
Rafael Aquini | 78bd520 | 2012-12-11 16:02:31 -0800 | [diff] [blame] | 1106 | return MIGRATEPAGE_SUCCESS; |
Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1107 | } |
Matthew Wilcox (Oracle) | b890ec2 | 2022-06-06 10:47:21 -0400 | [diff] [blame] | 1108 | #else |
| 1109 | #define hugetlbfs_migrate_folio NULL |
| 1110 | #endif |
Naoya Horiguchi | 290408d | 2010-09-08 10:19:35 +0900 | [diff] [blame] | 1111 | |
Naoya Horiguchi | 78bb920 | 2017-07-10 15:47:50 -0700 | [diff] [blame] | 1112 | static int hugetlbfs_error_remove_page(struct address_space *mapping, |
| 1113 | struct page *page) |
| 1114 | { |
Naoya Horiguchi | 78bb920 | 2017-07-10 15:47:50 -0700 | [diff] [blame] | 1115 | return 0;	/* keep hwpoisoned hugetlb pages in the page cache; memory-failure handles them */ |
| 1116 | } |
| 1117 | |
David Howells | 4a25220 | 2017-07-05 16:24:18 +0100 | [diff] [blame] | 1118 | /* |
| 1119 | * Display the mount options in /proc/mounts. |
| 1120 | */ |
| 1121 | static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root) |
| 1122 | { |
| 1123 | struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb); |
| 1124 | struct hugepage_subpool *spool = sbinfo->spool; |
| 1125 | unsigned long hpage_size = huge_page_size(sbinfo->hstate); |
| 1126 | unsigned hpage_shift = huge_page_shift(sbinfo->hstate); |
| 1127 | char mod; |
| 1128 | |
| 1129 | if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) |
| 1130 | seq_printf(m, ",uid=%u", |
| 1131 | from_kuid_munged(&init_user_ns, sbinfo->uid)); |
| 1132 | if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) |
| 1133 | seq_printf(m, ",gid=%u", |
| 1134 | from_kgid_munged(&init_user_ns, sbinfo->gid)); |
| 1135 | if (sbinfo->mode != 0755) |
| 1136 | seq_printf(m, ",mode=%o", sbinfo->mode); |
| 1137 | if (sbinfo->max_inodes != -1) |
| 1138 | seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes); |
| 1139 | |
| 1140 | hpage_size /= 1024; |
| 1141 | mod = 'K'; |
| 1142 | if (hpage_size >= 1024) { |
| 1143 | hpage_size /= 1024; |
| 1144 | mod = 'M'; |
| 1145 | } |
| 1146 | seq_printf(m, ",pagesize=%lu%c", hpage_size, mod); |
| 1147 | if (spool) { |
| 1148 | if (spool->max_hpages != -1) |
| 1149 | seq_printf(m, ",size=%llu", |
| 1150 | (unsigned long long)spool->max_hpages << hpage_shift); |
| 1151 | if (spool->min_hpages != -1) |
| 1152 | seq_printf(m, ",min_size=%llu", |
| 1153 | (unsigned long long)spool->min_hpages << hpage_shift); |
| 1154 | } |
| 1155 | return 0; |
| 1156 | } |
| 1157 | |
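Concretely, for a hypothetical 2 MB mount created with "pagesize=2M,size=1G,min_size=512M" (uid, gid and mode are omitted when they hold their defaults), the /proc/mounts line produced here would read roughly:

        none /mnt/huge hugetlbfs rw,relatime,pagesize=2M,size=1073741824,min_size=536870912 0 0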
David Howells | 726c334 | 2006-06-23 02:02:58 -0700 | [diff] [blame] | 1158 | static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1159 | { |
David Howells | 726c334 | 2006-06-23 02:02:58 -0700 | [diff] [blame] | 1160 | struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb); |
David Howells | 2b0143b | 2015-03-17 22:25:59 +0000 | [diff] [blame] | 1161 | struct hstate *h = hstate_inode(d_inode(dentry)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1162 | |
| 1163 | buf->f_type = HUGETLBFS_MAGIC; |
Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 1164 | buf->f_bsize = huge_page_size(h); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1165 | if (sbinfo) { |
| 1166 | spin_lock(&sbinfo->stat_lock); |
Miaohe Lin | 1168076 | 2022-07-26 22:29:18 +0800 | [diff] [blame] | 1167 | /* If no limits set, just report 0 or -1 for max/free/used |
David Gibson | 74a8a65 | 2005-11-21 21:32:24 -0800 | [diff] [blame] | 1168 | * blocks, like simple_statfs() */ |
David Gibson | 9048162 | 2012-03-21 16:34:12 -0700 | [diff] [blame] | 1169 | if (sbinfo->spool) { |
| 1170 | long free_pages; |
| 1171 | |
Mina Almasry | 4b25f03 | 2022-05-09 18:20:50 -0700 | [diff] [blame] | 1172 | spin_lock_irq(&sbinfo->spool->lock); |
David Gibson | 9048162 | 2012-03-21 16:34:12 -0700 | [diff] [blame] | 1173 | buf->f_blocks = sbinfo->spool->max_hpages; |
| 1174 | free_pages = sbinfo->spool->max_hpages |
| 1175 | - sbinfo->spool->used_hpages; |
| 1176 | buf->f_bavail = buf->f_bfree = free_pages; |
Mina Almasry | 4b25f03 | 2022-05-09 18:20:50 -0700 | [diff] [blame] | 1177 | spin_unlock_irq(&sbinfo->spool->lock); |
David Gibson | 74a8a65 | 2005-11-21 21:32:24 -0800 | [diff] [blame] | 1178 | buf->f_files = sbinfo->max_inodes; |
| 1179 | buf->f_ffree = sbinfo->free_inodes; |
| 1180 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1181 | spin_unlock(&sbinfo->stat_lock); |
| 1182 | } |
| 1183 | buf->f_namelen = NAME_MAX; |
| 1184 | return 0; |
| 1185 | } |
| 1186 | |
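Seen from userspace, statfs() reports the huge page size as f_bsize and the subpool limits, when set, as block counts (0 otherwise). A sketch, mount point assumed:

#include <stdio.h>
#include <sys/vfs.h>
#include <linux/magic.h>        /* HUGETLBFS_MAGIC */

int main(void)
{
        struct statfs s;

        if (statfs("/dev/hugepages", &s) < 0)
                return 1;
        if (s.f_type == HUGETLBFS_MAGIC)
                printf("huge page size %ld, %ld of %ld huge pages free\n",
                       (long)s.f_bsize, (long)s.f_bfree, (long)s.f_blocks);
        return 0;
}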
| 1187 | static void hugetlbfs_put_super(struct super_block *sb) |
| 1188 | { |
| 1189 | struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb); |
| 1190 | |
| 1191 | if (sbi) { |
| 1192 | sb->s_fs_info = NULL; |
David Gibson | 9048162 | 2012-03-21 16:34:12 -0700 | [diff] [blame] | 1193 | |
| 1194 | if (sbi->spool) |
| 1195 | hugepage_put_subpool(sbi->spool); |
| 1196 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | kfree(sbi); |
| 1198 | } |
| 1199 | } |
| 1200 | |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1201 | static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo) |
| 1202 | { |
| 1203 | if (sbinfo->free_inodes >= 0) { |
| 1204 | spin_lock(&sbinfo->stat_lock); |
| 1205 | if (unlikely(!sbinfo->free_inodes)) { |
| 1206 | spin_unlock(&sbinfo->stat_lock); |
| 1207 | return 0; |
| 1208 | } |
| 1209 | sbinfo->free_inodes--; |
| 1210 | spin_unlock(&sbinfo->stat_lock); |
| 1211 | } |
| 1212 | |
| 1213 | return 1; |
| 1214 | } |
| 1215 | |
| 1216 | static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo) |
| 1217 | { |
| 1218 | if (sbinfo->free_inodes >= 0) { |
| 1219 | spin_lock(&sbinfo->stat_lock); |
| 1220 | sbinfo->free_inodes++; |
| 1221 | spin_unlock(&sbinfo->stat_lock); |
| 1222 | } |
| 1223 | } |
| 1224 | |
| 1225 | |
Christoph Lameter | e18b890 | 2006-12-06 20:33:20 -0800 | [diff] [blame] | 1226 | static struct kmem_cache *hugetlbfs_inode_cachep; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1227 | |
| 1228 | static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) |
| 1229 | { |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1230 | struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1231 | struct hugetlbfs_inode_info *p; |
| 1232 | |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1233 | if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1234 | return NULL; |
Muchun Song | fd60b28 | 2022-03-22 14:41:03 -0700 | [diff] [blame] | 1235 | p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL); |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1236 | if (unlikely(!p)) { |
| 1237 | hugetlbfs_inc_free_inodes(sbinfo); |
| 1238 | return NULL; |
| 1239 | } |
Mike Kravetz | 4742a35 | 2017-03-31 15:12:01 -0700 | [diff] [blame] | 1240 | |
| 1241 | /* |
| 1242 | * Any time after allocation, hugetlbfs_destroy_inode can be called |
| 1243 | * for the inode. mpol_free_shared_policy is unconditionally called |
| 1244 | * as part of hugetlbfs_destroy_inode. So, initialize policy here |
| 1245 | * in case of a quick call to destroy. |
| 1246 | * |
| 1247 | * Note that the policy is initialized even if we are creating a |
| 1248 | * private inode. This simplifies hugetlbfs_destroy_inode. |
| 1249 | */ |
| 1250 | mpol_shared_policy_init(&p->policy, NULL); |
| 1251 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1252 | return &p->vfs_inode; |
| 1253 | } |
| 1254 | |
Al Viro | b62de32 | 2019-04-15 23:16:38 -0400 | [diff] [blame] | 1255 | static void hugetlbfs_free_inode(struct inode *inode) |
Nick Piggin | fa0d7e3d | 2011-01-07 17:49:49 +1100 | [diff] [blame] | 1256 | { |
Nick Piggin | fa0d7e3d | 2011-01-07 17:49:49 +1100 | [diff] [blame] | 1257 | kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode)); |
| 1258 | } |
| 1259 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1260 | static void hugetlbfs_destroy_inode(struct inode *inode) |
| 1261 | { |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1262 | hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1263 | mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1264 | } |
| 1265 | |
Christoph Hellwig | f5e54d6 | 2006-06-28 04:26:44 -0700 | [diff] [blame] | 1266 | static const struct address_space_operations hugetlbfs_aops = { |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 1267 | .write_begin = hugetlbfs_write_begin, |
| 1268 | .write_end = hugetlbfs_write_end, |
Matthew Wilcox (Oracle) | 46de8b97 | 2022-02-09 20:22:13 +0000 | [diff] [blame] | 1269 | .dirty_folio = noop_dirty_folio, |
Matthew Wilcox (Oracle) | b890ec2 | 2022-06-06 10:47:21 -0400 | [diff] [blame] | 1270 | .migrate_folio = hugetlbfs_migrate_folio, |
Naoya Horiguchi | 78bb920 | 2017-07-10 15:47:50 -0700 | [diff] [blame] | 1271 | .error_remove_page = hugetlbfs_error_remove_page, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1272 | }; |
| 1273 | |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1274 | |
Alexey Dobriyan | 51cc506 | 2008-07-25 19:45:34 -0700 | [diff] [blame] | 1275 | static void init_once(void *foo) |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1276 | { |
Li zeming | dbaf7dc | 2022-11-07 09:56:59 +0800 | [diff] [blame] | 1277 | struct hugetlbfs_inode_info *ei = foo; |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1278 | |
Christoph Lameter | a35afb8 | 2007-05-16 22:10:57 -0700 | [diff] [blame] | 1279 | inode_init_once(&ei->vfs_inode); |
Christoph Hellwig | 9652798 | 2005-10-29 18:16:42 -0700 | [diff] [blame] | 1280 | } |
| 1281 | |
Arjan van de Ven | 4b6f5d2 | 2006-03-28 01:56:42 -0800 | [diff] [blame] | 1282 | const struct file_operations hugetlbfs_file_operations = { |
Al Viro | 34d0640 | 2015-04-03 11:31:35 -0400 | [diff] [blame] | 1283 | .read_iter = hugetlbfs_read_iter, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1284 | .mmap = hugetlbfs_file_mmap, |
Christoph Hellwig | 1b061d9 | 2010-05-26 17:53:41 +0200 | [diff] [blame] | 1285 | .fsync = noop_fsync, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1286 | .get_unmapped_area = hugetlb_get_unmapped_area, |
Mike Kravetz | 70c3547 | 2015-09-08 15:01:54 -0700 | [diff] [blame] | 1287 | .llseek = default_llseek, |
| 1288 | .fallocate = hugetlbfs_fallocate, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1289 | }; |
| 1290 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 1291 | static const struct inode_operations hugetlbfs_dir_inode_operations = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1292 | .create = hugetlbfs_create, |
| 1293 | .lookup = simple_lookup, |
| 1294 | .link = simple_link, |
| 1295 | .unlink = simple_unlink, |
| 1296 | .symlink = hugetlbfs_symlink, |
| 1297 | .mkdir = hugetlbfs_mkdir, |
| 1298 | .rmdir = simple_rmdir, |
| 1299 | .mknod = hugetlbfs_mknod, |
| 1300 | .rename = simple_rename, |
| 1301 | .setattr = hugetlbfs_setattr, |
Piotr Sarna | 1ab5b82 | 2019-11-30 17:56:43 -0800 | [diff] [blame] | 1302 | .tmpfile = hugetlbfs_tmpfile, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 | }; |
| 1304 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 1305 | static const struct inode_operations hugetlbfs_inode_operations = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1306 | .setattr = hugetlbfs_setattr, |
| 1307 | }; |
| 1308 | |
Josef 'Jeff' Sipek | ee9b6d6 | 2007-02-12 00:55:41 -0800 | [diff] [blame] | 1309 | static const struct super_operations hugetlbfs_ops = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1310 | .alloc_inode = hugetlbfs_alloc_inode, |
Al Viro | b62de32 | 2019-04-15 23:16:38 -0400 | [diff] [blame] | 1311 | .free_inode = hugetlbfs_free_inode, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1312 | .destroy_inode = hugetlbfs_destroy_inode, |
Al Viro | 2bbbda3 | 2010-06-04 19:52:12 -0400 | [diff] [blame] | 1313 | .evict_inode = hugetlbfs_evict_inode, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1314 | .statfs = hugetlbfs_statfs, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1315 | .put_super = hugetlbfs_put_super, |
David Howells | 4a25220 | 2017-07-05 16:24:18 +0100 | [diff] [blame] | 1316 | .show_options = hugetlbfs_show_options, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1317 | }; |
| 1318 | |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1319 | /* |
| 1320 | * Convert size option passed from command line to number of huge pages |
| 1321 | * in the pool specified by hstate. Size option could be in bytes |
| 1322 | * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT). |
| 1323 | */ |
David Howells | 4a25220 | 2017-07-05 16:24:18 +0100 | [diff] [blame] | 1324 | static long |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1325 | hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt, |
David Howells | 4a25220 | 2017-07-05 16:24:18 +0100 | [diff] [blame] | 1326 | enum hugetlbfs_size_type val_type) |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1327 | { |
| 1328 | if (val_type == NO_SIZE) |
| 1329 | return -1; |
| 1330 | |
| 1331 | if (val_type == SIZE_PERCENT) { |
| 1332 | size_opt <<= huge_page_shift(h); |
| 1333 | size_opt *= h->max_huge_pages; |
| 1334 | do_div(size_opt, 100); |
| 1335 | } |
| 1336 | |
| 1337 | size_opt >>= huge_page_shift(h); |
| 1338 | return size_opt; |
| 1339 | } |
| 1340 | |
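A worked example of the conversion, assuming 2 MB huge pages (shift 21) and a pool of max_huge_pages = 512: "size=50%" arrives as size_opt = 50 with SIZE_PERCENT, so

        size_opt <<= 21;        /* 50 << 21  = 104857600       */
        size_opt *= 512;        /*           = 53687091200     */
        size_opt /= 100;        /* do_div()  = 536870912       */
        size_opt >>= 21;        /*           = 256 huge pages  */

while a plain byte count such as "size=1G" (SIZE_STD) is simply 1073741824 >> 21 = 512 huge pages.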
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1341 | /* |
| 1342 | * Parse one mount parameter. |
| 1343 | */ |
| 1344 | static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1345 | { |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1346 | struct hugetlbfs_fs_context *ctx = fc->fs_private; |
| 1347 | struct fs_parse_result result; |
| 1348 | char *rest; |
| 1349 | unsigned long ps; |
| 1350 | int opt; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1351 | |
Al Viro | d7167b1 | 2019-09-07 07:23:15 -0400 | [diff] [blame] | 1352 | opt = fs_parse(fc, hugetlb_fs_parameters, param, &result); |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1353 | if (opt < 0) |
| 1354 | return opt; |
| 1355 | |
| 1356 | switch (opt) { |
| 1357 | case Opt_uid: |
| 1358 | ctx->uid = make_kuid(current_user_ns(), result.uint_32); |
| 1359 | if (!uid_valid(ctx->uid)) |
| 1360 | goto bad_val; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1361 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1362 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1363 | case Opt_gid: |
| 1364 | ctx->gid = make_kgid(current_user_ns(), result.uint_32); |
| 1365 | if (!gid_valid(ctx->gid)) |
| 1366 | goto bad_val; |
| 1367 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1368 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1369 | case Opt_mode: |
| 1370 | ctx->mode = result.uint_32 & 01777U; |
| 1371 | return 0; |
Randy Dunlap | e73a75f | 2007-07-15 23:40:52 -0700 | [diff] [blame] | 1372 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1373 | case Opt_size: |
| 1374 | /* memparse() will accept a K/M/G without a digit */ |
Hawkins Jiawei | 26215b7e | 2022-10-21 07:16:08 +0800 | [diff] [blame] | 1375 | if (!param->string || !isdigit(param->string[0])) |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1376 | goto bad_val; |
| 1377 | ctx->max_size_opt = memparse(param->string, &rest); |
| 1378 | ctx->max_val_type = SIZE_STD; |
| 1379 | if (*rest == '%') |
| 1380 | ctx->max_val_type = SIZE_PERCENT; |
| 1381 | return 0; |
Randy Dunlap | e73a75f | 2007-07-15 23:40:52 -0700 | [diff] [blame] | 1382 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1383 | case Opt_nr_inodes: |
| 1384 | /* memparse() will accept a K/M/G without a digit */ |
Hawkins Jiawei | 26215b7e | 2022-10-21 07:16:08 +0800 | [diff] [blame] | 1385 | if (!param->string || !isdigit(param->string[0])) |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1386 | goto bad_val; |
| 1387 | ctx->nr_inodes = memparse(param->string, &rest); |
| 1388 | return 0; |
Randy Dunlap | e73a75f | 2007-07-15 23:40:52 -0700 | [diff] [blame] | 1389 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1390 | case Opt_pagesize: |
| 1391 | ps = memparse(param->string, &rest); |
| 1392 | ctx->hstate = size_to_hstate(ps); |
| 1393 | if (!ctx->hstate) { |
Miaohe Lin | d003651 | 2022-07-26 22:29:14 +0800 | [diff] [blame] | 1394 | pr_err("Unsupported page size %lu MB\n", ps / SZ_1M); |
Lee Schermerhorn | b4c07bc | 2007-07-15 23:40:54 -0700 | [diff] [blame] | 1395 | return -EINVAL; |
Randy Dunlap | e73a75f | 2007-07-15 23:40:52 -0700 | [diff] [blame] | 1396 | } |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1397 | return 0; |
| 1398 | |
| 1399 | case Opt_min_size: |
| 1400 | /* memparse() will accept a K/M/G without a digit */ |
Hawkins Jiawei | 26215b7e | 2022-10-21 07:16:08 +0800 | [diff] [blame] | 1401 | if (!param->string || !isdigit(param->string[0])) |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1402 | goto bad_val; |
| 1403 | ctx->min_size_opt = memparse(param->string, &rest); |
| 1404 | ctx->min_val_type = SIZE_STD; |
| 1405 | if (*rest == '%') |
| 1406 | ctx->min_val_type = SIZE_PERCENT; |
| 1407 | return 0; |
| 1408 | |
| 1409 | default: |
| 1410 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1411 | } |
Andi Kleen | a137e1c | 2008-07-23 21:27:43 -0700 | [diff] [blame] | 1412 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1413 | bad_val: |
Al Viro | b5db30c | 2019-12-21 21:34:06 -0500 | [diff] [blame] | 1414 | return invalfc(fc, "Bad value '%s' for mount option '%s'\n", |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1415 | param->string, param->key); |
| 1416 | } |
| 1417 | |
| 1418 | /* |
| 1419 | * Validate the parsed options. |
| 1420 | */ |
| 1421 | static int hugetlbfs_validate(struct fs_context *fc) |
| 1422 | { |
| 1423 | struct hugetlbfs_fs_context *ctx = fc->fs_private; |
| 1424 | |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1425 | /* |
| 1426 | * Use huge page pool size (in hstate) to convert the size |
| 1427 | * options to number of huge pages. If NO_SIZE, -1 is returned. |
| 1428 | */ |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1429 | ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate, |
| 1430 | ctx->max_size_opt, |
| 1431 | ctx->max_val_type); |
| 1432 | ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate, |
| 1433 | ctx->min_size_opt, |
| 1434 | ctx->min_val_type); |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1435 | |
| 1436 | /* |
| 1437 | * If max_size was specified, then min_size must be smaller |
| 1438 | */ |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1439 | if (ctx->max_val_type > NO_SIZE && |
| 1440 | ctx->min_hpages > ctx->max_hpages) { |
| 1441 | pr_err("Minimum size cannot be greater than maximum size\n"); |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1442 | return -EINVAL; |
Andi Kleen | a137e1c | 2008-07-23 21:27:43 -0700 | [diff] [blame] | 1443 | } |
| 1444 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1445 | return 0; |
| 1446 | } |
| 1447 | |
| 1448 | static int |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1449 | hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1450 | { |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1451 | struct hugetlbfs_fs_context *ctx = fc->fs_private; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1452 | struct hugetlbfs_sb_info *sbinfo; |
| 1453 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1454 | sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL); |
| 1455 | if (!sbinfo) |
| 1456 | return -ENOMEM; |
| 1457 | sb->s_fs_info = sbinfo; |
| 1458 | spin_lock_init(&sbinfo->stat_lock); |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1459 | sbinfo->hstate = ctx->hstate; |
| 1460 | sbinfo->max_inodes = ctx->nr_inodes; |
| 1461 | sbinfo->free_inodes = ctx->nr_inodes; |
| 1462 | sbinfo->spool = NULL; |
| 1463 | sbinfo->uid = ctx->uid; |
| 1464 | sbinfo->gid = ctx->gid; |
| 1465 | sbinfo->mode = ctx->mode; |
David Howells | 4a25220 | 2017-07-05 16:24:18 +0100 | [diff] [blame] | 1466 | |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1467 | /* |
| 1468 | * Allocate and initialize subpool if maximum or minimum size is |
Miaohe Lin | 1935ebd | 2021-02-24 12:10:21 -0800 | [diff] [blame] | 1469 | * specified. Any needed reservations (for minimum size) are taken |
Miaohe Lin | 445c809 | 2022-07-26 22:29:17 +0800 | [diff] [blame] | 1470 | * when the subpool is created. |
Mike Kravetz | 7ca02d0a | 2015-04-15 16:13:42 -0700 | [diff] [blame] | 1471 | */ |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1472 | if (ctx->max_hpages != -1 || ctx->min_hpages != -1) { |
| 1473 | sbinfo->spool = hugepage_new_subpool(ctx->hstate, |
| 1474 | ctx->max_hpages, |
| 1475 | ctx->min_hpages); |
David Gibson | 9048162 | 2012-03-21 16:34:12 -0700 | [diff] [blame] | 1476 | if (!sbinfo->spool) |
| 1477 | goto out_free; |
| 1478 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1479 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1480 | sb->s_blocksize = huge_page_size(ctx->hstate); |
| 1481 | sb->s_blocksize_bits = huge_page_shift(ctx->hstate); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1482 | sb->s_magic = HUGETLBFS_MAGIC; |
| 1483 | sb->s_op = &hugetlbfs_ops; |
| 1484 | sb->s_time_gran = 1; |
Mike Kravetz | 1556829 | 2020-08-11 18:31:35 -0700 | [diff] [blame] | 1485 | |
| 1486 | /* |
| 1487 | * Due to the special and limited functionality of hugetlbfs, it does |
| 1488 | * not work well as a stacking filesystem. |
| 1489 | */ |
| 1490 | sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1491 | sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx)); |
Al Viro | 48fde70 | 2012-01-08 22:15:13 -0500 | [diff] [blame] | 1492 | if (!sb->s_root) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1493 | goto out_free; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1494 | return 0; |
| 1495 | out_free: |
Fabian Frederick | 6e6870d | 2014-06-04 16:10:40 -0700 | [diff] [blame] | 1496 | kfree(sbinfo->spool); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1497 | kfree(sbinfo); |
| 1498 | return -ENOMEM; |
| 1499 | } |
| 1500 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1501 | static int hugetlbfs_get_tree(struct fs_context *fc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1502 | { |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1503 | int err = hugetlbfs_validate(fc); |
| 1504 | if (err) |
| 1505 | return err; |
Al Viro | 2ac295d | 2019-06-01 20:48:55 -0400 | [diff] [blame] | 1506 | return get_tree_nodev(fc, hugetlbfs_fill_super); |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1507 | } |
| 1508 | |
| 1509 | static void hugetlbfs_fs_context_free(struct fs_context *fc) |
| 1510 | { |
| 1511 | kfree(fc->fs_private); |
| 1512 | } |
| 1513 | |
| 1514 | static const struct fs_context_operations hugetlbfs_fs_context_ops = { |
| 1515 | .free = hugetlbfs_fs_context_free, |
| 1516 | .parse_param = hugetlbfs_parse_param, |
| 1517 | .get_tree = hugetlbfs_get_tree, |
| 1518 | }; |
| 1519 | |
| 1520 | static int hugetlbfs_init_fs_context(struct fs_context *fc) |
| 1521 | { |
| 1522 | struct hugetlbfs_fs_context *ctx; |
| 1523 | |
| 1524 | ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL); |
| 1525 | if (!ctx) |
| 1526 | return -ENOMEM; |
| 1527 | |
| 1528 | ctx->max_hpages = -1; /* No limit on size by default */ |
| 1529 | ctx->nr_inodes = -1; /* No limit on number of inodes by default */ |
| 1530 | ctx->uid = current_fsuid(); |
| 1531 | ctx->gid = current_fsgid(); |
| 1532 | ctx->mode = 0755; |
| 1533 | ctx->hstate = &default_hstate; |
| 1534 | ctx->min_hpages = -1; /* No default minimum size */ |
| 1535 | ctx->max_val_type = NO_SIZE; |
| 1536 | ctx->min_val_type = NO_SIZE; |
| 1537 | fc->fs_private = ctx; |
| 1538 | fc->ops = &hugetlbfs_fs_context_ops; |
| 1539 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1540 | } |
| 1541 | |
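These context operations also make hugetlbfs mountable through the new mount API; a sketch using raw syscall(2), assuming headers that provide SYS_fsopen and the FSCONFIG_*/MOVE_MOUNT_* constants (the /mnt/huge target is an assumption):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        int fsfd = syscall(SYS_fsopen, "hugetlbfs", 0);

        if (fsfd < 0)
                return 1;
        /* each option is handed to hugetlbfs_parse_param() */
        syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "pagesize", "2M", 0);
        syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "1G", 0);
        syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

        int mfd = syscall(SYS_fsmount, fsfd, 0, 0);
        if (mfd < 0)
                return 1;
        return syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt/huge",
                       MOVE_MOUNT_F_EMPTY_PATH);
}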
| 1542 | static struct file_system_type hugetlbfs_fs_type = { |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1543 | .name = "hugetlbfs", |
| 1544 | .init_fs_context = hugetlbfs_init_fs_context, |
Al Viro | d7167b1 | 2019-09-07 07:23:15 -0400 | [diff] [blame] | 1545 | .parameters = hugetlb_fs_parameters, |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1546 | .kill_sb = kill_litter_super, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1547 | }; |
| 1548 | |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1549 | static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1550 | |
Mel Gorman | ef1ff6b | 2009-09-23 15:56:05 -0700 | [diff] [blame] | 1551 | static int can_do_hugetlb_shm(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1552 | { |
Eric W. Biederman | a0eb3a0 | 2012-02-07 16:19:25 -0800 | [diff] [blame] | 1553 | kgid_t shm_group; |
| 1554 | shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group); |
| 1555 | return capable(CAP_IPC_LOCK) || in_group_p(shm_group); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1556 | } |
| 1557 | |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1558 | static int get_hstate_idx(int page_size_log) |
| 1559 | { |
Naoya Horiguchi | af73e4d | 2013-05-07 16:18:13 -0700 | [diff] [blame] | 1560 | struct hstate *h = hstate_sizelog(page_size_log); |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1561 | |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1562 | if (!h) |
| 1563 | return -1; |
Miaohe Lin | 04adbc3 | 2021-05-04 18:33:22 -0700 | [diff] [blame] | 1564 | return hstate_index(h); |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1565 | } |
| 1566 | |
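page_size_log is the base-2 log of the requested page size, carried in the high bits of the mmap()/shmget() flag word; 0 selects the default hstate. A sketch that reaches hugetlb_file_setup() below via anonymous MAP_HUGETLB, spelling out that MAP_HUGE_2MB is just 21 << MAP_HUGE_SHIFT:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT)     /* page_size_log = 21 */
#endif

int main(void)
{
        size_t len = 4UL << 20;         /* two 2 MB pages */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB,
                       -1, 0);

        if (p == MAP_FAILED) {          /* ENODEV if no 2 MB hstate exists */
                perror("mmap");
                return 1;
        }
        munmap(p, len);
        return 0;
}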
Naoya Horiguchi | af73e4d | 2013-05-07 16:18:13 -0700 | [diff] [blame] | 1567 | /* |
| 1568 |  * Note that size should be aligned to the proper hugepage size on the caller |
| 1569 |  * side; otherwise hugetlb_reserve_pages() reserves one fewer hugepage than intended. |
| 1570 | */ |
| 1571 | struct file *hugetlb_file_setup(const char *name, size_t size, |
zhangyiru | 83c1fd7 | 2021-11-08 18:31:27 -0800 | [diff] [blame] | 1572 | vm_flags_t acctflag, int creat_flags, |
| 1573 | int page_size_log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1574 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1575 | struct inode *inode; |
Al Viro | e68375c | 2018-06-09 09:50:46 -0400 | [diff] [blame] | 1576 | struct vfsmount *mnt; |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1577 | int hstate_idx; |
Al Viro | e68375c | 2018-06-09 09:50:46 -0400 | [diff] [blame] | 1578 | struct file *file; |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1579 | |
| 1580 | hstate_idx = get_hstate_idx(page_size_log); |
| 1581 | if (hstate_idx < 0) |
| 1582 | return ERR_PTR(-ENODEV); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1583 | |
Al Viro | e68375c | 2018-06-09 09:50:46 -0400 | [diff] [blame] | 1584 | mnt = hugetlbfs_vfsmount[hstate_idx]; |
| 1585 | if (!mnt) |
Akinobu Mita | 5bc9859 | 2007-05-06 14:50:18 -0700 | [diff] [blame] | 1586 | return ERR_PTR(-ENOENT); |
| 1587 | |
Mel Gorman | ef1ff6b | 2009-09-23 15:56:05 -0700 | [diff] [blame] | 1588 | if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { |
zhangyiru | 83c1fd7 | 2021-11-08 18:31:27 -0800 | [diff] [blame] | 1589 | struct ucounts *ucounts = current_ucounts(); |
| 1590 | |
| 1591 | if (user_shm_lock(size, ucounts)) { |
| 1592 | pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n", |
David Rientjes | 21a3c27 | 2012-03-21 16:34:13 -0700 | [diff] [blame] | 1593 | current->comm, current->pid); |
zhangyiru | 83c1fd7 | 2021-11-08 18:31:27 -0800 | [diff] [blame] | 1594 | user_shm_unlock(size, ucounts); |
Hugh Dickins | 353d5c3 | 2009-08-24 16:30:28 +0100 | [diff] [blame] | 1595 | } |
zhangyiru | 83c1fd7 | 2021-11-08 18:31:27 -0800 | [diff] [blame] | 1596 | return ERR_PTR(-EPERM); |
Ravikiran G Thirumalai | 2584e51 | 2009-03-31 15:21:26 -0700 | [diff] [blame] | 1597 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1598 | |
Anatol Pomozov | 39b65252 | 2012-09-12 20:11:55 -0700 | [diff] [blame] | 1599 | file = ERR_PTR(-ENOSPC); |
Al Viro | e68375c | 2018-06-09 09:50:46 -0400 | [diff] [blame] | 1600 | inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1601 | if (!inode) |
Al Viro | e68375c | 2018-06-09 09:50:46 -0400 | [diff] [blame] | 1602 | goto out; |
Stephen Smalley | e1832f2 | 2015-08-06 15:46:55 -0700 | [diff] [blame] | 1603 | if (creat_flags == HUGETLB_SHMFS_INODE) |
| 1604 | inode->i_flags |= S_PRIVATE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1605 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1606 | inode->i_size = size; |
Miklos Szeredi | 6d6b77f | 2011-10-28 14:13:28 +0200 | [diff] [blame] | 1607 | clear_nlink(inode); |
Dave Hansen | ce8d2cd | 2007-10-16 23:31:13 -0700 | [diff] [blame] | 1608 | |
Mike Kravetz | 33b8f84 | 2021-02-24 12:09:54 -0800 | [diff] [blame] | 1609 | if (!hugetlb_reserve_pages(inode, 0, |
Al Viro | e68375c | 2018-06-09 09:50:46 -0400 | [diff] [blame] | 1610 | size >> huge_page_shift(hstate_inode(inode)), NULL, |
| 1611 | acctflag)) |
| 1612 | file = ERR_PTR(-ENOMEM); |
| 1613 | else |
| 1614 | file = alloc_file_pseudo(inode, mnt, name, O_RDWR, |
| 1615 | &hugetlbfs_file_operations); |
| 1616 | if (!IS_ERR(file)) |
| 1617 | return file; |
Dave Hansen | ce8d2cd | 2007-10-16 23:31:13 -0700 | [diff] [blame] | 1618 | |
David Gibson | b45b5bd | 2006-03-22 00:08:55 -0800 | [diff] [blame] | 1619 | iput(inode); |
Al Viro | e68375c | 2018-06-09 09:50:46 -0400 | [diff] [blame] | 1620 | out: |
Anatol Pomozov | 39b65252 | 2012-09-12 20:11:55 -0700 | [diff] [blame] | 1621 | return file; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1622 | } |
| 1623 | |
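hugetlb_file_setup() is also the entry point for SysV shared memory created with SHM_HUGETLB, which is where the can_do_hugetlb_shm() gate above applies (EPERM without CAP_IPC_LOCK or membership in hugetlb_shm_group). A sketch:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000
#endif

int main(void)
{
        size_t len = 4UL << 20;         /* multiple of the huge page size */
        int id = shmget(IPC_PRIVATE, len, IPC_CREAT | SHM_HUGETLB | 0600);

        if (id < 0) {
                perror("shmget");
                return 1;
        }
        void *p = shmat(id, NULL, 0);
        if (p != (void *)-1) {
                /* ... use p ... */
                shmdt(p);
        }
        shmctl(id, IPC_RMID, NULL);
        return 0;
}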
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1624 | static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h) |
| 1625 | { |
| 1626 | struct fs_context *fc; |
| 1627 | struct vfsmount *mnt; |
| 1628 | |
| 1629 | fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT); |
| 1630 | if (IS_ERR(fc)) { |
| 1631 | mnt = ERR_CAST(fc); |
| 1632 | } else { |
| 1633 | struct hugetlbfs_fs_context *ctx = fc->fs_private; |
| 1634 | ctx->hstate = h; |
| 1635 | mnt = fc_mount(fc); |
| 1636 | put_fs_context(fc); |
| 1637 | } |
| 1638 | if (IS_ERR(mnt)) |
Miaohe Lin | a25fddc | 2021-02-24 12:10:14 -0800 | [diff] [blame] | 1639 | pr_err("Cannot mount internal hugetlbfs for page size %luK\n", |
Miaohe Lin | d003651 | 2022-07-26 22:29:14 +0800 | [diff] [blame] | 1640 | huge_page_size(h) / SZ_1K); |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1641 | return mnt; |
| 1642 | } |
| 1643 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1644 | static int __init init_hugetlbfs_fs(void) |
| 1645 | { |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1646 | struct vfsmount *mnt; |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1647 | struct hstate *h; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1648 | int error; |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1649 | int i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1650 | |
Nishanth Aravamudan | 457c1b2 | 2014-05-06 12:50:00 -0700 | [diff] [blame] | 1651 | if (!hugepages_supported()) { |
Andrew Morton | 9b857d2 | 2014-06-04 16:07:21 -0700 | [diff] [blame] | 1652 | pr_info("disabling because there are no supported hugepage sizes\n"); |
Nishanth Aravamudan | 457c1b2 | 2014-05-06 12:50:00 -0700 | [diff] [blame] | 1653 | return -ENOTSUPP; |
| 1654 | } |
| 1655 | |
Hillf Danton | d1d5e05ff | 2012-03-21 16:34:15 -0700 | [diff] [blame] | 1656 | error = -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1657 | hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", |
| 1658 | sizeof(struct hugetlbfs_inode_info), |
Vladimir Davydov | 5d09705 | 2016-01-14 15:18:21 -0800 | [diff] [blame] | 1659 | 0, SLAB_ACCOUNT, init_once); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1660 | if (hugetlbfs_inode_cachep == NULL) |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1661 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1662 | |
| 1663 | error = register_filesystem(&hugetlbfs_fs_type); |
| 1664 | if (error) |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1665 | goto out_free; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1666 | |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1667 | /* default hstate mount is required */ |
Miaohe Lin | 3b2275a | 2021-02-24 12:10:04 -0800 | [diff] [blame] | 1668 | mnt = mount_one_hugetlbfs(&default_hstate); |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1669 | if (IS_ERR(mnt)) { |
| 1670 | error = PTR_ERR(mnt); |
| 1671 | goto out_unreg; |
| 1672 | } |
| 1673 | hugetlbfs_vfsmount[default_hstate_idx] = mnt; |
| 1674 | |
| 1675 | /* other hstates are optional */ |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1676 | i = 0; |
| 1677 | for_each_hstate(h) { |
Jan Stancek | 15f0ec9 | 2020-01-03 18:37:18 +0100 | [diff] [blame] | 1678 | if (i == default_hstate_idx) { |
| 1679 | i++; |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1680 | continue; |
Jan Stancek | 15f0ec9 | 2020-01-03 18:37:18 +0100 | [diff] [blame] | 1681 | } |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1682 | |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1683 | mnt = mount_one_hugetlbfs(h); |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1684 | if (IS_ERR(mnt)) |
| 1685 | hugetlbfs_vfsmount[i] = NULL; |
| 1686 | else |
| 1687 | hugetlbfs_vfsmount[i] = mnt; |
Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 1688 | i++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1689 | } |
David Howells | 3202198 | 2018-11-01 23:07:26 +0000 | [diff] [blame] | 1690 | |
| 1691 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1692 | |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1693 | out_unreg: |
| 1694 | (void)unregister_filesystem(&hugetlbfs_fs_type); |
| 1695 | out_free: |
Hillf Danton | d1d5e05ff | 2012-03-21 16:34:15 -0700 | [diff] [blame] | 1696 | kmem_cache_destroy(hugetlbfs_inode_cachep); |
Mike Kravetz | 8fc312b | 2019-11-30 17:56:34 -0800 | [diff] [blame] | 1697 | out: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1698 | return error; |
| 1699 | } |
Paul Gortmaker | 3e89e1c | 2016-01-14 15:21:52 -0800 | [diff] [blame] | 1700 | fs_initcall(init_hugetlbfs_fs) |