/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
        struct hstate *hstate;
        unsigned long long max_size_opt;
        unsigned long long min_size_opt;
        long max_hpages;
        long nr_inodes;
        long min_hpages;
        enum hugetlbfs_size_type max_val_type;
        enum hugetlbfs_size_type min_val_type;
        kuid_t uid;
        kgid_t gid;
        umode_t mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
        Opt_gid,
        Opt_min_size,
        Opt_mode,
        Opt_nr_inodes,
        Opt_pagesize,
        Opt_size,
        Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
        fsparam_u32   ("gid",           Opt_gid),
        fsparam_string("min_size",      Opt_min_size),
        fsparam_u32oct("mode",          Opt_mode),
        fsparam_string("nr_inodes",     Opt_nr_inodes),
        fsparam_string("pagesize",      Opt_pagesize),
        fsparam_string("size",          Opt_size),
        fsparam_u32   ("uid",           Opt_uid),
        {}
};

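/*
 * For reference only (illustrative, not part of this file): these parameters
 * correspond to the usual hugetlbfs mount options, e.g.
 *
 *   mount -t hugetlbfs -o pagesize=2M,size=1G,min_size=512M none /mnt/huge
 *
 * "size" and "min_size" may also be given as a percentage of the huge page
 * pool, which is why they are parsed as strings rather than plain integers.
 */
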
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
                                        struct inode *inode, pgoff_t index)
{
        vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
                                                        index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
        mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
                                        struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
        (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
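
/*
 * Worked example (an illustration, assuming a 64-bit build with 4 KiB base
 * pages, i.e. PAGE_SHIFT == 12 and BITS_PER_LONG == 64): the mask covers the
 * top 13 bits, 0xfff8000000000000.  Any vm_pgoff with one of those bits set
 * would, once shifted left by PAGE_SHIFT into a byte offset, overflow the
 * signed loff_t, so such mappings are rejected in hugetlbfs_file_mmap().
 */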

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
        loff_t len, vma_len;
        int ret;
        struct hstate *h = hstate_file(file);

        /*
         * vma address alignment (but not the pgoff alignment) has
         * already been checked by prepare_hugepage_range.  If you add
         * any error returns here, do so after setting VM_HUGETLB, so
         * is_vm_hugetlb_page tests below unmap_region go the right
         * way when do_mmap unwinds (may be important on powerpc
         * and ia64).
         */
        vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
        vma->vm_ops = &hugetlb_vm_ops;

        ret = seal_check_future_write(info->seals, vma);
        if (ret)
                return ret;

        /*
         * page based offset in vm_pgoff could be sufficiently large to
         * overflow a loff_t when converted to byte offset.  This can
         * only happen on architectures where sizeof(loff_t) ==
         * sizeof(unsigned long).  So, only check in those instances.
         */
        if (sizeof(unsigned long) == sizeof(loff_t)) {
                if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
                        return -EINVAL;
        }

        /* must be huge page aligned */
        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;

        vma_len = (loff_t)(vma->vm_end - vma->vm_start);
        len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
        /* check for overflow */
        if (len < vma_len)
                return -EINVAL;

        inode_lock(inode);
        file_accessed(file);

        ret = -ENOMEM;
        if (!hugetlb_reserve_pages(inode,
                                vma->vm_pgoff >> huge_page_order(h),
                                len >> huge_page_shift(h), vma,
                                vma->vm_flags))
                goto out;

        ret = 0;
        if (vma->vm_flags & VM_WRITE && inode->i_size < len)
                i_size_write(inode, len);
out:
        inode_unlock(inode);

        return ret;
}

/*
 * Called under mmap_write_lock(mm).
 */

static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = current->mm->mmap_base;
        info.high_limit = arch_get_mmap_end(addr, len, flags);
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (unlikely(offset_in_page(addr))) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = current->mm->mmap_base;
                info.high_limit = arch_get_mmap_end(addr, len, flags);
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                  unsigned long len, unsigned long pgoff,
                                  unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct hstate *h = hstate_file(file);
        const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (mmap_end - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        /*
         * Use mm->get_unmapped_area value as a hint to use topdown routine.
         * If architectures have special needs, they should define their own
         * version of hugetlb_get_unmapped_area.
         */
        if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
        return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                        pgoff, flags);
}

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                          unsigned long len, unsigned long pgoff,
                          unsigned long flags)
{
        return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. This provides functionality similar to filemap_read().
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct hstate *h = hstate_file(file);
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long index = iocb->ki_pos >> huge_page_shift(h);
        unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
        unsigned long end_index;
        loff_t isize;
        ssize_t retval = 0;

        while (iov_iter_count(to)) {
                struct page *page;
                size_t nr, copied;

                /* nr is the maximum number of bytes to copy from this page */
                nr = huge_page_size(h);
                isize = i_size_read(inode);
                if (!isize)
                        break;
                end_index = (isize - 1) >> huge_page_shift(h);
                if (index > end_index)
                        break;
                if (index == end_index) {
                        nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
                        if (nr <= offset)
                                break;
                }
                nr = nr - offset;

                /* Find the page */
                page = find_lock_page(mapping, index);
                if (unlikely(page == NULL)) {
                        /*
                         * We have a HOLE, zero out the user-buffer for the
                         * length of the hole or request.
                         */
                        copied = iov_iter_zero(nr, to);
                } else {
                        unlock_page(page);

                        if (PageHWPoison(page)) {
                                put_page(page);
                                retval = -EIO;
                                break;
                        }

                        /*
                         * We have the page, copy it to user space buffer.
                         */
                        copied = copy_page_to_iter(page, offset, nr, to);
                        put_page(page);
                }
                offset += copied;
                retval += copied;
                if (copied != nr && iov_iter_count(to)) {
                        if (!retval)
                                retval = -EFAULT;
                        break;
                }
                index += offset >> huge_page_shift(h);
                offset &= ~huge_page_mask(h);
        }
        iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
        return retval;
}

static int hugetlbfs_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len,
                        struct page **pagep, void **fsdata)
{
        return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        BUG();
        return -EINVAL;
}

static void hugetlb_delete_from_page_cache(struct folio *folio)
{
        folio_clear_dirty(folio);
        folio_clear_uptodate(folio);
        filemap_remove_folio(folio);
}

/*
 * Called with i_mmap_rwsem held for inode based vma maps.  This makes
 * sure vma (and vm_mm) will not go away.  We also hold the hugetlb fault
 * mutex for the page in the mapping.  So, we can not race with page being
 * faulted into the vma.
 */
static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
                                unsigned long addr, struct page *page)
{
        pte_t *ptep, pte;

        ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
        if (!ptep)
                return false;

        pte = huge_ptep_get(ptep);
        if (huge_pte_none(pte) || !pte_present(pte))
                return false;

        if (pte_page(pte) == page)
                return true;

        return false;
}

/*
 * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
 * No, because the interval tree returns us only those vmas
 * which overlap the truncated area starting at pgoff,
 * and no vma on a 32-bit arch can span beyond the 4GB.
 */
static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
{
        unsigned long offset = 0;

        if (vma->vm_pgoff < start)
                offset = (start - vma->vm_pgoff) << PAGE_SHIFT;

        return vma->vm_start + offset;
}

static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
{
        unsigned long t_end;

        if (!end)
                return vma->vm_end;

        t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
        if (t_end > vma->vm_end)
                t_end = vma->vm_end;
        return t_end;
}

/*
 * Called with hugetlb fault mutex held.  Therefore, no more mappings to
 * this folio can be created while executing the routine.
 */
438static void hugetlb_unmap_file_folio(struct hstate *h,
439 struct address_space *mapping,
440 struct folio *folio, pgoff_t index)
441{
442 struct rb_root_cached *root = &mapping->i_mmap;
Mike Kravetz40549ba2022-09-14 15:18:09 -0700443 struct hugetlb_vma_lock *vma_lock;
Mike Kravetz378397c2022-09-14 15:18:08 -0700444 struct page *page = &folio->page;
445 struct vm_area_struct *vma;
446 unsigned long v_start;
447 unsigned long v_end;
448 pgoff_t start, end;
449
450 start = index * pages_per_huge_page(h);
451 end = (index + 1) * pages_per_huge_page(h);
452
453 i_mmap_lock_write(mapping);
Mike Kravetz40549ba2022-09-14 15:18:09 -0700454retry:
455 vma_lock = NULL;
Mike Kravetz378397c2022-09-14 15:18:08 -0700456 vma_interval_tree_foreach(vma, root, start, end - 1) {
457 v_start = vma_offset_start(vma, start);
458 v_end = vma_offset_end(vma, end);
459
Peter Xu243b1f22022-12-16 10:50:52 -0500460 if (!hugetlb_vma_maps_page(vma, v_start, page))
Mike Kravetz378397c2022-09-14 15:18:08 -0700461 continue;
462
Mike Kravetz40549ba2022-09-14 15:18:09 -0700463 if (!hugetlb_vma_trylock_write(vma)) {
464 vma_lock = vma->vm_private_data;
465 /*
466 * If we can not get vma lock, we need to drop
467 * immap_sema and take locks in order. First,
468 * take a ref on the vma_lock structure so that
469 * we can be guaranteed it will not go away when
470 * dropping immap_sema.
471 */
472 kref_get(&vma_lock->refs);
473 break;
474 }
475
Peter Xu243b1f22022-12-16 10:50:52 -0500476 unmap_hugepage_range(vma, v_start, v_end, NULL,
477 ZAP_FLAG_DROP_MARKER);
Mike Kravetz40549ba2022-09-14 15:18:09 -0700478 hugetlb_vma_unlock_write(vma);
Mike Kravetz378397c2022-09-14 15:18:08 -0700479 }
480
481 i_mmap_unlock_write(mapping);
Mike Kravetz40549ba2022-09-14 15:18:09 -0700482
483 if (vma_lock) {
484 /*
485 * Wait on vma_lock. We know it is still valid as we have
486 * a reference. We must 'open code' vma locking as we do
487 * not know if vma_lock is still attached to vma.
488 */
489 down_write(&vma_lock->rw_sema);
490 i_mmap_lock_write(mapping);
491
492 vma = vma_lock->vma;
493 if (!vma) {
494 /*
495 * If lock is no longer attached to vma, then just
496 * unlock, drop our reference and retry looking for
497 * other vmas.
498 */
499 up_write(&vma_lock->rw_sema);
500 kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
501 goto retry;
502 }
503
504 /*
505 * vma_lock is still attached to vma. Check to see if vma
506 * still maps page and if so, unmap.
507 */
508 v_start = vma_offset_start(vma, start);
509 v_end = vma_offset_end(vma, end);
Peter Xu243b1f22022-12-16 10:50:52 -0500510 if (hugetlb_vma_maps_page(vma, v_start, page))
511 unmap_hugepage_range(vma, v_start, v_end, NULL,
512 ZAP_FLAG_DROP_MARKER);
Mike Kravetz40549ba2022-09-14 15:18:09 -0700513
514 kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
515 hugetlb_vma_unlock_write(vma);
516
517 goto retry;
518 }
Mike Kravetz378397c2022-09-14 15:18:08 -0700519}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
                      zap_flags_t zap_flags)
{
        struct vm_area_struct *vma;

        /*
         * end == 0 indicates that the entire range after start should be
         * unmapped.  Note, end is exclusive, whereas the interval tree takes
         * an inclusive "last".
         */
        vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
                unsigned long v_start;
                unsigned long v_end;

                if (!hugetlb_vma_trylock_write(vma))
                        continue;

                v_start = vma_offset_start(vma, start);
                v_end = vma_offset_end(vma, end);

                unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags);

                /*
                 * Note that vma lock only exists for shared/non-private
                 * vmas.  Therefore, lock is not held when calling
                 * unmap_hugepage_range for private vmas.
                 */
                hugetlb_vma_unlock_write(vma);
        }
}

/*
 * Called with hugetlb fault mutex held.
 * Returns true if page was actually removed, false otherwise.
 */
static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
                                        struct address_space *mapping,
                                        struct folio *folio, pgoff_t index,
                                        bool truncate_op)
{
        bool ret = false;

        /*
         * If folio is mapped, it was faulted in after being
         * unmapped in caller.  Unmap (again) while holding
         * the fault mutex.  The mutex will prevent faults
         * until we finish removing the folio.
         */
        if (unlikely(folio_mapped(folio)))
                hugetlb_unmap_file_folio(h, mapping, folio, index);

        folio_lock(folio);
        /*
         * We must remove the folio from page cache before removing
         * the region/reserve map (hugetlb_unreserve_pages).  In
         * rare out of memory conditions, removal of the region/reserve
         * map could fail.  Correspondingly, the subpool and global
         * reserve usage count may need to be adjusted.
         */
        VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio);
        hugetlb_delete_from_page_cache(folio);
        ret = true;
        if (!truncate_op) {
                if (unlikely(hugetlb_unreserve_pages(inode, index,
                                                        index + 1, 1)))
                        hugetlb_fix_reserve_counts(inode);
        }

        folio_unlock(folio);
        return ret;
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *      In this case, we first scan the range and release found pages.
 *      After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *      maps and global counts.  Page faults can race with truncation.
 *      During faults, hugetlb_no_page() checks i_size before page allocation,
 *      and again after obtaining page table lock.  It will 'back out'
 *      allocations in the truncated range.
 * hole punch is indicated if end is not LLONG_MAX
 *      In the hole punch case we scan the range and release found pages.
 *      Only when releasing a page is the associated region/reserve map
 *      deleted.  The region/reserve map for ranges without associated
 *      pages are not modified.  Page faults can race with hole punch.
 *      This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
                                   loff_t lend)
{
        struct hstate *h = hstate_inode(inode);
        struct address_space *mapping = &inode->i_data;
        const pgoff_t start = lstart >> huge_page_shift(h);
        const pgoff_t end = lend >> huge_page_shift(h);
        struct folio_batch fbatch;
        pgoff_t next, index;
        int i, freed = 0;
        bool truncate_op = (lend == LLONG_MAX);

        folio_batch_init(&fbatch);
        next = start;
        while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); ++i) {
                        struct folio *folio = fbatch.folios[i];
                        u32 hash = 0;

                        index = folio->index;
                        hash = hugetlb_fault_mutex_hash(mapping, index);
                        mutex_lock(&hugetlb_fault_mutex_table[hash]);

                        /*
                         * Remove folio that was part of folio_batch.
                         */
                        if (remove_inode_single_folio(h, inode, mapping, folio,
                                                        index, truncate_op))
                                freed++;

                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                }
                folio_batch_release(&fbatch);
                cond_resched();
        }

        if (truncate_op)
                (void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
        struct resv_map *resv_map;

        remove_inode_hugepages(inode, 0, LLONG_MAX);

        /*
         * Get the resv_map from the address space embedded in the inode.
         * This is the address space which points to any resv_map allocated
         * at inode creation time.  If this is a device special inode,
         * i_mapping may not point to the original address space.
         */
        resv_map = (struct resv_map *)(&inode->i_data)->private_data;
        /* Only regular and link inodes have associated reserve maps */
        if (resv_map)
                resv_map_release(&resv_map->refs);
        clear_inode(inode);
}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
        pgoff_t pgoff;
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);

        BUG_ON(offset & ~huge_page_mask(h));
        pgoff = offset >> PAGE_SHIFT;

        i_size_write(inode, offset);
        i_mmap_lock_write(mapping);
        if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
                hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
                                      ZAP_FLAG_DROP_MARKER);
        i_mmap_unlock_write(mapping);
        remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static void hugetlbfs_zero_partial_page(struct hstate *h,
                                        struct address_space *mapping,
                                        loff_t start,
                                        loff_t end)
{
        pgoff_t idx = start >> huge_page_shift(h);
        struct folio *folio;

        folio = filemap_lock_folio(mapping, idx);
        if (IS_ERR(folio))
                return;

        start = start & ~huge_page_mask(h);
        end = end & ~huge_page_mask(h);
        if (!end)
                end = huge_page_size(h);

        folio_zero_segment(folio, (size_t)start, (size_t)end);

        folio_unlock(folio);
        folio_put(folio);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
        struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);
        loff_t hpage_size = huge_page_size(h);
        loff_t hole_start, hole_end;

        /*
         * hole_start and hole_end indicate the full pages within the hole.
         */
        hole_start = round_up(offset, hpage_size);
        hole_end = round_down(offset + len, hpage_size);

        inode_lock(inode);

        /* protected by i_rwsem */
        if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
                inode_unlock(inode);
                return -EPERM;
        }

        i_mmap_lock_write(mapping);

        /* If range starts before first full page, zero partial page. */
        if (offset < hole_start)
                hugetlbfs_zero_partial_page(h, mapping,
                                offset, min(offset + len, hole_start));

        /* Unmap users of full pages in the hole. */
        if (hole_end > hole_start) {
                if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
                        hugetlb_vmdelete_list(&mapping->i_mmap,
                                              hole_start >> PAGE_SHIFT,
                                              hole_end >> PAGE_SHIFT, 0);
        }

        /* If range extends beyond last full page, zero partial page. */
        if ((offset + len) > hole_end && (offset + len) > hole_start)
                hugetlbfs_zero_partial_page(h, mapping,
                                hole_end, offset + len);

        i_mmap_unlock_write(mapping);

        /* Remove full pages from the file. */
        if (hole_end > hole_start)
                remove_inode_hugepages(inode, hole_start, hole_end);

        inode_unlock(inode);

        return 0;
}
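
/*
 * Illustrative userspace usage of the hole punch path above (a sketch, not
 * something this file defines; hpage_size stands for the file's huge page
 * size):
 *
 *   fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *             2 * hpage_size, 4 * hpage_size);
 *
 * Partial huge pages at either end of the range are zeroed rather than freed;
 * only whole huge pages inside the range are removed from the file.
 */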

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                                loff_t len)
{
        struct inode *inode = file_inode(file);
        struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);
        struct vm_area_struct pseudo_vma;
        struct mm_struct *mm = current->mm;
        loff_t hpage_size = huge_page_size(h);
        unsigned long hpage_shift = huge_page_shift(h);
        pgoff_t start, index, end;
        int error;
        u32 hash;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;

        if (mode & FALLOC_FL_PUNCH_HOLE)
                return hugetlbfs_punch_hole(inode, offset, len);

        /*
         * Default preallocate case.
         * For this range, start is rounded down and end is rounded up
         * as well as being converted to page offsets.
         */
        start = offset >> hpage_shift;
        end = (offset + len + hpage_size - 1) >> hpage_shift;

        inode_lock(inode);

        /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
        error = inode_newsize_ok(inode, offset + len);
        if (error)
                goto out;

        if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
                error = -EPERM;
                goto out;
        }

        /*
         * Initialize a pseudo vma as this is required by the huge page
         * allocation routines.  If NUMA is configured, use page index
         * as input to create an allocation policy.
         */
        vma_init(&pseudo_vma, mm);
        vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
        pseudo_vma.vm_file = file;

        for (index = start; index < end; index++) {
                /*
                 * This is supposed to be the vaddr where the page is being
                 * faulted in, but we have no vaddr here.
                 */
                struct folio *folio;
                unsigned long addr;

                cond_resched();

                /*
                 * fallocate(2) manpage permits EINTR; we may have been
                 * interrupted because we are using up too much memory.
                 */
                if (signal_pending(current)) {
                        error = -EINTR;
                        break;
                }

                /* addr is the offset within the file (zero based) */
                addr = index * hpage_size;

                /* mutex taken here, fault path and hole punch */
                hash = hugetlb_fault_mutex_hash(mapping, index);
                mutex_lock(&hugetlb_fault_mutex_table[hash]);

                /* See if already present in mapping to avoid alloc/free */
                folio = filemap_get_folio(mapping, index);
                if (!IS_ERR(folio)) {
                        folio_put(folio);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        continue;
                }

                /*
                 * Allocate folio without setting the avoid_reserve argument.
                 * There certainly are no reserves associated with the
                 * pseudo_vma.  However, there could be shared mappings with
                 * reserves for the file at the inode level.  If we fallocate
                 * folios in these areas, we need to consume the reserves
                 * to keep reservation accounting consistent.
                 */
                hugetlb_set_vma_policy(&pseudo_vma, inode, index);
                folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
                hugetlb_drop_vma_policy(&pseudo_vma);
                if (IS_ERR(folio)) {
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        error = PTR_ERR(folio);
                        goto out;
                }
                clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
                __folio_mark_uptodate(folio);
                error = hugetlb_add_to_page_cache(folio, mapping, index);
                if (unlikely(error)) {
                        restore_reserve_on_error(h, &pseudo_vma, addr, folio);
                        folio_put(folio);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        goto out;
                }

                mutex_unlock(&hugetlb_fault_mutex_table[hash]);

                folio_set_hugetlb_migratable(folio);
                /*
                 * folio_unlock because locked by hugetlb_add_to_page_cache()
                 * folio_put() due to reference from alloc_hugetlb_folio()
                 */
                folio_unlock(folio);
                folio_put(folio);
        }

        if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
                i_size_write(inode, offset + len);
        inode->i_ctime = current_time(inode);
out:
        inode_unlock(inode);
        return error;
}
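
/*
 * Illustrative usage of the default preallocate path above (again a sketch,
 * not part of this file; hpage_size stands for the file's huge page size):
 *
 *   fallocate(fd, 0, 0, 8 * hpage_size);
 *
 * This allocates, zeroes and inserts the folios into the page cache so that
 * subsequent faults find them already present, and extends i_size unless
 * FALLOC_FL_KEEP_SIZE was given.
 */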

static int hugetlbfs_setattr(struct mnt_idmap *idmap,
                             struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        struct hstate *h = hstate_inode(inode);
        int error;
        unsigned int ia_valid = attr->ia_valid;
        struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

        error = setattr_prepare(&nop_mnt_idmap, dentry, attr);
        if (error)
                return error;

        if (ia_valid & ATTR_SIZE) {
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;

                if (newsize & ~huge_page_mask(h))
                        return -EINVAL;
                /* protected by i_rwsem */
                if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
                    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
                        return -EPERM;
                hugetlb_vmtruncate(inode, newsize);
        }

        setattr_copy(&nop_mnt_idmap, inode, attr);
        mark_inode_dirty(inode);
        return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
                                        struct hugetlbfs_fs_context *ctx)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (inode) {
                inode->i_ino = get_next_ino();
                inode->i_mode = S_IFDIR | ctx->mode;
                inode->i_uid = ctx->uid;
                inode->i_gid = ctx->gid;
                inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
                inode->i_op = &hugetlbfs_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
                /* directory inodes start off with i_nlink == 2 (for "." entry) */
                inc_nlink(inode);
                lockdep_annotate_inode_mutex_key(inode);
        }
        return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        struct inode *dir,
                                        umode_t mode, dev_t dev)
{
        struct inode *inode;
        struct resv_map *resv_map = NULL;

        /*
         * Reserve maps are only needed for inodes that can have associated
         * page allocations.
         */
        if (S_ISREG(mode) || S_ISLNK(mode)) {
                resv_map = resv_map_alloc();
                if (!resv_map)
                        return NULL;
        }

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

                inode->i_ino = get_next_ino();
                inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
                lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
                                &hugetlbfs_i_mmap_rwsem_key);
                inode->i_mapping->a_ops = &hugetlbfs_aops;
                inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
                inode->i_mapping->private_data = resv_map;
                info->seals = F_SEAL_SEAL;
                switch (mode & S_IFMT) {
                default:
                        init_special_inode(inode, mode, dev);
                        break;
                case S_IFREG:
                        inode->i_op = &hugetlbfs_inode_operations;
                        inode->i_fop = &hugetlbfs_file_operations;
                        break;
                case S_IFDIR:
                        inode->i_op = &hugetlbfs_dir_inode_operations;
                        inode->i_fop = &simple_dir_operations;

                        /* directory inodes start off with i_nlink == 2 (for "." entry) */
                        inc_nlink(inode);
                        break;
                case S_IFLNK:
                        inode->i_op = &page_symlink_inode_operations;
                        inode_nohighmem(inode);
                        break;
                }
                lockdep_annotate_inode_mutex_key(inode);
        } else {
                if (resv_map)
                        kref_put(&resv_map->refs, resv_map_release);
        }

        return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
                           struct dentry *dentry, umode_t mode, dev_t dev)
{
        struct inode *inode;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
        if (!inode)
                return -ENOSPC;
        dir->i_ctime = dir->i_mtime = current_time(dir);
        d_instantiate(dentry, inode);
        dget(dentry);   /* Extra count - pin the dentry in core */
        return 0;
}

static int hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
                           struct dentry *dentry, umode_t mode)
{
        int retval = hugetlbfs_mknod(&nop_mnt_idmap, dir, dentry,
                                     mode | S_IFDIR, 0);
        if (!retval)
                inc_nlink(dir);
        return retval;
}

static int hugetlbfs_create(struct mnt_idmap *idmap,
                            struct inode *dir, struct dentry *dentry,
                            umode_t mode, bool excl)
{
        return hugetlbfs_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
                             struct inode *dir, struct file *file,
                             umode_t mode)
{
        struct inode *inode;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, mode | S_IFREG, 0);
        if (!inode)
                return -ENOSPC;
        dir->i_ctime = dir->i_mtime = current_time(dir);
        d_tmpfile(file, inode);
        return finish_open_simple(file, 0);
}

static int hugetlbfs_symlink(struct mnt_idmap *idmap,
                             struct inode *dir, struct dentry *dentry,
                             const char *symname)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
        if (inode) {
                int l = strlen(symname)+1;
                error = page_symlink(inode, symname, l);
                if (!error) {
                        d_instantiate(dentry, inode);
                        dget(dentry);
                } else
                        iput(inode);
        }
        dir->i_ctime = dir->i_mtime = current_time(dir);

        return error;
}

#ifdef CONFIG_MIGRATION
static int hugetlbfs_migrate_folio(struct address_space *mapping,
                                struct folio *dst, struct folio *src,
                                enum migrate_mode mode)
{
        int rc;

        rc = migrate_huge_page_move_mapping(mapping, dst, src);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;

        if (hugetlb_folio_subpool(src)) {
                hugetlb_set_folio_subpool(dst,
                                        hugetlb_folio_subpool(src));
                hugetlb_set_folio_subpool(src, NULL);
        }

        if (mode != MIGRATE_SYNC_NO_COPY)
                folio_migrate_copy(dst, src);
        else
                folio_migrate_flags(dst, src);

        return MIGRATEPAGE_SUCCESS;
}
#else
#define hugetlbfs_migrate_folio NULL
#endif

static int hugetlbfs_error_remove_page(struct address_space *mapping,
                                struct page *page)
{
        return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
        struct hugepage_subpool *spool = sbinfo->spool;
        unsigned long hpage_size = huge_page_size(sbinfo->hstate);
        unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
        char mod;

        if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
                seq_printf(m, ",uid=%u",
                           from_kuid_munged(&init_user_ns, sbinfo->uid));
        if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
                seq_printf(m, ",gid=%u",
                           from_kgid_munged(&init_user_ns, sbinfo->gid));
        if (sbinfo->mode != 0755)
                seq_printf(m, ",mode=%o", sbinfo->mode);
        if (sbinfo->max_inodes != -1)
                seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

        hpage_size /= 1024;
        mod = 'K';
        if (hpage_size >= 1024) {
                hpage_size /= 1024;
                mod = 'M';
        }
        seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
        if (spool) {
                if (spool->max_hpages != -1)
                        seq_printf(m, ",size=%llu",
                                   (unsigned long long)spool->max_hpages << hpage_shift);
                if (spool->min_hpages != -1)
                        seq_printf(m, ",min_size=%llu",
                                   (unsigned long long)spool->min_hpages << hpage_shift);
        }
        return 0;
}
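
/*
 * For example, the 2M/1G/512M mount shown earlier would appear in
 * /proc/mounts roughly as (illustrative output; generic flags may vary):
 *
 *   none /mnt/huge hugetlbfs rw,relatime,pagesize=2M,size=1073741824,min_size=536870912 0 0
 *
 * Note that pagesize is printed with a K/M suffix while size and min_size
 * are printed in bytes, per the seq_printf() calls above.
 */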

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
        struct hstate *h = hstate_inode(d_inode(dentry));

        buf->f_type = HUGETLBFS_MAGIC;
        buf->f_bsize = huge_page_size(h);
        if (sbinfo) {
                spin_lock(&sbinfo->stat_lock);
                /* If no limits set, just report 0 or -1 for max/free/used
                 * blocks, like simple_statfs() */
                if (sbinfo->spool) {
                        long free_pages;

                        spin_lock_irq(&sbinfo->spool->lock);
                        buf->f_blocks = sbinfo->spool->max_hpages;
                        free_pages = sbinfo->spool->max_hpages
                                - sbinfo->spool->used_hpages;
                        buf->f_bavail = buf->f_bfree = free_pages;
                        spin_unlock_irq(&sbinfo->spool->lock);
                        buf->f_files = sbinfo->max_inodes;
                        buf->f_ffree = sbinfo->free_inodes;
                }
                spin_unlock(&sbinfo->stat_lock);
        }
        buf->f_namelen = NAME_MAX;
        return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

        if (sbi) {
                sb->s_fs_info = NULL;

                if (sbi->spool)
                        hugepage_put_subpool(sbi->spool);

                kfree(sbi);
        }
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                if (unlikely(!sbinfo->free_inodes)) {
                        spin_unlock(&sbinfo->stat_lock);
                        return 0;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }

        return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
        struct hugetlbfs_inode_info *p;

        if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
                return NULL;
        p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
        if (unlikely(!p)) {
                hugetlbfs_inc_free_inodes(sbinfo);
                return NULL;
        }

        /*
         * Any time after allocation, hugetlbfs_destroy_inode can be called
         * for the inode.  mpol_free_shared_policy is unconditionally called
         * as part of hugetlbfs_destroy_inode.  So, initialize policy here
         * in case of a quick call to destroy.
         *
         * Note that the policy is initialized even if we are creating a
         * private inode.  This simplifies hugetlbfs_destroy_inode.
         */
        mpol_shared_policy_init(&p->policy, NULL);

        return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
        kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
        hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
        mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
}

static const struct address_space_operations hugetlbfs_aops = {
        .write_begin            = hugetlbfs_write_begin,
        .write_end              = hugetlbfs_write_end,
        .dirty_folio            = noop_dirty_folio,
        .migrate_folio          = hugetlbfs_migrate_folio,
        .error_remove_page      = hugetlbfs_error_remove_page,
};


static void init_once(void *foo)
{
        struct hugetlbfs_inode_info *ei = foo;

        inode_init_once(&ei->vfs_inode);
Christoph Hellwig96527982005-10-29 18:16:42 -07001280}
1281
Arjan van de Ven4b6f5d22006-03-28 01:56:42 -08001282const struct file_operations hugetlbfs_file_operations = {
Al Viro34d06402015-04-03 11:31:35 -04001283 .read_iter = hugetlbfs_read_iter,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 .mmap = hugetlbfs_file_mmap,
Christoph Hellwig1b061d92010-05-26 17:53:41 +02001285 .fsync = noop_fsync,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 .get_unmapped_area = hugetlb_get_unmapped_area,
Mike Kravetz70c35472015-09-08 15:01:54 -07001287 .llseek = default_llseek,
1288 .fallocate = hugetlbfs_fallocate,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289};
1290
Arjan van de Ven92e1d5b2007-02-12 00:55:39 -08001291static const struct inode_operations hugetlbfs_dir_inode_operations = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292 .create = hugetlbfs_create,
1293 .lookup = simple_lookup,
1294 .link = simple_link,
1295 .unlink = simple_unlink,
1296 .symlink = hugetlbfs_symlink,
1297 .mkdir = hugetlbfs_mkdir,
1298 .rmdir = simple_rmdir,
1299 .mknod = hugetlbfs_mknod,
1300 .rename = simple_rename,
1301 .setattr = hugetlbfs_setattr,
Piotr Sarna1ab5b822019-11-30 17:56:43 -08001302 .tmpfile = hugetlbfs_tmpfile,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303};
1304
Arjan van de Ven92e1d5b2007-02-12 00:55:39 -08001305static const struct inode_operations hugetlbfs_inode_operations = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306 .setattr = hugetlbfs_setattr,
1307};
1308
Josef 'Jeff' Sipekee9b6d62007-02-12 00:55:41 -08001309static const struct super_operations hugetlbfs_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 .alloc_inode = hugetlbfs_alloc_inode,
Al Virob62de322019-04-15 23:16:38 -04001311 .free_inode = hugetlbfs_free_inode,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312 .destroy_inode = hugetlbfs_destroy_inode,
Al Viro2bbbda32010-06-04 19:52:12 -04001313 .evict_inode = hugetlbfs_evict_inode,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314 .statfs = hugetlbfs_statfs,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 .put_super = hugetlbfs_put_super,
David Howells4a252202017-07-05 16:24:18 +01001316 .show_options = hugetlbfs_show_options,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317};
1318
Mike Kravetz7ca02d0a2015-04-15 16:13:42 -07001319/*
1320 * Convert size option passed from command line to number of huge pages
1321 * in the pool specified by hstate. Size option could be in bytes
1322 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
1323 */
David Howells4a252202017-07-05 16:24:18 +01001324static long
Mike Kravetz7ca02d0a2015-04-15 16:13:42 -07001325hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
David Howells4a252202017-07-05 16:24:18 +01001326 enum hugetlbfs_size_type val_type)
Mike Kravetz7ca02d0a2015-04-15 16:13:42 -07001327{
1328 if (val_type == NO_SIZE)
1329 return -1;
1330
1331 if (val_type == SIZE_PERCENT) {
1332 size_opt <<= huge_page_shift(h);
1333 size_opt *= h->max_huge_pages;
1334 do_div(size_opt, 100);
1335 }
1336
1337 size_opt >>= huge_page_shift(h);
1338 return size_opt;
1339}
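/*
 * Worked examples (assuming 2 MB huge pages, i.e. huge_page_shift() == 21,
 * and a hypothetical pool of h->max_huge_pages == 1024):
 *
 *   "size=1G"  -> SIZE_STD:     1073741824 >> 21              = 512 huge pages
 *   "size=50%" -> SIZE_PERCENT: (50 << 21) * 1024 / 100 >> 21 = 512 huge pages
 *
 * i.e. both option forms cap the mount at half of the configured pool.
 */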
1340
David Howells32021982018-11-01 23:07:26 +00001341/*
1342 * Parse one mount parameter.
1343 */
1344static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345{
David Howells32021982018-11-01 23:07:26 +00001346 struct hugetlbfs_fs_context *ctx = fc->fs_private;
1347 struct fs_parse_result result;
1348 char *rest;
1349 unsigned long ps;
1350 int opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351
Al Virod7167b12019-09-07 07:23:15 -04001352 opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
David Howells32021982018-11-01 23:07:26 +00001353 if (opt < 0)
1354 return opt;
1355
1356 switch (opt) {
1357 case Opt_uid:
1358 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
1359 if (!uid_valid(ctx->uid))
1360 goto bad_val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362
David Howells32021982018-11-01 23:07:26 +00001363 case Opt_gid:
1364 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
1365 if (!gid_valid(ctx->gid))
1366 goto bad_val;
1367 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368
David Howells32021982018-11-01 23:07:26 +00001369 case Opt_mode:
1370 ctx->mode = result.uint_32 & 01777U;
1371 return 0;
Randy Dunlape73a75f2007-07-15 23:40:52 -07001372
David Howells32021982018-11-01 23:07:26 +00001373 case Opt_size:
1374 /* memparse() will accept a K/M/G without a digit */
Hawkins Jiawei26215b7e2022-10-21 07:16:08 +08001375 if (!param->string || !isdigit(param->string[0]))
David Howells32021982018-11-01 23:07:26 +00001376 goto bad_val;
1377 ctx->max_size_opt = memparse(param->string, &rest);
1378 ctx->max_val_type = SIZE_STD;
1379 if (*rest == '%')
1380 ctx->max_val_type = SIZE_PERCENT;
1381 return 0;
Randy Dunlape73a75f2007-07-15 23:40:52 -07001382
David Howells32021982018-11-01 23:07:26 +00001383 case Opt_nr_inodes:
1384 /* memparse() will accept a K/M/G without a digit */
Hawkins Jiawei26215b7e2022-10-21 07:16:08 +08001385 if (!param->string || !isdigit(param->string[0]))
David Howells32021982018-11-01 23:07:26 +00001386 goto bad_val;
1387 ctx->nr_inodes = memparse(param->string, &rest);
1388 return 0;
Randy Dunlape73a75f2007-07-15 23:40:52 -07001389
David Howells32021982018-11-01 23:07:26 +00001390 case Opt_pagesize:
1391 ps = memparse(param->string, &rest);
1392 ctx->hstate = size_to_hstate(ps);
1393 if (!ctx->hstate) {
Miaohe Lind0036512022-07-26 22:29:14 +08001394 pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
Lee Schermerhornb4c07bc2007-07-15 23:40:54 -07001395 return -EINVAL;
Randy Dunlape73a75f2007-07-15 23:40:52 -07001396 }
David Howells32021982018-11-01 23:07:26 +00001397 return 0;
1398
1399 case Opt_min_size:
1400 /* memparse() will accept a K/M/G without a digit */
Hawkins Jiawei26215b7e2022-10-21 07:16:08 +08001401 if (!param->string || !isdigit(param->string[0]))
David Howells32021982018-11-01 23:07:26 +00001402 goto bad_val;
1403 ctx->min_size_opt = memparse(param->string, &rest);
1404 ctx->min_val_type = SIZE_STD;
1405 if (*rest == '%')
1406 ctx->min_val_type = SIZE_PERCENT;
1407 return 0;
1408
1409 default:
1410 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 }
Andi Kleena137e1c2008-07-23 21:27:43 -07001412
David Howells32021982018-11-01 23:07:26 +00001413bad_val:
Al Virob5db30c2019-12-21 21:34:06 -05001414 return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
David Howells32021982018-11-01 23:07:26 +00001415 param->string, param->key);
1416}
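/*
 * Each parameter above maps to one comma-separated option in the mount data
 * string. An illustrative userspace call (mount point and values are made
 * up) that exercises this parser:
 *
 *   #include <sys/mount.h>
 *
 *   mount("none", "/mnt/huge", "hugetlbfs", 0,
 *         "uid=1000,gid=1000,mode=0700,pagesize=2M,size=50%,min_size=32M");
 *
 * "size" and "min_size" accept either a byte value (with optional K/M/G
 * suffix) or a percentage of the huge page pool, as handled in Opt_size and
 * Opt_min_size above.
 */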
1417
1418/*
1419 * Validate the parsed options.
1420 */
1421static int hugetlbfs_validate(struct fs_context *fc)
1422{
1423 struct hugetlbfs_fs_context *ctx = fc->fs_private;
1424
Mike Kravetz7ca02d0a2015-04-15 16:13:42 -07001425 /*
1426 * Use huge page pool size (in hstate) to convert the size
1427 * options to number of huge pages. If NO_SIZE, -1 is returned.
1428 */
David Howells32021982018-11-01 23:07:26 +00001429 ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
1430 ctx->max_size_opt,
1431 ctx->max_val_type);
1432 ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
1433 ctx->min_size_opt,
1434 ctx->min_val_type);
Mike Kravetz7ca02d0a2015-04-15 16:13:42 -07001435
1436 /*
1437	 * If max_size was specified, then min_size must not be larger
1438 */
David Howells32021982018-11-01 23:07:26 +00001439 if (ctx->max_val_type > NO_SIZE &&
1440 ctx->min_hpages > ctx->max_hpages) {
1441 pr_err("Minimum size can not be greater than maximum size\n");
Mike Kravetz7ca02d0a2015-04-15 16:13:42 -07001442 return -EINVAL;
Andi Kleena137e1c2008-07-23 21:27:43 -07001443 }
1444
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 return 0;
1446}
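/*
 * Illustrative failure case (sizes are hypothetical): with 2 MB huge pages,
 * mounting with "size=128M,min_size=256M" gives ctx->max_hpages == 64 and
 * ctx->min_hpages == 128, so the check above rejects the mount with -EINVAL
 * and prints the error message.
 */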
1447
1448static int
David Howells32021982018-11-01 23:07:26 +00001449hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450{
David Howells32021982018-11-01 23:07:26 +00001451 struct hugetlbfs_fs_context *ctx = fc->fs_private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 struct hugetlbfs_sb_info *sbinfo;
1453
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454 sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
1455 if (!sbinfo)
1456 return -ENOMEM;
1457 sb->s_fs_info = sbinfo;
1458 spin_lock_init(&sbinfo->stat_lock);
David Howells32021982018-11-01 23:07:26 +00001459 sbinfo->hstate = ctx->hstate;
1460 sbinfo->max_inodes = ctx->nr_inodes;
1461 sbinfo->free_inodes = ctx->nr_inodes;
1462 sbinfo->spool = NULL;
1463 sbinfo->uid = ctx->uid;
1464 sbinfo->gid = ctx->gid;
1465 sbinfo->mode = ctx->mode;
David Howells4a252202017-07-05 16:24:18 +01001466
Mike Kravetz7ca02d0a2015-04-15 16:13:42 -07001467 /*
1468 * Allocate and initialize subpool if maximum or minimum size is
Miaohe Lin1935ebd2021-02-24 12:10:21 -08001469 * specified. Any needed reservations (for minimum size) are taken
Miaohe Lin445c8092022-07-26 22:29:17 +08001470 * when the subpool is created.
Mike Kravetz7ca02d0a2015-04-15 16:13:42 -07001471 */
David Howells32021982018-11-01 23:07:26 +00001472 if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
1473 sbinfo->spool = hugepage_new_subpool(ctx->hstate,
1474 ctx->max_hpages,
1475 ctx->min_hpages);
David Gibson90481622012-03-21 16:34:12 -07001476 if (!sbinfo->spool)
1477 goto out_free;
1478 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 sb->s_maxbytes = MAX_LFS_FILESIZE;
David Howells32021982018-11-01 23:07:26 +00001480 sb->s_blocksize = huge_page_size(ctx->hstate);
1481 sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 sb->s_magic = HUGETLBFS_MAGIC;
1483 sb->s_op = &hugetlbfs_ops;
1484 sb->s_time_gran = 1;
Mike Kravetz15568292020-08-11 18:31:35 -07001485
1486 /*
1487 * Due to the special and limited functionality of hugetlbfs, it does
1488 * not work well as a stacking filesystem.
1489 */
1490 sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
David Howells32021982018-11-01 23:07:26 +00001491 sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
Al Viro48fde702012-01-08 22:15:13 -05001492 if (!sb->s_root)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 return 0;
1495out_free:
Fabian Frederick6e6870d2014-06-04 16:10:40 -07001496 kfree(sbinfo->spool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 kfree(sbinfo);
1498 return -ENOMEM;
1499}
1500
David Howells32021982018-11-01 23:07:26 +00001501static int hugetlbfs_get_tree(struct fs_context *fc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502{
David Howells32021982018-11-01 23:07:26 +00001503 int err = hugetlbfs_validate(fc);
1504 if (err)
1505 return err;
Al Viro2ac295d2019-06-01 20:48:55 -04001506 return get_tree_nodev(fc, hugetlbfs_fill_super);
David Howells32021982018-11-01 23:07:26 +00001507}
1508
1509static void hugetlbfs_fs_context_free(struct fs_context *fc)
1510{
1511 kfree(fc->fs_private);
1512}
1513
1514static const struct fs_context_operations hugetlbfs_fs_context_ops = {
1515 .free = hugetlbfs_fs_context_free,
1516 .parse_param = hugetlbfs_parse_param,
1517 .get_tree = hugetlbfs_get_tree,
1518};
1519
1520static int hugetlbfs_init_fs_context(struct fs_context *fc)
1521{
1522 struct hugetlbfs_fs_context *ctx;
1523
1524 ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
1525 if (!ctx)
1526 return -ENOMEM;
1527
1528 ctx->max_hpages = -1; /* No limit on size by default */
1529 ctx->nr_inodes = -1; /* No limit on number of inodes by default */
1530 ctx->uid = current_fsuid();
1531 ctx->gid = current_fsgid();
1532 ctx->mode = 0755;
1533 ctx->hstate = &default_hstate;
1534 ctx->min_hpages = -1; /* No default minimum size */
1535 ctx->max_val_type = NO_SIZE;
1536 ctx->min_val_type = NO_SIZE;
1537 fc->fs_private = ctx;
1538 fc->ops = &hugetlbfs_fs_context_ops;
1539 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540}
1541
1542static struct file_system_type hugetlbfs_fs_type = {
David Howells32021982018-11-01 23:07:26 +00001543 .name = "hugetlbfs",
1544 .init_fs_context = hugetlbfs_init_fs_context,
Al Virod7167b12019-09-07 07:23:15 -04001545 .parameters = hugetlb_fs_parameters,
David Howells32021982018-11-01 23:07:26 +00001546 .kill_sb = kill_litter_super,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547};
1548
Andi Kleen42d73952012-12-11 16:01:34 -08001549static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550
Mel Gormanef1ff6b2009-09-23 15:56:05 -07001551static int can_do_hugetlb_shm(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552{
Eric W. Biedermana0eb3a02012-02-07 16:19:25 -08001553 kgid_t shm_group;
1554 shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
1555 return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556}
1557
Andi Kleen42d73952012-12-11 16:01:34 -08001558static int get_hstate_idx(int page_size_log)
1559{
Naoya Horiguchiaf73e4d2013-05-07 16:18:13 -07001560 struct hstate *h = hstate_sizelog(page_size_log);
Andi Kleen42d73952012-12-11 16:01:34 -08001561
Andi Kleen42d73952012-12-11 16:01:34 -08001562 if (!h)
1563 return -1;
Miaohe Lin04adbc32021-05-04 18:33:22 -07001564 return hstate_index(h);
Andi Kleen42d73952012-12-11 16:01:34 -08001565}
1566
Naoya Horiguchiaf73e4d2013-05-07 16:18:13 -07001567/*
1568 * Note that size should be aligned to the proper hugepage size on the caller's
1569 * side; otherwise hugetlb_reserve_pages() reserves one fewer huge page than intended.
1570 */
1571struct file *hugetlb_file_setup(const char *name, size_t size,
zhangyiru83c1fd72021-11-08 18:31:27 -08001572 vm_flags_t acctflag, int creat_flags,
1573 int page_size_log)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 struct inode *inode;
Al Viroe68375c2018-06-09 09:50:46 -04001576 struct vfsmount *mnt;
Andi Kleen42d73952012-12-11 16:01:34 -08001577 int hstate_idx;
Al Viroe68375c2018-06-09 09:50:46 -04001578 struct file *file;
Andi Kleen42d73952012-12-11 16:01:34 -08001579
1580 hstate_idx = get_hstate_idx(page_size_log);
1581 if (hstate_idx < 0)
1582 return ERR_PTR(-ENODEV);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583
Al Viroe68375c2018-06-09 09:50:46 -04001584 mnt = hugetlbfs_vfsmount[hstate_idx];
1585 if (!mnt)
Akinobu Mita5bc98592007-05-06 14:50:18 -07001586 return ERR_PTR(-ENOENT);
1587
Mel Gormanef1ff6b2009-09-23 15:56:05 -07001588	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
zhangyiru83c1fd72021-11-08 18:31:27 -08001589 struct ucounts *ucounts = current_ucounts();
1590
1591 if (user_shm_lock(size, ucounts)) {
1592 pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
David Rientjes21a3c272012-03-21 16:34:13 -07001593 current->comm, current->pid);
zhangyiru83c1fd72021-11-08 18:31:27 -08001594 user_shm_unlock(size, ucounts);
Hugh Dickins353d5c32009-08-24 16:30:28 +01001595 }
zhangyiru83c1fd72021-11-08 18:31:27 -08001596 return ERR_PTR(-EPERM);
Ravikiran G Thirumalai2584e512009-03-31 15:21:26 -07001597 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
Anatol Pomozov39b652522012-09-12 20:11:55 -07001599 file = ERR_PTR(-ENOSPC);
Al Viroe68375c2018-06-09 09:50:46 -04001600 inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 if (!inode)
Al Viroe68375c2018-06-09 09:50:46 -04001602 goto out;
Stephen Smalleye1832f22015-08-06 15:46:55 -07001603 if (creat_flags == HUGETLB_SHMFS_INODE)
1604 inode->i_flags |= S_PRIVATE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606 inode->i_size = size;
Miklos Szeredi6d6b77f2011-10-28 14:13:28 +02001607 clear_nlink(inode);
Dave Hansence8d2cd2007-10-16 23:31:13 -07001608
Mike Kravetz33b8f842021-02-24 12:09:54 -08001609 if (!hugetlb_reserve_pages(inode, 0,
Al Viroe68375c2018-06-09 09:50:46 -04001610 size >> huge_page_shift(hstate_inode(inode)), NULL,
1611 acctflag))
1612 file = ERR_PTR(-ENOMEM);
1613 else
1614 file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
1615 &hugetlbfs_file_operations);
1616 if (!IS_ERR(file))
1617 return file;
Dave Hansence8d2cd2007-10-16 23:31:13 -07001618
David Gibsonb45b5bd2006-03-22 00:08:55 -08001619 iput(inode);
Al Viroe68375c2018-06-09 09:50:46 -04001620out:
Anatol Pomozov39b652522012-09-12 20:11:55 -07001621 return file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622}
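/*
 * Sketch of a caller honouring the alignment note above (the segment name is
 * illustrative; this mirrors roughly what the SysV shared memory path does
 * before creating a segment on the internal mount):
 *
 *   struct hstate *hs = hstate_sizelog(page_size_log);
 *   struct file *file;
 *
 *   if (!hs)
 *           return -EINVAL;
 *   size = ALIGN(size, huge_page_size(hs));
 *   file = hugetlb_file_setup("SYSV_example", size, acctflag,
 *                             HUGETLB_SHMFS_INODE, page_size_log);
 *   if (IS_ERR(file))
 *           return PTR_ERR(file);
 */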
1623
David Howells32021982018-11-01 23:07:26 +00001624static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
1625{
1626 struct fs_context *fc;
1627 struct vfsmount *mnt;
1628
1629 fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
1630 if (IS_ERR(fc)) {
1631 mnt = ERR_CAST(fc);
1632 } else {
1633 struct hugetlbfs_fs_context *ctx = fc->fs_private;
1634 ctx->hstate = h;
1635 mnt = fc_mount(fc);
1636 put_fs_context(fc);
1637 }
1638 if (IS_ERR(mnt))
Miaohe Lina25fddc2021-02-24 12:10:14 -08001639 pr_err("Cannot mount internal hugetlbfs for page size %luK",
Miaohe Lind0036512022-07-26 22:29:14 +08001640 huge_page_size(h) / SZ_1K);
David Howells32021982018-11-01 23:07:26 +00001641 return mnt;
1642}
1643
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644static int __init init_hugetlbfs_fs(void)
1645{
David Howells32021982018-11-01 23:07:26 +00001646 struct vfsmount *mnt;
Andi Kleen42d73952012-12-11 16:01:34 -08001647 struct hstate *h;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 int error;
Andi Kleen42d73952012-12-11 16:01:34 -08001649 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650
Nishanth Aravamudan457c1b22014-05-06 12:50:00 -07001651 if (!hugepages_supported()) {
Andrew Morton9b857d22014-06-04 16:07:21 -07001652 pr_info("disabling because there are no supported hugepage sizes\n");
Nishanth Aravamudan457c1b22014-05-06 12:50:00 -07001653 return -ENOTSUPP;
1654 }
1655
Hillf Dantond1d5e05ff2012-03-21 16:34:15 -07001656 error = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
1658 sizeof(struct hugetlbfs_inode_info),
Vladimir Davydov5d097052016-01-14 15:18:21 -08001659 0, SLAB_ACCOUNT, init_once);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 if (hugetlbfs_inode_cachep == NULL)
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001661 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662
1663 error = register_filesystem(&hugetlbfs_fs_type);
1664 if (error)
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001665 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001667 /* default hstate mount is required */
Miaohe Lin3b2275a2021-02-24 12:10:04 -08001668 mnt = mount_one_hugetlbfs(&default_hstate);
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001669 if (IS_ERR(mnt)) {
1670 error = PTR_ERR(mnt);
1671 goto out_unreg;
1672 }
1673 hugetlbfs_vfsmount[default_hstate_idx] = mnt;
1674
1675 /* other hstates are optional */
Andi Kleen42d73952012-12-11 16:01:34 -08001676 i = 0;
1677 for_each_hstate(h) {
Jan Stancek15f0ec92020-01-03 18:37:18 +01001678 if (i == default_hstate_idx) {
1679 i++;
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001680 continue;
Jan Stancek15f0ec92020-01-03 18:37:18 +01001681 }
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001682
David Howells32021982018-11-01 23:07:26 +00001683 mnt = mount_one_hugetlbfs(h);
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001684 if (IS_ERR(mnt))
1685 hugetlbfs_vfsmount[i] = NULL;
1686 else
1687 hugetlbfs_vfsmount[i] = mnt;
Andi Kleen42d73952012-12-11 16:01:34 -08001688 i++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 }
David Howells32021982018-11-01 23:07:26 +00001690
1691 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001693 out_unreg:
1694 (void)unregister_filesystem(&hugetlbfs_fs_type);
1695 out_free:
Hillf Dantond1d5e05ff2012-03-21 16:34:15 -07001696 kmem_cache_destroy(hugetlbfs_inode_cachep);
Mike Kravetz8fc312b2019-11-30 17:56:34 -08001697 out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 return error;
1699}
Paul Gortmaker3e89e1c2016-01-14 15:21:52 -08001700fs_initcall(init_hugetlbfs_fs)