/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
#include <linux/iversion.h>
#include "swap.h"

static struct vfsmount *shm_mnt __ro_after_init;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/rmap.h>
#include <linux/uuid.h>
#include <linux/quotaops.h>
#include <linux/rcupdate_wait.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
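/*
 * BLOCKS_PER_PAGE: how many 512-byte blocks one page accounts for.
 * VM_ACCT(size): how many whole pages to charge for a byte size, used by
 * the memory-commit accounting helpers below.
 */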

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Pretend that one inode + its dentry occupy this much memory */
#define BOGO_INODE_SIZE 1024

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
	bool noswap;
	unsigned short quota_types;
	struct shmem_quota_limits qlimits;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
#define SHMEM_SEEN_NOSWAP 16
#define SHMEM_SEEN_QUOTA 32
};
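/*
 * The SHMEM_SEEN_* bits in ->seen record which mount options were given
 * explicitly, so that unspecified options keep their defaults (or their
 * previous values on remount).
 */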

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
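/*
 * Per-policy bitmasks of folio orders for which huge allocation is enabled;
 * set from the transparent_hugepage sysfs interface.
 */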
static unsigned long huge_shmem_orders_always __read_mostly;
static unsigned long huge_shmem_orders_madvise __read_mostly;
static unsigned long huge_shmem_orders_inherit __read_mostly;
static unsigned long huge_shmem_orders_within_size __read_mostly;
#endif

#ifdef CONFIG_TMPFS
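/*
 * Defaults used when the "size" and "nr_inodes" mount options are not given:
 * cap blocks at half of RAM, and derive an inode limit from RAM while keeping
 * the free_ispace arithmetic within ULONG_MAX.
 */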
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
			ULONG_MAX / BOGO_INODE_SIZE);
}
#endif

static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
		struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
		struct vm_area_struct *vma, vm_fault_t *fault_type);

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}
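/*
 * Note the inversion relative to shmem_acct_size(): objects whose full size
 * was pre-accounted (no VM_NORESERVE) need no per-page charge here, while
 * VM_NORESERVE objects are charged page by page as they are populated.
 */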

static int shmem_inode_acct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int err = -ENOSPC;

	if (shmem_acct_blocks(info->flags, pages))
		return err;

	might_sleep();	/* when quotas */
	if (sbinfo->max_blocks) {
		if (!percpu_counter_limited_add(&sbinfo->used_blocks,
						sbinfo->max_blocks, pages))
			goto unacct;

		err = dquot_alloc_block_nodirty(inode, pages);
		if (err) {
			percpu_counter_sub(&sbinfo->used_blocks, pages);
			goto unacct;
		}
	} else {
		err = dquot_alloc_block_nodirty(inode, pages);
		if (err)
			goto unacct;
	}

	return 0;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return err;
}

static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	might_sleep();	/* when quotas */
	dquot_free_block_nodirty(inode, pages);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static const struct vm_operations_struct shmem_anon_vm_ops;
static struct file_system_type shmem_fs_type;

bool shmem_mapping(struct address_space *mapping)
{
	return mapping->a_ops == &shmem_aops;
}
EXPORT_SYMBOL_GPL(shmem_mapping);

bool vma_is_anon_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_anon_vm_ops;
}

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

#ifdef CONFIG_TMPFS_QUOTA

static int shmem_enable_quotas(struct super_block *sb,
			       unsigned short quota_types)
{
	int type, err = 0;

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
		if (!(quota_types & (1 << type)))
			continue;
		err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
					  DQUOT_USAGE_ENABLED |
					  DQUOT_LIMITS_ENABLED);
		if (err)
			goto out_err;
	}
	return 0;

out_err:
	pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
		type, err);
	for (type--; type >= 0; type--)
		dquot_quota_off(sb, type);
	return err;
}

static void shmem_disable_quotas(struct super_block *sb)
{
	int type;

	for (type = 0; type < SHMEM_MAXQUOTAS; type++)
		dquot_quota_off(sb, type);
}

static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
{
	return SHMEM_I(inode)->i_dquot;
}
#endif /* CONFIG_TMPFS_QUOTA */

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
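/*
 * For SB_KERNMOUNT mounts, inode numbers are drawn from a per-cpu cache
 * refilled SHMEM_INO_BATCH at a time, so the stat_lock is only taken at
 * batch boundaries.
 */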
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_ispace -= BOGO_INODE_SIZE;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}

static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 * @alloced: the change in number of pages allocated to inode
 * @swapped: the change in number of pages swapped from inode
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 */
static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	spin_lock(&info->lock);
	info->alloced += alloced;
	info->swapped += swapped;
	freed = info->alloced - info->swapped -
		READ_ONCE(inode->i_mapping->nrpages);
	/*
	 * Special case: whereas normally shmem_recalc_inode() is called
	 * after i_mapping->nrpages has already been adjusted (up or down),
	 * shmem_writepage() has to raise swapped before nrpages is lowered -
	 * to stop a racing shmem_recalc_inode() from thinking that a page has
	 * been freed. Compensate here, to avoid the need for a followup call.
	 */
	if (swapped > 0)
		freed += swapped;
	if (freed > 0)
		info->alloced -= freed;
	spin_unlock(&info->lock);

	/* The quota case may block */
	if (freed > 0)
		shmem_inode_unacct_blocks(inode, freed);
}

bool shmem_charge(struct inode *inode, long pages)
{
	struct address_space *mapping = inode->i_mapping;

	if (shmem_inode_acct_blocks(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	xa_lock_irq(&mapping->i_pages);
	mapping->nrpages += pages;
	xa_unlock_irq(&mapping->i_pages);

	shmem_recalc_inode(inode, pages, 0);
	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	/* pages argument is currently unused: keep it to help debugging */
	/* nrpages adjustment done by __filemap_remove_folio() or caller */

	shmem_recalc_inode(inode, 0, 0);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking folio is not enough: by the time a swapcache folio is locked, it
 * might be reused, and again be swapcache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3
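/*
 * Example: "mount -t tmpfs -o huge=within_size tmpfs /mnt" selects
 * SHMEM_HUGE_WITHIN_SIZE for that mount.
 */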

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;

static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
					loff_t write_end, bool shmem_huge_force,
					struct vm_area_struct *vma,
					unsigned long vm_flags)
{
	struct mm_struct *mm = vma ? vma->vm_mm : NULL;
	loff_t i_size;

	if (!S_ISREG(inode->i_mode))
		return false;
	if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
		return false;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
		return true;

	switch (SHMEM_SB(inode->i_sb)->huge) {
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		index = round_up(index + 1, HPAGE_PMD_NR);
		i_size = max(write_end, i_size_read(inode));
		i_size = round_up(i_size, PAGE_SIZE);
		if (i_size >> PAGE_SHIFT >= index)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		if (mm && (vm_flags & VM_HUGEPAGE))
			return true;
		fallthrough;
	default:
		return false;
	}
}

static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
		loff_t write_end, bool shmem_huge_force,
		struct vm_area_struct *vma, unsigned long vm_flags)
{
	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
		return false;

	return __shmem_huge_global_enabled(inode, index, write_end,
					   shmem_huge_force, vma, vm_flags);
}

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_free)
{
	LIST_HEAD(list), *pos, *next;
	struct inode *inode;
	struct shmem_inode_info *info;
	struct folio *folio;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	unsigned long split = 0, freed = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		sbinfo->shrinklist_len--;
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &list) {
		pgoff_t next, end;
		loff_t i_size;
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_free && freed >= nr_to_free)
			goto move_back;

		i_size = i_size_read(inode);
		folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
		if (!folio || xa_is_value(folio))
			goto drop;

		/* No large folio at the end of the file: nothing to split */
		if (!folio_test_large(folio)) {
			folio_put(folio);
			goto drop;
		}

		/* Check if there is anything to gain from splitting */
		next = folio_next_index(folio);
		end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
		if (end <= folio->index || end >= next) {
			folio_put(folio);
			goto drop;
		}

		/*
		 * Move the inode on the list back to shrinklist if we failed
		 * to lock the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!folio_trylock(folio)) {
			folio_put(folio);
			goto move_back;
		}

		ret = split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);

		/* If split failed move the inode on the list back to shrinklist */
		if (ret)
			goto move_back;

		freed += next - end;
		split++;
drop:
		list_del_init(&info->shrinklist);
		goto put;
move_back:
		/*
		 * Make sure the inode is either on the global list or deleted
		 * from any local list before iput() since it could be deleted
		 * in another thread once we put the inode (then the local list
		 * is corrupted).
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		list_move(&info->shrinklist, &sbinfo->shrinklist);
		sbinfo->shrinklist_len++;
		spin_unlock(&sbinfo->shrinklist_lock);
put:
		iput(inode);
	}

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_free)
{
	return 0;
}

static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
		loff_t write_end, bool shmem_huge_force,
		struct vm_area_struct *vma, unsigned long vm_flags)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Somewhat like filemap_add_folio, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct folio *folio,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
	long nr = folio_nr_pages(folio);

	VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = index;

	gfp &= GFP_RECLAIM_MASK;
	folio_throttle_swaprate(folio, gfp);

	do {
		xas_lock_irq(&xas);
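		/*
		 * For a large folio the xa_state spans several indices: the
		 * slot at @index must still hold @expected, and no other slot
		 * in the range may hold a conflicting entry.
		 */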
Matthew Wilcox (Oracle) | 6b24ca4 | 2020-06-27 22:19:08 -0400 | [diff] [blame] | 805 | if (expected != xas_find_conflict(&xas)) { |
Matthew Wilcox | 552446a | 2017-12-01 13:25:14 -0500 | [diff] [blame] | 806 | xas_set_err(&xas, -EEXIST); |
Matthew Wilcox (Oracle) | 6b24ca4 | 2020-06-27 22:19:08 -0400 | [diff] [blame] | 807 | goto unlock; |
| 808 | } |
| 809 | if (expected && xas_find_conflict(&xas)) { |
| 810 | xas_set_err(&xas, -EEXIST); |
| 811 | goto unlock; |
| 812 | } |
Matthew Wilcox (Oracle) | b7dd44a | 2022-05-12 20:23:04 -0700 | [diff] [blame] | 813 | xas_store(&xas, folio); |
Matthew Wilcox | 552446a | 2017-12-01 13:25:14 -0500 | [diff] [blame] | 814 | if (xas_error(&xas)) |
| 815 | goto unlock; |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 816 | if (folio_test_pmd_mappable(folio)) |
Matthew Wilcox (Oracle) | b7dd44a | 2022-05-12 20:23:04 -0700 | [diff] [blame] | 817 | __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr); |
Matthew Wilcox (Oracle) | b7dd44a | 2022-05-12 20:23:04 -0700 | [diff] [blame] | 818 | __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr); |
| 819 | __lruvec_stat_mod_folio(folio, NR_SHMEM, nr); |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 820 | mapping->nrpages += nr; |
Matthew Wilcox | 552446a | 2017-12-01 13:25:14 -0500 | [diff] [blame] | 821 | unlock: |
| 822 | xas_unlock_irq(&xas); |
| 823 | } while (xas_nomem(&xas, gfp)); |
| 824 | |
| 825 | if (xas_error(&xas)) { |
Hugh Dickins | 054a9f7 | 2023-09-29 20:31:27 -0700 | [diff] [blame] | 826 | folio->mapping = NULL; |
| 827 | folio_ref_sub(folio, nr); |
| 828 | return xas_error(&xas); |
Hugh Dickins | 46f65ec | 2011-08-03 16:21:23 -0700 | [diff] [blame] | 829 | } |
Matthew Wilcox | 552446a | 2017-12-01 13:25:14 -0500 | [diff] [blame] | 830 | |
| 831 | return 0; |
Hugh Dickins | 46f65ec | 2011-08-03 16:21:23 -0700 | [diff] [blame] | 832 | } |
| 833 | |
| 834 | /* |
Hugh Dickins | 9be7d5b | 2023-09-29 20:28:50 -0700 | [diff] [blame] | 835 | * Somewhat like filemap_remove_folio, but substitutes swap for @folio. |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 836 | */ |
Matthew Wilcox (Oracle) | 4cd400f | 2022-09-02 20:46:03 +0100 | [diff] [blame] | 837 | static void shmem_delete_from_page_cache(struct folio *folio, void *radswap) |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 838 | { |
Matthew Wilcox (Oracle) | 4cd400f | 2022-09-02 20:46:03 +0100 | [diff] [blame] | 839 | struct address_space *mapping = folio->mapping; |
| 840 | long nr = folio_nr_pages(folio); |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 841 | int error; |
| 842 | |
Matthew Wilcox | b93b016 | 2018-04-10 16:36:56 -0700 | [diff] [blame] | 843 | xa_lock_irq(&mapping->i_pages); |
Matthew Wilcox (Oracle) | 4cd400f | 2022-09-02 20:46:03 +0100 | [diff] [blame] | 844 | error = shmem_replace_entry(mapping, folio->index, folio, radswap); |
| 845 | folio->mapping = NULL; |
| 846 | mapping->nrpages -= nr; |
| 847 | __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); |
| 848 | __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); |
Matthew Wilcox | b93b016 | 2018-04-10 16:36:56 -0700 | [diff] [blame] | 849 | xa_unlock_irq(&mapping->i_pages); |
Baolin Wang | 872339c | 2024-08-12 15:42:08 +0800 | [diff] [blame] | 850 | folio_put_refs(folio, nr); |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 851 | BUG_ON(error); |
| 852 | } |
| 853 | |
| 854 | /* |
Daniel Gomez | 6ea0d1c | 2024-08-12 15:42:04 +0800 | [diff] [blame] | 855 | * Remove swap entry from page cache, free the swap and its page cache. Returns |
| 856 | * the number of pages being freed. 0 means entry not found in XArray (0 pages |
| 857 | * being freed). |
Hugh Dickins | 7a5d0fb | 2011-08-03 16:21:22 -0700 | [diff] [blame] | 858 | */ |
Daniel Gomez | 6ea0d1c | 2024-08-12 15:42:04 +0800 | [diff] [blame] | 859 | static long shmem_free_swap(struct address_space *mapping, |
| 860 | pgoff_t index, void *radswap) |
Hugh Dickins | 7a5d0fb | 2011-08-03 16:21:22 -0700 | [diff] [blame] | 861 | { |
Daniel Gomez | 6ea0d1c | 2024-08-12 15:42:04 +0800 | [diff] [blame] | 862 | int order = xa_get_order(&mapping->i_pages, index); |
Johannes Weiner | 6dbaf22c | 2014-04-03 14:47:41 -0700 | [diff] [blame] | 863 | void *old; |
Hugh Dickins | 7a5d0fb | 2011-08-03 16:21:22 -0700 | [diff] [blame] | 864 | |
Matthew Wilcox | 55f3f7e | 2018-11-26 16:08:43 -0500 | [diff] [blame] | 865 | old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); |
Johannes Weiner | 6dbaf22c | 2014-04-03 14:47:41 -0700 | [diff] [blame] | 866 | if (old != radswap) |
Daniel Gomez | 6ea0d1c | 2024-08-12 15:42:04 +0800 | [diff] [blame] | 867 | return 0; |
| 868 | free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order); |
| 869 | |
| 870 | return 1 << order; |
Hugh Dickins | 7a5d0fb | 2011-08-03 16:21:22 -0700 | [diff] [blame] | 871 | } |
| 872 | |
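/*
 * Worked example for shmem_free_swap() above (assuming 4KiB base pages): a
 * swap entry covering a PMD-sized 2MiB folio is stored with order 9, so a
 * successful removal reports 1 << 9 = 512 pages freed, whereas an order-0
 * entry reports a single page.
 */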
| 873 | /* |
Vlastimil Babka | 6a15a37 | 2016-01-14 15:19:20 -0800 | [diff] [blame] | 874 | * Determine (in bytes) how many of the shmem object's pages mapped by the |
Vlastimil Babka | 48131e0 | 2016-01-14 15:19:23 -0800 | [diff] [blame] | 875 | * given offsets are swapped out. |
Vlastimil Babka | 6a15a37 | 2016-01-14 15:19:20 -0800 | [diff] [blame] | 876 | * |
Jan Kara | 9608703 | 2021-04-12 15:50:21 +0200 | [diff] [blame] | 877 | * This is safe to call without i_rwsem or the i_pages lock thanks to RCU, |
Vlastimil Babka | 6a15a37 | 2016-01-14 15:19:20 -0800 | [diff] [blame] | 878 | * as long as the inode doesn't go away and racy results are not a problem. |
| 879 | */ |
Vlastimil Babka | 48131e0 | 2016-01-14 15:19:23 -0800 | [diff] [blame] | 880 | unsigned long shmem_partial_swap_usage(struct address_space *mapping, |
| 881 | pgoff_t start, pgoff_t end) |
Vlastimil Babka | 6a15a37 | 2016-01-14 15:19:20 -0800 | [diff] [blame] | 882 | { |
Matthew Wilcox | 7ae3424 | 2017-12-04 03:28:00 -0500 | [diff] [blame] | 883 | XA_STATE(xas, &mapping->i_pages, start); |
Vlastimil Babka | 6a15a37 | 2016-01-14 15:19:20 -0800 | [diff] [blame] | 884 | struct page *page; |
Vlastimil Babka | 48131e0 | 2016-01-14 15:19:23 -0800 | [diff] [blame] | 885 | unsigned long swapped = 0; |
Hugh Dickins | e5548f8 | 2023-08-22 22:14:47 -0700 | [diff] [blame] | 886 | unsigned long max = end - 1; |
Vlastimil Babka | 6a15a37 | 2016-01-14 15:19:20 -0800 | [diff] [blame] | 887 | |
| 888 | rcu_read_lock(); |
Hugh Dickins | e5548f8 | 2023-08-22 22:14:47 -0700 | [diff] [blame] | 889 | xas_for_each(&xas, page, max) { |
Matthew Wilcox | 7ae3424 | 2017-12-04 03:28:00 -0500 | [diff] [blame] | 890 | if (xas_retry(&xas, page)) |
Matthew Wilcox | 2cf938a | 2016-03-17 14:22:03 -0700 | [diff] [blame] | 891 | continue; |
Matthew Wilcox | 3159f94 | 2017-11-03 13:30:42 -0400 | [diff] [blame] | 892 | if (xa_is_value(page)) |
Shakeel Butt | 354a595 | 2024-09-06 16:05:12 -0700 | [diff] [blame] | 893 | swapped += 1 << xas_get_order(&xas); |
Hugh Dickins | e5548f8 | 2023-08-22 22:14:47 -0700 | [diff] [blame] | 894 | if (xas.xa_index == max) |
| 895 | break; |
Vlastimil Babka | 6a15a37 | 2016-01-14 15:19:20 -0800 | [diff] [blame] | 896 | if (need_resched()) { |
Matthew Wilcox | 7ae3424 | 2017-12-04 03:28:00 -0500 | [diff] [blame] | 897 | xas_pause(&xas); |
Vlastimil Babka | 6a15a37 | 2016-01-14 15:19:20 -0800 | [diff] [blame] | 898 | cond_resched_rcu(); |
Vlastimil Babka | 6a15a37 | 2016-01-14 15:19:20 -0800 | [diff] [blame] | 899 | } |
| 900 | } |
Vlastimil Babka | 6a15a37 | 2016-01-14 15:19:20 -0800 | [diff] [blame] | 901 | rcu_read_unlock(); |
| 902 | |
| 903 | return swapped << PAGE_SHIFT; |
| 904 | } |
| 905 | |
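/*
 * Worked example for shmem_partial_swap_usage() above: the result is
 * swapped << PAGE_SHIFT, so with 4KiB pages (PAGE_SHIFT == 12) three
 * swapped-out pages in the range are reported as 3 << 12 = 12288 bytes.
 */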
| 906 | /* |
Vlastimil Babka | 48131e0 | 2016-01-14 15:19:23 -0800 | [diff] [blame] | 907 | * Determine (in bytes) how many of the shmem object's pages mapped by the |
| 908 | * given vma are swapped out. |
| 909 | * |
Jan Kara | 9608703 | 2021-04-12 15:50:21 +0200 | [diff] [blame] | 910 | * This is safe to call without i_rwsem or the i_pages lock thanks to RCU, |
Vlastimil Babka | 48131e0 | 2016-01-14 15:19:23 -0800 | [diff] [blame] | 911 | * as long as the inode doesn't go away and racy results are not a problem. |
| 912 | */ |
| 913 | unsigned long shmem_swap_usage(struct vm_area_struct *vma) |
| 914 | { |
| 915 | struct inode *inode = file_inode(vma->vm_file); |
| 916 | struct shmem_inode_info *info = SHMEM_I(inode); |
| 917 | struct address_space *mapping = inode->i_mapping; |
| 918 | unsigned long swapped; |
| 919 | |
| 920 | /* Be careful as we don't hold info->lock */ |
| 921 | swapped = READ_ONCE(info->swapped); |
| 922 | |
| 923 | /* |
| 924 | * The easier cases are when the shmem object has nothing in swap, or |
| 925 | * the vma maps it whole. Then we can simply use the stats that we |
| 926 | * already track. |
| 927 | */ |
| 928 | if (!swapped) |
| 929 | return 0; |
| 930 | |
| 931 | if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size) |
| 932 | return swapped << PAGE_SHIFT; |
| 933 | |
| 934 | /* Here comes the more involved part */ |
Peter Xu | 02399c8 | 2021-11-05 13:36:02 -0700 | [diff] [blame] | 935 | return shmem_partial_swap_usage(mapping, vma->vm_pgoff, |
| 936 | vma->vm_pgoff + vma_pages(vma)); |
Vlastimil Babka | 48131e0 | 2016-01-14 15:19:23 -0800 | [diff] [blame] | 937 | } |
| 938 | |
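/*
 * Hedged note: this per-vma figure is what typically backs the "Swap:" line
 * shown for a tmpfs or memfd mapping in /proc/<pid>/smaps; with 4KiB pages,
 * 64 swapped-out pages in the mapping would appear there as 256 kB.
 */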
| 939 | /* |
Hugh Dickins | 2451326 | 2012-01-20 14:34:21 -0800 | [diff] [blame] | 940 | * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists. |
| 941 | */ |
| 942 | void shmem_unlock_mapping(struct address_space *mapping) |
| 943 | { |
Matthew Wilcox (Oracle) | 105c988 | 2022-06-04 17:40:17 -0400 | [diff] [blame] | 944 | struct folio_batch fbatch; |
Hugh Dickins | 2451326 | 2012-01-20 14:34:21 -0800 | [diff] [blame] | 945 | pgoff_t index = 0; |
| 946 | |
Matthew Wilcox (Oracle) | 105c988 | 2022-06-04 17:40:17 -0400 | [diff] [blame] | 947 | folio_batch_init(&fbatch); |
Hugh Dickins | 2451326 | 2012-01-20 14:34:21 -0800 | [diff] [blame] | 948 | /* |
| 949 | * Minor point, but we might as well stop if someone else SHM_LOCKs it. |
| 950 | */ |
Matthew Wilcox (Oracle) | 105c988 | 2022-06-04 17:40:17 -0400 | [diff] [blame] | 951 | while (!mapping_unevictable(mapping) && |
| 952 | filemap_get_folios(mapping, &index, ~0UL, &fbatch)) { |
| 953 | check_move_unevictable_folios(&fbatch); |
| 954 | folio_batch_release(&fbatch); |
Hugh Dickins | 2451326 | 2012-01-20 14:34:21 -0800 | [diff] [blame] | 955 | cond_resched(); |
| 956 | } |
Hugh Dickins | 7a5d0fb | 2011-08-03 16:21:22 -0700 | [diff] [blame] | 957 | } |
| 958 | |
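/*
 * Hedged userspace illustration, not part of shmem.c: shmem_unlock_mapping()
 * above runs when a SysV segment is unlocked, roughly as sketched below.
 * Error handling is omitted and the helper name is hypothetical.
 */
#if 0	/* example only */
#include <sys/ipc.h>
#include <sys/shm.h>

static void lock_then_unlock_segment(void)
{
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

	shmctl(id, SHM_LOCK, NULL);	/* pages become unevictable */
	shmctl(id, SHM_UNLOCK, NULL);	/* moved back to the evictable lists */
	shmctl(id, IPC_RMID, NULL);
}
#endif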
Matthew Wilcox (Oracle) | b9a8a41 | 2020-05-27 17:59:22 -0400 | [diff] [blame] | 959 | static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index) |
Hugh Dickins | 71725ed | 2020-04-06 20:07:57 -0700 | [diff] [blame] | 960 | { |
Matthew Wilcox (Oracle) | b9a8a41 | 2020-05-27 17:59:22 -0400 | [diff] [blame] | 961 | struct folio *folio; |
Hugh Dickins | 71725ed | 2020-04-06 20:07:57 -0700 | [diff] [blame] | 962 | |
Matthew Wilcox (Oracle) | b9a8a41 | 2020-05-27 17:59:22 -0400 | [diff] [blame] | 963 | /* |
Matthew Wilcox (Oracle) | a7f5862 | 2022-09-02 20:46:21 +0100 | [diff] [blame] | 964 | * At first avoid shmem_get_folio(,,,SGP_READ): that fails |
Hugh Dickins | 81914af | 2023-03-19 22:19:21 -0700 | [diff] [blame] | 965 | * beyond i_size, and reports fallocated folios as holes. |
Matthew Wilcox (Oracle) | b9a8a41 | 2020-05-27 17:59:22 -0400 | [diff] [blame] | 966 | */ |
Hugh Dickins | 81914af | 2023-03-19 22:19:21 -0700 | [diff] [blame] | 967 | folio = filemap_get_entry(inode->i_mapping, index); |
| 968 | if (!folio) |
Matthew Wilcox (Oracle) | b9a8a41 | 2020-05-27 17:59:22 -0400 | [diff] [blame] | 969 | return folio; |
Hugh Dickins | 81914af | 2023-03-19 22:19:21 -0700 | [diff] [blame] | 970 | if (!xa_is_value(folio)) { |
| 971 | folio_lock(folio); |
| 972 | if (folio->mapping == inode->i_mapping) |
| 973 | return folio; |
| 974 | /* The folio has been swapped out */ |
| 975 | folio_unlock(folio); |
| 976 | folio_put(folio); |
| 977 | } |
Matthew Wilcox (Oracle) | b9a8a41 | 2020-05-27 17:59:22 -0400 | [diff] [blame] | 978 | /* |
Hugh Dickins | 81914af | 2023-03-19 22:19:21 -0700 | [diff] [blame] | 979 | * But read a folio back from swap if any of it is within i_size |
Matthew Wilcox (Oracle) | b9a8a41 | 2020-05-27 17:59:22 -0400 | [diff] [blame] | 980 | * (although in some cases this is just a waste of time). |
| 981 | */ |
Matthew Wilcox (Oracle) | a7f5862 | 2022-09-02 20:46:21 +0100 | [diff] [blame] | 982 | folio = NULL; |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 983 | shmem_get_folio(inode, index, 0, &folio, SGP_READ); |
Matthew Wilcox (Oracle) | a7f5862 | 2022-09-02 20:46:21 +0100 | [diff] [blame] | 984 | return folio; |
Hugh Dickins | 71725ed | 2020-04-06 20:07:57 -0700 | [diff] [blame] | 985 | } |
| 986 | |
| 987 | /* |
Matthew Wilcox | 7f4446e | 2017-12-04 03:31:13 -0500 | [diff] [blame] | 988 | * Remove range of pages and swap entries from page cache, and free them. |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 989 | * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate. |
Hugh Dickins | 7a5d0fb | 2011-08-03 16:21:22 -0700 | [diff] [blame] | 990 | */ |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 991 | static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, |
| 992 | bool unfalloc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 993 | { |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 994 | struct address_space *mapping = inode->i_mapping; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 995 | struct shmem_inode_info *info = SHMEM_I(inode); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 996 | pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 997 | pgoff_t end = (lend + 1) >> PAGE_SHIFT; |
Matthew Wilcox (Oracle) | 0e499ed | 2020-09-01 23:17:50 -0400 | [diff] [blame] | 998 | struct folio_batch fbatch; |
Hugh Dickins | 7a5d0fb | 2011-08-03 16:21:22 -0700 | [diff] [blame] | 999 | pgoff_t indices[PAGEVEC_SIZE]; |
Matthew Wilcox (Oracle) | b9a8a41 | 2020-05-27 17:59:22 -0400 | [diff] [blame] | 1000 | struct folio *folio; |
| 1001 | bool same_folio; |
Hugh Dickins | 7a5d0fb | 2011-08-03 16:21:22 -0700 | [diff] [blame] | 1002 | long nr_swaps_freed = 0; |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 1003 | pgoff_t index; |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1004 | int i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1005 | |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 1006 | if (lend == -1) |
| 1007 | end = -1; /* unsigned, so actually very big */ |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1008 | |
Hugh Dickins | d144bf6 | 2021-09-02 14:54:21 -0700 | [diff] [blame] | 1009 | if (info->fallocend > start && info->fallocend <= end && !unfalloc) |
| 1010 | info->fallocend = start; |
| 1011 | |
Matthew Wilcox (Oracle) | 51dcbda | 2021-12-07 14:15:07 -0500 | [diff] [blame] | 1012 | folio_batch_init(&fbatch); |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1013 | index = start; |
Vishal Moola (Oracle) | 3392ca12 | 2022-10-17 09:17:59 -0700 | [diff] [blame] | 1014 | while (index < end && find_lock_entries(mapping, &index, end - 1, |
Matthew Wilcox (Oracle) | 51dcbda | 2021-12-07 14:15:07 -0500 | [diff] [blame] | 1015 | &fbatch, indices)) { |
| 1016 | for (i = 0; i < folio_batch_count(&fbatch); i++) { |
Matthew Wilcox (Oracle) | b9a8a41 | 2020-05-27 17:59:22 -0400 | [diff] [blame] | 1017 | folio = fbatch.folios[i]; |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1018 | |
Matthew Wilcox (Oracle) | 7b774aa | 2021-12-03 08:50:01 -0500 | [diff] [blame] | 1019 | if (xa_is_value(folio)) { |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1020 | if (unfalloc) |
| 1021 | continue; |
Daniel Gomez | 6ea0d1c | 2024-08-12 15:42:04 +0800 | [diff] [blame] | 1022 | nr_swaps_freed += shmem_free_swap(mapping, |
Vishal Moola (Oracle) | 3392ca12 | 2022-10-17 09:17:59 -0700 | [diff] [blame] | 1023 | indices[i], folio); |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1024 | continue; |
| 1025 | } |
Hugh Dickins | 7a5d0fb | 2011-08-03 16:21:22 -0700 | [diff] [blame] | 1026 | |
Matthew Wilcox (Oracle) | 7b774aa | 2021-12-03 08:50:01 -0500 | [diff] [blame] | 1027 | if (!unfalloc || !folio_test_uptodate(folio)) |
Matthew Wilcox (Oracle) | 1e84a3d | 2021-12-02 16:01:55 -0500 | [diff] [blame] | 1028 | truncate_inode_folio(mapping, folio); |
Matthew Wilcox (Oracle) | 7b774aa | 2021-12-03 08:50:01 -0500 | [diff] [blame] | 1029 | folio_unlock(folio); |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1030 | } |
Matthew Wilcox (Oracle) | 51dcbda | 2021-12-07 14:15:07 -0500 | [diff] [blame] | 1031 | folio_batch_remove_exceptionals(&fbatch); |
| 1032 | folio_batch_release(&fbatch); |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1033 | cond_resched(); |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1034 | } |
| 1035 | |
Hugh Dickins | 44bcabd | 2022-12-04 16:51:50 -0800 | [diff] [blame] | 1036 | /* |
| 1037 | * When undoing a failed fallocate, we want none of the partial folio |
| 1038 | * zeroing and splitting below, but shall want to truncate the whole |
| 1039 | * folio when !uptodate indicates that it was added by this fallocate, |
| 1040 | * even when [lstart, lend] covers only a part of the folio. |
| 1041 | */ |
| 1042 | if (unfalloc) |
| 1043 | goto whole_folios; |
| 1044 | |
Matthew Wilcox (Oracle) | b9a8a41 | 2020-05-27 17:59:22 -0400 | [diff] [blame] | 1045 | same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT); |
| 1046 | folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT); |
| 1047 | if (folio) { |
| 1048 | same_folio = lend < folio_pos(folio) + folio_size(folio); |
| 1049 | folio_mark_dirty(folio); |
| 1050 | if (!truncate_inode_partial_folio(folio, lstart, lend)) { |
Sidhartha Kumar | 87b11f8 | 2023-06-27 10:43:49 -0700 | [diff] [blame] | 1051 | start = folio_next_index(folio); |
Matthew Wilcox (Oracle) | b9a8a41 | 2020-05-27 17:59:22 -0400 | [diff] [blame] | 1052 | if (same_folio) |
| 1053 | end = folio->index; |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1054 | } |
Matthew Wilcox (Oracle) | b9a8a41 | 2020-05-27 17:59:22 -0400 | [diff] [blame] | 1055 | folio_unlock(folio); |
| 1056 | folio_put(folio); |
| 1057 | folio = NULL; |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1058 | } |
Matthew Wilcox (Oracle) | b9a8a41 | 2020-05-27 17:59:22 -0400 | [diff] [blame] | 1059 | |
| 1060 | if (!same_folio) |
| 1061 | folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT); |
| 1062 | if (folio) { |
| 1063 | folio_mark_dirty(folio); |
| 1064 | if (!truncate_inode_partial_folio(folio, lstart, lend)) |
| 1065 | end = folio->index; |
| 1066 | folio_unlock(folio); |
| 1067 | folio_put(folio); |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 1068 | } |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1069 | |
Hugh Dickins | 44bcabd | 2022-12-04 16:51:50 -0800 | [diff] [blame] | 1070 | whole_folios: |
| 1071 | |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1072 | index = start; |
Hugh Dickins | b1a3665 | 2014-07-23 14:00:13 -0700 | [diff] [blame] | 1073 | while (index < end) { |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1074 | cond_resched(); |
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1075 | |
Vishal Moola (Oracle) | 9fb6bee | 2022-10-17 09:18:00 -0700 | [diff] [blame] | 1076 | if (!find_get_entries(mapping, &index, end - 1, &fbatch, |
Matthew Wilcox (Oracle) | cf2039a | 2021-02-25 17:16:11 -0800 | [diff] [blame] | 1077 | indices)) { |
Hugh Dickins | b1a3665 | 2014-07-23 14:00:13 -0700 | [diff] [blame] | 1078 | /* If all gone or hole-punch or unfalloc, we're done */ |
| 1079 | if (index == start || end != -1) |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1080 | break; |
Hugh Dickins | b1a3665 | 2014-07-23 14:00:13 -0700 | [diff] [blame] | 1081 | /* But if truncating, restart to make sure all gone */ |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1082 | index = start; |
| 1083 | continue; |
| 1084 | } |
Matthew Wilcox (Oracle) | 0e499ed | 2020-09-01 23:17:50 -0400 | [diff] [blame] | 1085 | for (i = 0; i < folio_batch_count(&fbatch); i++) { |
Matthew Wilcox (Oracle) | b9a8a41 | 2020-05-27 17:59:22 -0400 | [diff] [blame] | 1086 | folio = fbatch.folios[i]; |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1087 | |
Matthew Wilcox (Oracle) | 0e499ed | 2020-09-01 23:17:50 -0400 | [diff] [blame] | 1088 | if (xa_is_value(folio)) { |
Daniel Gomez | 6ea0d1c | 2024-08-12 15:42:04 +0800 | [diff] [blame] | 1089 | long swaps_freed; |
| 1090 | |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1091 | if (unfalloc) |
| 1092 | continue; |
Daniel Gomez | 6ea0d1c | 2024-08-12 15:42:04 +0800 | [diff] [blame] | 1093 | swaps_freed = shmem_free_swap(mapping, indices[i], folio); |
| 1094 | if (!swaps_freed) { |
Hugh Dickins | b1a3665 | 2014-07-23 14:00:13 -0700 | [diff] [blame] | 1095 | /* Swap was replaced by page: retry */ |
Vishal Moola (Oracle) | 9fb6bee | 2022-10-17 09:18:00 -0700 | [diff] [blame] | 1096 | index = indices[i]; |
Hugh Dickins | b1a3665 | 2014-07-23 14:00:13 -0700 | [diff] [blame] | 1097 | break; |
| 1098 | } |
Daniel Gomez | 6ea0d1c | 2024-08-12 15:42:04 +0800 | [diff] [blame] | 1099 | nr_swaps_freed += swaps_freed; |
Hugh Dickins | 7a5d0fb | 2011-08-03 16:21:22 -0700 | [diff] [blame] | 1100 | continue; |
| 1101 | } |
| 1102 | |
Matthew Wilcox (Oracle) | 0e499ed | 2020-09-01 23:17:50 -0400 | [diff] [blame] | 1103 | folio_lock(folio); |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1104 | |
Matthew Wilcox (Oracle) | 0e499ed | 2020-09-01 23:17:50 -0400 | [diff] [blame] | 1105 | if (!unfalloc || !folio_test_uptodate(folio)) { |
Matthew Wilcox (Oracle) | 0e499ed | 2020-09-01 23:17:50 -0400 | [diff] [blame] | 1106 | if (folio_mapping(folio) != mapping) { |
Hugh Dickins | b1a3665 | 2014-07-23 14:00:13 -0700 | [diff] [blame] | 1107 | /* Page was replaced by swap: retry */ |
Matthew Wilcox (Oracle) | 0e499ed | 2020-09-01 23:17:50 -0400 | [diff] [blame] | 1108 | folio_unlock(folio); |
Vishal Moola (Oracle) | 9fb6bee | 2022-10-17 09:18:00 -0700 | [diff] [blame] | 1109 | index = indices[i]; |
Hugh Dickins | b1a3665 | 2014-07-23 14:00:13 -0700 | [diff] [blame] | 1110 | break; |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1111 | } |
Matthew Wilcox (Oracle) | 0e499ed | 2020-09-01 23:17:50 -0400 | [diff] [blame] | 1112 | VM_BUG_ON_FOLIO(folio_test_writeback(folio), |
| 1113 | folio); |
David Stevens | 55ac8bb | 2023-04-18 17:40:31 +0900 | [diff] [blame] | 1114 | |
| 1115 | if (!folio_test_large(folio)) { |
| 1116 | truncate_inode_folio(mapping, folio); |
| 1117 | } else if (truncate_inode_partial_folio(folio, lstart, lend)) { |
| 1118 | /* |
| 1119 | * If we split a page, reset the loop so |
| 1120 | * that we pick up the new sub pages. |
| 1121 | * Otherwise the THP was entirely |
| 1122 | * dropped or the target range was |
| 1123 | * zeroed, so just continue the loop as |
| 1124 | * is. |
| 1125 | */ |
| 1126 | if (!folio_test_large(folio)) { |
| 1127 | folio_unlock(folio); |
| 1128 | index = start; |
| 1129 | break; |
| 1130 | } |
| 1131 | } |
Hugh Dickins | 7a5d0fb | 2011-08-03 16:21:22 -0700 | [diff] [blame] | 1132 | } |
Matthew Wilcox (Oracle) | 0e499ed | 2020-09-01 23:17:50 -0400 | [diff] [blame] | 1133 | folio_unlock(folio); |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1134 | } |
Matthew Wilcox (Oracle) | 0e499ed | 2020-09-01 23:17:50 -0400 | [diff] [blame] | 1135 | folio_batch_remove_exceptionals(&fbatch); |
| 1136 | folio_batch_release(&fbatch); |
Hugh Dickins | bda97ea | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1137 | } |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 1138 | |
Hugh Dickins | 3c1b752 | 2023-08-03 22:46:11 -0700 | [diff] [blame] | 1139 | shmem_recalc_inode(inode, 0, -nr_swaps_freed); |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1140 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1141 | |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1142 | void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) |
| 1143 | { |
| 1144 | shmem_undo_range(inode, lstart, lend, false); |
Jeff Layton | cf2766b | 2023-10-04 14:53:07 -0400 | [diff] [blame] | 1145 | inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); |
Jeff Layton | 36f05ca | 2022-09-09 09:00:31 -0400 | [diff] [blame] | 1146 | inode_inc_iversion(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1147 | } |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 1148 | EXPORT_SYMBOL_GPL(shmem_truncate_range); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 | |
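/*
 * Hedged userspace illustration, not part of shmem.c: both ftruncate() and
 * hole punching on a tmpfs file are expected to reach shmem_truncate_range()
 * above.  Error handling is omitted; the path is purely illustrative.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static void punch_hole_in_tmpfs_file(void)
{
	int fd = open("/dev/shm/example", O_RDWR | O_CREAT, 0600);

	ftruncate(fd, 8 << 20);			/* 8MiB file */
	/* drop the middle 4MiB without changing i_size */
	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		  2 << 20, 4 << 20);
	close(fd);
}
#endif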
Christian Brauner | b74d24f | 2023-01-13 12:49:12 +0100 | [diff] [blame] | 1150 | static int shmem_getattr(struct mnt_idmap *idmap, |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 1151 | const struct path *path, struct kstat *stat, |
David Howells | a528d35 | 2017-01-31 16:46:22 +0000 | [diff] [blame] | 1152 | u32 request_mask, unsigned int query_flags) |
Yu Zhao | 44a3022 | 2015-09-08 15:03:33 -0700 | [diff] [blame] | 1153 | { |
David Howells | a528d35 | 2017-01-31 16:46:22 +0000 | [diff] [blame] | 1154 | struct inode *inode = path->dentry->d_inode; |
Yu Zhao | 44a3022 | 2015-09-08 15:03:33 -0700 | [diff] [blame] | 1155 | struct shmem_inode_info *info = SHMEM_I(inode); |
| 1156 | |
Hugh Dickins | 3c1b752 | 2023-08-03 22:46:11 -0700 | [diff] [blame] | 1157 | if (info->alloced - info->swapped != inode->i_mapping->nrpages) |
| 1158 | shmem_recalc_inode(inode, 0, 0); |
| 1159 | |
Theodore Ts'o | e408e69 | 2022-07-14 21:59:12 -0400 | [diff] [blame] | 1160 | if (info->fsflags & FS_APPEND_FL) |
| 1161 | stat->attributes |= STATX_ATTR_APPEND; |
| 1162 | if (info->fsflags & FS_IMMUTABLE_FL) |
| 1163 | stat->attributes |= STATX_ATTR_IMMUTABLE; |
| 1164 | if (info->fsflags & FS_NODUMP_FL) |
| 1165 | stat->attributes |= STATX_ATTR_NODUMP; |
| 1166 | stat->attributes_mask |= (STATX_ATTR_APPEND | |
| 1167 | STATX_ATTR_IMMUTABLE | |
| 1168 | STATX_ATTR_NODUMP); |
Jeff Layton | 0d72b928 | 2023-08-07 15:38:33 -0400 | [diff] [blame] | 1169 | generic_fillattr(idmap, request_mask, inode, stat); |
Yang Shi | 89fdcd2 | 2018-06-07 17:06:59 -0700 | [diff] [blame] | 1170 | |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 1171 | if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0)) |
Yang Shi | 89fdcd2 | 2018-06-07 17:06:59 -0700 | [diff] [blame] | 1172 | stat->blksize = HPAGE_PMD_SIZE; |
| 1173 | |
Xavier Roche | f7cd16a | 2022-03-22 14:39:55 -0700 | [diff] [blame] | 1174 | if (request_mask & STATX_BTIME) { |
| 1175 | stat->result_mask |= STATX_BTIME; |
| 1176 | stat->btime.tv_sec = info->i_crtime.tv_sec; |
| 1177 | stat->btime.tv_nsec = info->i_crtime.tv_nsec; |
| 1178 | } |
| 1179 | |
Yu Zhao | 44a3022 | 2015-09-08 15:03:33 -0700 | [diff] [blame] | 1180 | return 0; |
| 1181 | } |
| 1182 | |
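/*
 * Hedged userspace illustration, not part of shmem.c: the STATX_BTIME
 * handling above is what allows statx() to report a birth time for tmpfs
 * files, roughly as sketched here.  Error handling is minimal and the
 * function name is hypothetical.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

static void print_birth_time(const char *path)
{
	struct statx stx;

	if (statx(AT_FDCWD, path, 0, STATX_BTIME, &stx) == 0 &&
	    (stx.stx_mask & STATX_BTIME))
		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
}
#endif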
Christian Brauner | c1632a0 | 2023-01-13 12:49:11 +0100 | [diff] [blame] | 1183 | static int shmem_setattr(struct mnt_idmap *idmap, |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 1184 | struct dentry *dentry, struct iattr *attr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1185 | { |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 1186 | struct inode *inode = d_inode(dentry); |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 1187 | struct shmem_inode_info *info = SHMEM_I(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1188 | int error; |
Jeff Layton | 36f05ca | 2022-09-09 09:00:31 -0400 | [diff] [blame] | 1189 | bool update_mtime = false; |
| 1190 | bool update_ctime = true; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 | |
Giuseppe Scrivano | 7a80e5b | 2023-01-20 10:43:46 +0100 | [diff] [blame] | 1192 | error = setattr_prepare(idmap, dentry, attr); |
Christoph Hellwig | db78b877 | 2010-06-04 11:30:03 +0200 | [diff] [blame] | 1193 | if (error) |
| 1194 | return error; |
| 1195 | |
Daniel Verkamp | 6fd7353 | 2022-12-15 00:12:01 +0000 | [diff] [blame] | 1196 | if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) { |
| 1197 | if ((inode->i_mode ^ attr->ia_mode) & 0111) { |
| 1198 | return -EPERM; |
| 1199 | } |
| 1200 | } |
| 1201 | |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 1202 | if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { |
| 1203 | loff_t oldsize = inode->i_size; |
| 1204 | loff_t newsize = attr->ia_size; |
npiggin@suse.de | 3889e6e | 2010-05-27 01:05:36 +1000 | [diff] [blame] | 1205 | |
Jan Kara | 9608703 | 2021-04-12 15:50:21 +0200 | [diff] [blame] | 1206 | /* protected by i_rwsem */ |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 1207 | if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || |
| 1208 | (newsize > oldsize && (info->seals & F_SEAL_GROW))) |
| 1209 | return -EPERM; |
| 1210 | |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 1211 | if (newsize != oldsize) { |
Konstantin Khlebnikov | 7714251 | 2014-08-06 16:06:34 -0700 | [diff] [blame] | 1212 | error = shmem_reacct_size(SHMEM_I(inode)->flags, |
| 1213 | oldsize, newsize); |
| 1214 | if (error) |
| 1215 | return error; |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 1216 | i_size_write(inode, newsize); |
Jeff Layton | 36f05ca | 2022-09-09 09:00:31 -0400 | [diff] [blame] | 1217 | update_mtime = true; |
| 1218 | } else { |
| 1219 | update_ctime = false; |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 1220 | } |
Josef Bacik | afa2db2 | 2015-06-24 16:58:45 -0700 | [diff] [blame] | 1221 | if (newsize <= oldsize) { |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 1222 | loff_t holebegin = round_up(newsize, PAGE_SIZE); |
Hugh Dickins | d0424c4 | 2015-11-05 18:50:34 -0800 | [diff] [blame] | 1223 | if (oldsize > holebegin) |
| 1224 | unmap_mapping_range(inode->i_mapping, |
| 1225 | holebegin, 0, 1); |
| 1226 | if (info->alloced) |
| 1227 | shmem_truncate_range(inode, |
| 1228 | newsize, (loff_t)-1); |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 1229 | /* unmap again to remove racily COWed private pages */ |
Hugh Dickins | d0424c4 | 2015-11-05 18:50:34 -0800 | [diff] [blame] | 1230 | if (oldsize > holebegin) |
| 1231 | unmap_mapping_range(inode->i_mapping, |
| 1232 | holebegin, 0, 1); |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 1233 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1234 | } |
| 1235 | |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 1236 | if (is_quota_modification(idmap, inode, attr)) { |
| 1237 | error = dquot_initialize(inode); |
| 1238 | if (error) |
| 1239 | return error; |
| 1240 | } |
| 1241 | |
| 1242 | /* Transfer quota accounting */ |
| 1243 | if (i_uid_needs_update(idmap, attr, inode) || |
| 1244 | i_gid_needs_update(idmap, attr, inode)) { |
| 1245 | error = dquot_transfer(idmap, inode, attr); |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 1246 | if (error) |
| 1247 | return error; |
| 1248 | } |
| 1249 | |
Giuseppe Scrivano | 7a80e5b | 2023-01-20 10:43:46 +0100 | [diff] [blame] | 1250 | setattr_copy(idmap, inode, attr); |
Christoph Hellwig | db78b877 | 2010-06-04 11:30:03 +0200 | [diff] [blame] | 1251 | if (attr->ia_valid & ATTR_MODE) |
Giuseppe Scrivano | 7a80e5b | 2023-01-20 10:43:46 +0100 | [diff] [blame] | 1252 | error = posix_acl_chmod(idmap, dentry, inode->i_mode); |
Jeff Layton | 36f05ca | 2022-09-09 09:00:31 -0400 | [diff] [blame] | 1253 | if (!error && update_ctime) { |
Jeff Layton | 6528733 | 2023-07-05 15:01:52 -0400 | [diff] [blame] | 1254 | inode_set_ctime_current(inode); |
Jeff Layton | 36f05ca | 2022-09-09 09:00:31 -0400 | [diff] [blame] | 1255 | if (update_mtime) |
Jeff Layton | cf2766b | 2023-10-04 14:53:07 -0400 | [diff] [blame] | 1256 | inode_set_mtime_to_ts(inode, inode_get_ctime(inode)); |
Jeff Layton | 36f05ca | 2022-09-09 09:00:31 -0400 | [diff] [blame] | 1257 | inode_inc_iversion(inode); |
| 1258 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1259 | return error; |
| 1260 | } |
| 1261 | |
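/*
 * Hedged userspace illustration, not part of shmem.c: the F_SEAL_GROW and
 * F_SEAL_SHRINK checks above are what make a later size change of a sealed
 * memfd fail with EPERM, roughly as sketched here.  Error handling is
 * omitted and the memfd name is purely illustrative.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void seal_memfd_size(void)
{
	int fd = memfd_create("example", MFD_ALLOW_SEALING);

	ftruncate(fd, 1 << 20);
	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
	/* any further ftruncate() to a different size now fails with EPERM */
	close(fd);
}
#endif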
Al Viro | 1f895f7 | 2010-06-05 19:10:41 -0400 | [diff] [blame] | 1262 | static void shmem_evict_inode(struct inode *inode) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1263 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1264 | struct shmem_inode_info *info = SHMEM_I(inode); |
Kirill A. Shutemov | 779750d | 2016-07-26 15:26:38 -0700 | [diff] [blame] | 1265 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 1266 | size_t freed = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1267 | |
Hui Su | 30e6a51 | 2020-12-14 19:06:14 -0800 | [diff] [blame] | 1268 | if (shmem_mapping(inode->i_mapping)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 | shmem_unacct_size(info->flags, inode->i_size); |
| 1270 | inode->i_size = 0; |
Hugh Dickins | bc78639 | 2022-03-22 14:39:58 -0700 | [diff] [blame] | 1271 | mapping_set_exiting(inode->i_mapping); |
npiggin@suse.de | 3889e6e | 2010-05-27 01:05:36 +1000 | [diff] [blame] | 1272 | shmem_truncate_range(inode, 0, (loff_t)-1); |
Kirill A. Shutemov | 779750d | 2016-07-26 15:26:38 -0700 | [diff] [blame] | 1273 | if (!list_empty(&info->shrinklist)) { |
| 1274 | spin_lock(&sbinfo->shrinklist_lock); |
| 1275 | if (!list_empty(&info->shrinklist)) { |
| 1276 | list_del_init(&info->shrinklist); |
| 1277 | sbinfo->shrinklist_len--; |
| 1278 | } |
| 1279 | spin_unlock(&sbinfo->shrinklist_lock); |
| 1280 | } |
Hugh Dickins | af53d3e | 2019-04-18 17:50:13 -0700 | [diff] [blame] | 1281 | while (!list_empty(&info->swaplist)) { |
| 1282 | /* Wait while shmem_unuse() is scanning this inode... */ |
| 1283 | wait_var_event(&info->stop_eviction, |
| 1284 | !atomic_read(&info->stop_eviction)); |
Hugh Dickins | cb5f7b9 | 2008-02-04 22:28:52 -0800 | [diff] [blame] | 1285 | mutex_lock(&shmem_swaplist_mutex); |
Hugh Dickins | af53d3e | 2019-04-18 17:50:13 -0700 | [diff] [blame] | 1286 | /* ...but beware of the race if we peeked too early */ |
| 1287 | if (!atomic_read(&info->stop_eviction)) |
| 1288 | list_del_init(&info->swaplist); |
Hugh Dickins | cb5f7b9 | 2008-02-04 22:28:52 -0800 | [diff] [blame] | 1289 | mutex_unlock(&shmem_swaplist_mutex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1290 | } |
Al Viro | 3ed47db | 2016-01-22 18:08:52 -0500 | [diff] [blame] | 1291 | } |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 1292 | |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 1293 | simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL); |
| 1294 | shmem_free_inode(inode->i_sb, freed); |
Hugh Dickins | 0f3c42f | 2012-11-16 14:15:04 -0800 | [diff] [blame] | 1295 | WARN_ON(inode->i_blocks); |
Jan Kara | dbd5768 | 2012-05-03 14:48:02 +0200 | [diff] [blame] | 1296 | clear_inode(inode); |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 1297 | #ifdef CONFIG_TMPFS_QUOTA |
| 1298 | dquot_free_inode(inode); |
| 1299 | dquot_drop(inode); |
| 1300 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1301 | } |
| 1302 | |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1303 | static int shmem_find_swap_entries(struct address_space *mapping, |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1304 | pgoff_t start, struct folio_batch *fbatch, |
| 1305 | pgoff_t *indices, unsigned int type) |
Matthew Wilcox | 478922e | 2016-12-14 15:08:52 -0800 | [diff] [blame] | 1306 | { |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1307 | XA_STATE(xas, &mapping->i_pages, start); |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1308 | struct folio *folio; |
Hugh Dickins | 8703954 | 2019-04-18 17:49:58 -0700 | [diff] [blame] | 1309 | swp_entry_t entry; |
Matthew Wilcox | 478922e | 2016-12-14 15:08:52 -0800 | [diff] [blame] | 1310 | |
| 1311 | rcu_read_lock(); |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1312 | xas_for_each(&xas, folio, ULONG_MAX) { |
| 1313 | if (xas_retry(&xas, folio)) |
Mike Kravetz | 5b9c98f | 2018-06-07 17:05:53 -0700 | [diff] [blame] | 1314 | continue; |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1315 | |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1316 | if (!xa_is_value(folio)) |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1317 | continue; |
| 1318 | |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1319 | entry = radix_to_swp_entry(folio); |
Miaohe Lin | 6cec2b9 | 2022-05-19 20:50:29 +0800 | [diff] [blame] | 1320 | /* |
| 1321 | * swapin error entries can be found in the mapping. But they're |
| 1322 | * deliberately ignored here as we've done everything we can do. |
| 1323 | */ |
Hugh Dickins | 8703954 | 2019-04-18 17:49:58 -0700 | [diff] [blame] | 1324 | if (swp_type(entry) != type) |
| 1325 | continue; |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1326 | |
Hugh Dickins | e384200 | 2022-05-21 19:53:04 -0700 | [diff] [blame] | 1327 | indices[folio_batch_count(fbatch)] = xas.xa_index; |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1328 | if (!folio_batch_add(fbatch, folio)) |
| 1329 | break; |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1330 | |
| 1331 | if (need_resched()) { |
| 1332 | xas_pause(&xas); |
| 1333 | cond_resched_rcu(); |
| 1334 | } |
Matthew Wilcox | 478922e | 2016-12-14 15:08:52 -0800 | [diff] [blame] | 1335 | } |
Matthew Wilcox | 478922e | 2016-12-14 15:08:52 -0800 | [diff] [blame] | 1336 | rcu_read_unlock(); |
Matthew Wilcox | e21a295 | 2017-11-22 08:36:00 -0500 | [diff] [blame] | 1337 | |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1338 | return xas.xa_index; |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1339 | } |
| 1340 | |
| 1341 | /* |
| 1342 | * Move the swapped pages for an inode to page cache. Returns the count |
| 1343 | * of pages swapped in, or the error in case of failure. |
| 1344 | */ |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1345 | static int shmem_unuse_swap_entries(struct inode *inode, |
| 1346 | struct folio_batch *fbatch, pgoff_t *indices) |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1347 | { |
| 1348 | int i = 0; |
| 1349 | int ret = 0; |
| 1350 | int error = 0; |
| 1351 | struct address_space *mapping = inode->i_mapping; |
| 1352 | |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1353 | for (i = 0; i < folio_batch_count(fbatch); i++) { |
| 1354 | struct folio *folio = fbatch->folios[i]; |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1355 | |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1356 | if (!xa_is_value(folio)) |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1357 | continue; |
Hugh Dickins | 054a9f7 | 2023-09-29 20:31:27 -0700 | [diff] [blame] | 1358 | error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE, |
| 1359 | mapping_gfp_mask(mapping), NULL, NULL); |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1360 | if (error == 0) { |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1361 | folio_unlock(folio); |
| 1362 | folio_put(folio); |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1363 | ret++; |
| 1364 | } |
| 1365 | if (error == -ENOMEM) |
| 1366 | break; |
| 1367 | error = 0; |
| 1368 | } |
| 1369 | return error ? error : ret; |
Matthew Wilcox | 478922e | 2016-12-14 15:08:52 -0800 | [diff] [blame] | 1370 | } |
| 1371 | |
Hugh Dickins | 46f65ec | 2011-08-03 16:21:23 -0700 | [diff] [blame] | 1372 | /* |
| 1373 | * If swap found in inode, free it and move page from swapcache to filecache. |
| 1374 | */ |
Christoph Hellwig | 10a9c49 | 2022-01-21 22:14:57 -0800 | [diff] [blame] | 1375 | static int shmem_unuse_inode(struct inode *inode, unsigned int type) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1376 | { |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1377 | struct address_space *mapping = inode->i_mapping; |
| 1378 | pgoff_t start = 0; |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1379 | struct folio_batch fbatch; |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1380 | pgoff_t indices[PAGEVEC_SIZE]; |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1381 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1382 | |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1383 | do { |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1384 | folio_batch_init(&fbatch); |
| 1385 | shmem_find_swap_entries(mapping, start, &fbatch, indices, type); |
| 1386 | if (folio_batch_count(&fbatch) == 0) { |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1387 | ret = 0; |
| 1388 | break; |
Hugh Dickins | 46f65ec | 2011-08-03 16:21:23 -0700 | [diff] [blame] | 1389 | } |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1390 | |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1391 | ret = shmem_unuse_swap_entries(inode, &fbatch, indices); |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1392 | if (ret < 0) |
| 1393 | break; |
| 1394 | |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 1395 | start = indices[folio_batch_count(&fbatch) - 1]; |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1396 | } while (true); |
| 1397 | |
| 1398 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1399 | } |
| 1400 | |
| 1401 | /* |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1402 | * Read all the shared memory data that resides in the swap |
| 1403 | * device 'type' back into memory, so the swap device can be |
| 1404 | * unused. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1405 | */ |
Christoph Hellwig | 10a9c49 | 2022-01-21 22:14:57 -0800 | [diff] [blame] | 1406 | int shmem_unuse(unsigned int type) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1407 | { |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1408 | struct shmem_inode_info *info, *next; |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1409 | int error = 0; |
| 1410 | |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1411 | if (list_empty(&shmem_swaplist)) |
| 1412 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1413 | |
Hugh Dickins | cb5f7b9 | 2008-02-04 22:28:52 -0800 | [diff] [blame] | 1414 | mutex_lock(&shmem_swaplist_mutex); |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1415 | list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { |
| 1416 | if (!info->swapped) { |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 1417 | list_del_init(&info->swaplist); |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1418 | continue; |
| 1419 | } |
Hugh Dickins | af53d3e | 2019-04-18 17:50:13 -0700 | [diff] [blame] | 1420 | /* |
| 1421 | * Drop the swaplist mutex while searching the inode for swap; |
| 1422 | * but before doing so, make sure shmem_evict_inode() will not |
| 1423 | * remove placeholder inode from swaplist, nor let it be freed |
| 1424 | * (igrab() would protect from unlink, but not from unmount). |
| 1425 | */ |
| 1426 | atomic_inc(&info->stop_eviction); |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1427 | mutex_unlock(&shmem_swaplist_mutex); |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1428 | |
Christoph Hellwig | 10a9c49 | 2022-01-21 22:14:57 -0800 | [diff] [blame] | 1429 | error = shmem_unuse_inode(&info->vfs_inode, type); |
Hugh Dickins | cb5f7b9 | 2008-02-04 22:28:52 -0800 | [diff] [blame] | 1430 | cond_resched(); |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1431 | |
| 1432 | mutex_lock(&shmem_swaplist_mutex); |
| 1433 | next = list_next_entry(info, swaplist); |
| 1434 | if (!info->swapped) |
| 1435 | list_del_init(&info->swaplist); |
Hugh Dickins | af53d3e | 2019-04-18 17:50:13 -0700 | [diff] [blame] | 1436 | if (atomic_dec_and_test(&info->stop_eviction)) |
| 1437 | wake_up_var(&info->stop_eviction); |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1438 | if (error) |
Hugh Dickins | 778dd89 | 2011-05-11 15:13:37 -0700 | [diff] [blame] | 1439 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1440 | } |
Hugh Dickins | cb5f7b9 | 2008-02-04 22:28:52 -0800 | [diff] [blame] | 1441 | mutex_unlock(&shmem_swaplist_mutex); |
Hugh Dickins | 778dd89 | 2011-05-11 15:13:37 -0700 | [diff] [blame] | 1442 | |
Hugh Dickins | 778dd89 | 2011-05-11 15:13:37 -0700 | [diff] [blame] | 1443 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1444 | } |
| 1445 | |
| 1446 | /* |
| 1447 | * Move the page from the page cache to the swap cache. |
| 1448 | */ |
| 1449 | static int shmem_writepage(struct page *page, struct writeback_control *wbc) |
| 1450 | { |
Matthew Wilcox (Oracle) | e2e3fdc | 2022-05-12 20:23:02 -0700 | [diff] [blame] | 1451 | struct folio *folio = page_folio(page); |
Luis Chamberlain | 8ccee8c | 2023-03-09 15:05:41 -0800 | [diff] [blame] | 1452 | struct address_space *mapping = folio->mapping; |
| 1453 | struct inode *inode = mapping->host; |
| 1454 | struct shmem_inode_info *info = SHMEM_I(inode); |
Luis Chamberlain | 2c6efe9 | 2023-03-09 15:05:45 -0800 | [diff] [blame] | 1455 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 1456 | swp_entry_t swap; |
| 1457 | pgoff_t index; |
Baolin Wang | 6501807 | 2024-08-12 15:42:02 +0800 | [diff] [blame] | 1458 | int nr_pages; |
Baolin Wang | 809bc865 | 2024-08-12 15:42:10 +0800 | [diff] [blame] | 1459 | bool split = false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1460 | |
Hugh Dickins | 1e6decf | 2021-09-02 14:54:43 -0700 | [diff] [blame] | 1461 | /* |
Luis Chamberlain | cf7992b | 2023-03-09 15:05:42 -0800 | [diff] [blame] | 1462 | * Our capabilities prevent regular writeback or sync from ever calling |
| 1463 | * shmem_writepage; but a stacking filesystem might use ->writepage of |
| 1464 | * its underlying filesystem, in which case tmpfs should write out to |
| 1465 | * swap only in response to memory pressure, and not for the writeback |
| 1466 | * threads or sync. |
| 1467 | */ |
| 1468 | if (WARN_ON_ONCE(!wbc->for_reclaim)) |
| 1469 | goto redirty; |
| 1470 | |
Luis Chamberlain | 2c6efe9 | 2023-03-09 15:05:45 -0800 | [diff] [blame] | 1471 | if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap)) |
Luis Chamberlain | 9a976f0 | 2023-03-09 15:05:43 -0800 | [diff] [blame] | 1472 | goto redirty; |
| 1473 | |
| 1474 | if (!total_swap_pages) |
| 1475 | goto redirty; |
| 1476 | |
Luis Chamberlain | cf7992b | 2023-03-09 15:05:42 -0800 | [diff] [blame] | 1477 | /* |
Baolin Wang | 809bc865 | 2024-08-12 15:42:10 +0800 | [diff] [blame] | 1478 | * If CONFIG_THP_SWAP is not enabled, the large folio should be |
| 1479 | * split when swapping. |
| 1480 | * |
| 1481 | * And shrinkage of pages beyond i_size does not split swap, so |
| 1482 | * swapout of a large folio crossing i_size needs to split too |
| 1483 | * (unless fallocate has been used to preallocate beyond EOF). |
Hugh Dickins | 1e6decf | 2021-09-02 14:54:43 -0700 | [diff] [blame] | 1484 | */ |
Matthew Wilcox (Oracle) | f530ed0 | 2022-09-02 20:46:02 +0100 | [diff] [blame] | 1485 | if (folio_test_large(folio)) { |
Baolin Wang | 809bc865 | 2024-08-12 15:42:10 +0800 | [diff] [blame] | 1486 | index = shmem_fallocend(inode, |
| 1487 | DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE)); |
| 1488 | if ((index > folio->index && index < folio_next_index(folio)) || |
| 1489 | !IS_ENABLED(CONFIG_THP_SWAP)) |
| 1490 | split = true; |
| 1491 | } |
| 1492 | |
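	/*
	 * Worked example for the check above (assuming 4KiB pages): with
	 * i_size of 1MiB and nothing fallocated beyond it, index is left at
	 * 256; a 2MiB folio starting at index 0 covers indices 0-511, so 256
	 * falls strictly inside it, the folio crosses i_size, and it must be
	 * split even when CONFIG_THP_SWAP is enabled.
	 */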
| 1493 | if (split) { |
| 1494 | try_split: |
Hugh Dickins | 1e6decf | 2021-09-02 14:54:43 -0700 | [diff] [blame] | 1495 | /* Ensure the subpages are still dirty */ |
Matthew Wilcox (Oracle) | f530ed0 | 2022-09-02 20:46:02 +0100 | [diff] [blame] | 1496 | folio_test_set_dirty(folio); |
Baolin Wang | 809bc865 | 2024-08-12 15:42:10 +0800 | [diff] [blame] | 1497 | if (split_huge_page_to_list_to_order(page, wbc->list, 0)) |
Hugh Dickins | 1e6decf | 2021-09-02 14:54:43 -0700 | [diff] [blame] | 1498 | goto redirty; |
Matthew Wilcox (Oracle) | f530ed0 | 2022-09-02 20:46:02 +0100 | [diff] [blame] | 1499 | folio = page_folio(page); |
| 1500 | folio_clear_dirty(folio); |
Hugh Dickins | 1e6decf | 2021-09-02 14:54:43 -0700 | [diff] [blame] | 1501 | } |
| 1502 | |
Matthew Wilcox (Oracle) | f530ed0 | 2022-09-02 20:46:02 +0100 | [diff] [blame] | 1503 | index = folio->index; |
Baolin Wang | 6501807 | 2024-08-12 15:42:02 +0800 | [diff] [blame] | 1504 | nr_pages = folio_nr_pages(folio); |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1505 | |
| 1506 | /* |
| 1507 | * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC |
| 1508 | * value into swapfile.c, the only way we can correctly account for a |
Matthew Wilcox (Oracle) | f530ed0 | 2022-09-02 20:46:02 +0100 | [diff] [blame] | 1509 | * fallocated folio arriving here is now to initialize it and write it. |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1510 | * |
Matthew Wilcox (Oracle) | f530ed0 | 2022-09-02 20:46:02 +0100 | [diff] [blame] | 1511 | * That's okay for a folio already fallocated earlier, but if we have |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1512 | * not yet completed the fallocation, then (a) we want to keep track |
Matthew Wilcox (Oracle) | f530ed0 | 2022-09-02 20:46:02 +0100 | [diff] [blame] | 1513 | * of this folio in case we have to undo it, and (b) it may not be a |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1514 | * good idea to continue anyway, once we're pushing into swap. So |
Matthew Wilcox (Oracle) | f530ed0 | 2022-09-02 20:46:02 +0100 | [diff] [blame] | 1515 | * reactivate the folio, and let shmem_fallocate() quit when too many. |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1516 | */ |
Matthew Wilcox (Oracle) | f530ed0 | 2022-09-02 20:46:02 +0100 | [diff] [blame] | 1517 | if (!folio_test_uptodate(folio)) { |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1518 | if (inode->i_private) { |
| 1519 | struct shmem_falloc *shmem_falloc; |
| 1520 | spin_lock(&inode->i_lock); |
| 1521 | shmem_falloc = inode->i_private; |
| 1522 | if (shmem_falloc && |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 1523 | !shmem_falloc->waitq && |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1524 | index >= shmem_falloc->start && |
| 1525 | index < shmem_falloc->next) |
| 1526 | shmem_falloc->nr_unswapped++; |
| 1527 | else |
| 1528 | shmem_falloc = NULL; |
| 1529 | spin_unlock(&inode->i_lock); |
| 1530 | if (shmem_falloc) |
| 1531 | goto redirty; |
| 1532 | } |
Matthew Wilcox (Oracle) | f530ed0 | 2022-09-02 20:46:02 +0100 | [diff] [blame] | 1533 | folio_zero_range(folio, 0, folio_size(folio)); |
| 1534 | flush_dcache_folio(folio); |
| 1535 | folio_mark_uptodate(folio); |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1536 | } |
| 1537 | |
Matthew Wilcox (Oracle) | e2e3fdc | 2022-05-12 20:23:02 -0700 | [diff] [blame] | 1538 | swap = folio_alloc_swap(folio); |
Baolin Wang | 809bc865 | 2024-08-12 15:42:10 +0800 | [diff] [blame] | 1539 | if (!swap.val) { |
| 1540 | if (nr_pages > 1) |
| 1541 | goto try_split; |
| 1542 | |
Hugh Dickins | 48f170f | 2011-07-25 17:12:37 -0700 | [diff] [blame] | 1543 | goto redirty; |
Baolin Wang | 809bc865 | 2024-08-12 15:42:10 +0800 | [diff] [blame] | 1544 | } |
Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1545 | |
Hugh Dickins | b1dea80 | 2011-05-11 15:13:36 -0700 | [diff] [blame] | 1546 | /* |
| 1547 | * Add inode to shmem_unuse()'s list of swapped-out inodes, |
Matthew Wilcox (Oracle) | f530ed0 | 2022-09-02 20:46:02 +0100 | [diff] [blame] | 1548 | * if it's not already there. Do it now before the folio is |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 1549 | * moved to swap cache, when its pagelock no longer protects |
Hugh Dickins | b1dea80 | 2011-05-11 15:13:36 -0700 | [diff] [blame] | 1550 | * the inode from eviction. But don't unlock the mutex until |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 1551 | * we've incremented swapped, because shmem_unuse_inode() will |
| 1552 | * prune a !swapped inode from the swaplist under this mutex. |
Hugh Dickins | b1dea80 | 2011-05-11 15:13:36 -0700 | [diff] [blame] | 1553 | */ |
Hugh Dickins | 48f170f | 2011-07-25 17:12:37 -0700 | [diff] [blame] | 1554 | mutex_lock(&shmem_swaplist_mutex); |
| 1555 | if (list_empty(&info->swaplist)) |
Vineeth Remanan Pillai | b56a2d8 | 2019-03-05 15:47:03 -0800 | [diff] [blame] | 1556 | list_add(&info->swaplist, &shmem_swaplist); |
Hugh Dickins | b1dea80 | 2011-05-11 15:13:36 -0700 | [diff] [blame] | 1557 | |
Matthew Wilcox (Oracle) | a4c366f | 2022-09-02 20:46:08 +0100 | [diff] [blame] | 1558 | if (add_to_swap_cache(folio, swap, |
Joonsoo Kim | 3852f67 | 2020-08-11 18:30:47 -0700 | [diff] [blame] | 1559 | __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, |
| 1560 | NULL) == 0) { |
Baolin Wang | 6501807 | 2024-08-12 15:42:02 +0800 | [diff] [blame] | 1561 | shmem_recalc_inode(inode, 0, nr_pages); |
| 1562 | swap_shmem_alloc(swap, nr_pages); |
Matthew Wilcox (Oracle) | 4cd400f | 2022-09-02 20:46:03 +0100 | [diff] [blame] | 1563 | shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap)); |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 1564 | |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 1565 | mutex_unlock(&shmem_swaplist_mutex); |
Matthew Wilcox (Oracle) | f530ed0 | 2022-09-02 20:46:02 +0100 | [diff] [blame] | 1566 | BUG_ON(folio_mapped(folio)); |
Nhat Pham | 501a06f | 2023-12-07 11:24:06 -0800 | [diff] [blame] | 1567 | return swap_writepage(&folio->page, wbc); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1568 | } |
| 1569 | |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 1570 | mutex_unlock(&shmem_swaplist_mutex); |
Matthew Wilcox (Oracle) | 4081f74 | 2022-09-02 20:46:09 +0100 | [diff] [blame] | 1571 | put_swap_folio(folio, swap); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1572 | redirty: |
Matthew Wilcox (Oracle) | f530ed0 | 2022-09-02 20:46:02 +0100 | [diff] [blame] | 1573 | folio_mark_dirty(folio); |
Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1574 | if (wbc->for_reclaim) |
Matthew Wilcox (Oracle) | f530ed0 | 2022-09-02 20:46:02 +0100 | [diff] [blame] | 1575 | return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */ |
| 1576 | folio_unlock(folio); |
Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1577 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1578 | } |
| 1579 | |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 1580 | #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1581 | static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1582 | { |
Lee Schermerhorn | 095f1fc | 2008-04-28 02:13:23 -0700 | [diff] [blame] | 1583 | char buffer[64]; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1584 | |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1585 | if (!mpol || mpol->mode == MPOL_DEFAULT) |
Lee Schermerhorn | 095f1fc | 2008-04-28 02:13:23 -0700 | [diff] [blame] | 1586 | return; /* show nothing */ |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1587 | |
Hugh Dickins | a7a88b2 | 2013-01-02 02:04:23 -0800 | [diff] [blame] | 1588 | mpol_to_str(buffer, sizeof(buffer), mpol); |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1589 | |
Lee Schermerhorn | 095f1fc | 2008-04-28 02:13:23 -0700 | [diff] [blame] | 1590 | seq_printf(seq, ",mpol=%s", buffer); |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1591 | } |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1592 | |
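/*
 * Hedged note: the string emitted by shmem_show_mpol() above mirrors the
 * tmpfs "mpol=" mount option, so a filesystem mounted with, say,
 * mpol=interleave:0-3 is reported as ",mpol=interleave:0-3" in /proc/mounts.
 */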
| 1593 | static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) |
| 1594 | { |
| 1595 | struct mempolicy *mpol = NULL; |
| 1596 | if (sbinfo->mpol) { |
Sebastian Andrzej Siewior | bf11b9a | 2021-09-02 14:54:03 -0700 | [diff] [blame] | 1597 | raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1598 | mpol = sbinfo->mpol; |
| 1599 | mpol_get(mpol); |
Sebastian Andrzej Siewior | bf11b9a | 2021-09-02 14:54:03 -0700 | [diff] [blame] | 1600 | raw_spin_unlock(&sbinfo->stat_lock); |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1601 | } |
| 1602 | return mpol; |
| 1603 | } |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 1604 | #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ |
| 1605 | static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) |
| 1606 | { |
| 1607 | } |
| 1608 | static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) |
| 1609 | { |
| 1610 | return NULL; |
| 1611 | } |
| 1612 | #endif /* CONFIG_NUMA && CONFIG_TMPFS */ |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1613 | |
Hugh Dickins | ddc1a5c | 2023-10-19 13:39:08 -0700 | [diff] [blame] | 1614 | static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info, |
| 1615 | pgoff_t index, unsigned int order, pgoff_t *ilx); |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1616 | |
Hugh Dickins | ddc1a5c | 2023-10-19 13:39:08 -0700 | [diff] [blame] | 1617 | static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp, |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1618 | struct shmem_inode_info *info, pgoff_t index) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1619 | { |
Hugh Dickins | ddc1a5c | 2023-10-19 13:39:08 -0700 | [diff] [blame] | 1620 | struct mempolicy *mpol; |
| 1621 | pgoff_t ilx; |
Matthew Wilcox (Oracle) | a4575c4 | 2023-12-13 21:58:42 +0000 | [diff] [blame] | 1622 | struct folio *folio; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1623 | |
Hugh Dickins | ddc1a5c | 2023-10-19 13:39:08 -0700 | [diff] [blame] | 1624 | mpol = shmem_get_pgoff_policy(info, index, 0, &ilx); |
Matthew Wilcox (Oracle) | a4575c4 | 2023-12-13 21:58:42 +0000 | [diff] [blame] | 1625 | folio = swap_cluster_readahead(swap, gfp, mpol, ilx); |
Hugh Dickins | ddc1a5c | 2023-10-19 13:39:08 -0700 | [diff] [blame] | 1626 | mpol_cond_put(mpol); |
Mel Gorman | 18a2f37 | 2012-12-05 14:01:41 -0800 | [diff] [blame] | 1627 | |
Matthew Wilcox (Oracle) | a4575c4 | 2023-12-13 21:58:42 +0000 | [diff] [blame] | 1628 | return folio; |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1629 | } |
Mel Gorman | 18a2f37 | 2012-12-05 14:01:41 -0800 | [diff] [blame] | 1630 | |
Rik van Riel | 78cc8cd | 2021-02-25 17:16:22 -0800 | [diff] [blame] | 1631 | /* |
| 1632 | * Make sure huge_gfp is always more limited than limit_gfp. |
| 1633 | * Some of the flags set permissions, while others set limitations. |
| 1634 | */ |
| 1635 | static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) |
| 1636 | { |
| 1637 | gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM; |
| 1638 | gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY; |
Rik van Riel | 187df5d | 2021-02-25 17:16:29 -0800 | [diff] [blame] | 1639 | gfp_t zoneflags = limit_gfp & GFP_ZONEMASK; |
| 1640 | gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK); |
| 1641 | |
| 1642 | /* Allow allocations only from the originally specified zones. */ |
| 1643 | result |= zoneflags; |
Rik van Riel | 78cc8cd | 2021-02-25 17:16:22 -0800 | [diff] [blame] | 1644 | |
| 1645 | /* |
| 1646 | * Minimize the result gfp by taking the union with the deny flags, |
| 1647 | * and the intersection of the allow flags. |
| 1648 | */ |
| 1649 | result |= (limit_gfp & denyflags); |
| 1650 | result |= (huge_gfp & limit_gfp) & allowflags; |
| 1651 | |
| 1652 | return result; |
| 1653 | } |
| 1654 | |
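A minimal caller sketch (editorial addition, not kernel code) may make the intended use of limit_gfp_mask() concrete; it mirrors the call pattern used later in shmem_get_folio_gfp(), and example_thp_alloc() is a hypothetical name:

static struct folio *example_thp_alloc(struct vm_area_struct *vma,
				       struct address_space *mapping,
				       struct shmem_inode_info *info,
				       pgoff_t index)
{
	gfp_t limit_gfp = mapping_gfp_mask(mapping);	/* the mapping's gfp, e.g. GFP_HIGHUSER */
	gfp_t huge_gfp = vma_thp_gfp_mask(vma);		/* per-VMA THP allocation policy */

	/* Keep limit_gfp's zones, and inherit its NOWARN/NORETRY restrictions. */
	huge_gfp = limit_gfp_mask(huge_gfp, limit_gfp);
	return shmem_alloc_folio(huge_gfp, HPAGE_PMD_ORDER, info, index);
}

shmem_alloc_folio() is the helper defined further down in this file; the sketch assumes a forward declaration.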
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1655 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
Bang Li | 26c7d84 | 2024-07-05 11:23:09 +0800 | [diff] [blame] | 1656 | unsigned long shmem_allowable_huge_orders(struct inode *inode, |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1657 | struct vm_area_struct *vma, pgoff_t index, |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 1658 | loff_t write_end, bool shmem_huge_force) |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1659 | { |
| 1660 | unsigned long mask = READ_ONCE(huge_shmem_orders_always); |
| 1661 | unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size); |
Baolin Wang | 6beeab8 | 2024-07-22 13:43:19 +0800 | [diff] [blame] | 1662 | unsigned long vm_flags = vma ? vma->vm_flags : 0; |
| 1663 | bool global_huge; |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1664 | loff_t i_size; |
| 1665 | int order; |
| 1666 | |
Baolin Wang | 6beeab8 | 2024-07-22 13:43:19 +0800 | [diff] [blame] | 1667 | if (vma && ((vm_flags & VM_NOHUGEPAGE) || |
| 1668 | test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))) |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1669 | return 0; |
| 1670 | |
| 1671 | /* If the hardware/firmware has marked hugepage support as disabled. */ |
| 1672 | if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED)) |
| 1673 | return 0; |
| 1674 | |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 1675 | global_huge = shmem_huge_global_enabled(inode, index, write_end, |
| 1676 | shmem_huge_force, vma, vm_flags); |
Baolin Wang | 6beeab8 | 2024-07-22 13:43:19 +0800 | [diff] [blame] | 1677 | if (!vma || !vma_is_anon_shmem(vma)) { |
| 1678 | /* |
| 1679 | * For tmpfs, we now only support PMD-sized THP if huge page |
| 1680 | * is enabled, otherwise fall back to order 0. |
| 1681 | */ |
| 1682 | return global_huge ? BIT(HPAGE_PMD_ORDER) : 0; |
| 1683 | } |
| 1684 | |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1685 | /* |
| 1686 | * Following the 'deny' semantics of the top level, force the huge |
| 1687 | * option off from all mounts. |
| 1688 | * option off for all mounts. |
| 1689 | if (shmem_huge == SHMEM_HUGE_DENY) |
| 1690 | return 0; |
| 1691 | |
| 1692 | /* |
| 1693 | * Only allow inherit orders if the top-level value is 'force', which |
| 1694 | * means non-PMD-sized THP cannot override the 'huge' mount option now. |
| 1695 | */ |
| 1696 | if (shmem_huge == SHMEM_HUGE_FORCE) |
| 1697 | return READ_ONCE(huge_shmem_orders_inherit); |
| 1698 | |
| 1699 | /* Allow mTHP that will be fully within i_size. */ |
| 1700 | order = highest_order(within_size_orders); |
| 1701 | while (within_size_orders) { |
| 1702 | index = round_up(index + 1, order); |
| 1703 | i_size = round_up(i_size_read(inode), PAGE_SIZE); |
| 1704 | if (i_size >> PAGE_SHIFT >= index) { |
| 1705 | mask |= within_size_orders; |
| 1706 | break; |
| 1707 | } |
| 1708 | |
| 1709 | order = next_order(&within_size_orders, order); |
| 1710 | } |
| 1711 | |
| 1712 | if (vm_flags & VM_HUGEPAGE) |
| 1713 | mask |= READ_ONCE(huge_shmem_orders_madvise); |
| 1714 | |
| 1715 | if (global_huge) |
| 1716 | mask |= READ_ONCE(huge_shmem_orders_inherit); |
| 1717 | |
Baolin Wang | b66b1b7 | 2024-07-31 13:46:19 +0800 | [diff] [blame] | 1718 | return THP_ORDERS_ALL_FILE_DEFAULT & mask; |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1719 | } |
| 1720 | |
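The value returned above is a bitmask with one bit set per allowed folio order. As a hedged sketch of how such a mask is consumed (example_try_orders() is a hypothetical name; the loop mirrors shmem_alloc_and_add_folio() below):

static void example_try_orders(unsigned long orders)
{
	int order = highest_order(orders);		/* largest allowed order first */

	while (orders) {
		unsigned long nr = 1UL << order;	/* pages needed at this order */

		/* ... attempt an allocation of 'nr' pages here ... */
		order = next_order(&orders, order);	/* drop this order, try a smaller one */
	}
}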
| 1721 | static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf, |
| 1722 | struct address_space *mapping, pgoff_t index, |
| 1723 | unsigned long orders) |
| 1724 | { |
Baolin Wang | 0bedf001 | 2024-07-22 13:43:17 +0800 | [diff] [blame] | 1725 | struct vm_area_struct *vma = vmf ? vmf->vma : NULL; |
Baolin Wang | 4cbf320 | 2024-07-31 13:46:20 +0800 | [diff] [blame] | 1726 | pgoff_t aligned_index; |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1727 | unsigned long pages; |
| 1728 | int order; |
| 1729 | |
Baolin Wang | 0bedf001 | 2024-07-22 13:43:17 +0800 | [diff] [blame] | 1730 | if (vma) { |
| 1731 | orders = thp_vma_suitable_orders(vma, vmf->address, orders); |
| 1732 | if (!orders) |
| 1733 | return 0; |
| 1734 | } |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1735 | |
| 1736 | /* Find the highest order that can be added to the page cache */ |
| 1737 | order = highest_order(orders); |
| 1738 | while (orders) { |
| 1739 | pages = 1UL << order; |
Baolin Wang | 4cbf320 | 2024-07-31 13:46:20 +0800 | [diff] [blame] | 1740 | aligned_index = round_down(index, pages); |
Baolin Wang | 0bedf001 | 2024-07-22 13:43:17 +0800 | [diff] [blame] | 1741 | /* |
| 1742 | * Check for conflict before waiting on a huge allocation. |
| 1743 | * Conflict might be that a huge page has just been allocated |
| 1744 | * and added to page cache by a racing thread, or that there |
| 1745 | * is already at least one small page in the huge extent. |
| 1746 | * Be careful to retry when appropriate, but not forever! |
| 1747 | * Elsewhere -EEXIST would be the right code, but not here. |
| 1748 | */ |
Baolin Wang | 4cbf320 | 2024-07-31 13:46:20 +0800 | [diff] [blame] | 1749 | if (!xa_find(&mapping->i_pages, &aligned_index, |
| 1750 | aligned_index + pages - 1, XA_PRESENT)) |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1751 | break; |
| 1752 | order = next_order(&orders, order); |
| 1753 | } |
| 1754 | |
| 1755 | return orders; |
| 1756 | } |
| 1757 | #else |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1758 | static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf, |
| 1759 | struct address_space *mapping, pgoff_t index, |
| 1760 | unsigned long orders) |
| 1761 | { |
| 1762 | return 0; |
| 1763 | } |
| 1764 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
| 1765 | |
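A concrete walk-through of the xa_find() conflict check in shmem_suitable_orders() above, with illustrative numbers only:

/*
 * index = 5, order = 4  ->  pages = 16, aligned_index = round_down(5, 16) = 0
 *
 * If anything is already present in the page cache between indices 0 and 15
 * (say a small folio at index 3), xa_find() finds it and order 4 is skipped;
 * next_order() then retries a smaller enabled order, e.g. order 2, whose
 * extent 4..7 no longer covers the conflicting index.
 */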
Kefeng Wang | 6f77546 | 2024-05-15 15:07:09 +0800 | [diff] [blame] | 1766 | static struct folio *shmem_alloc_folio(gfp_t gfp, int order, |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1767 | struct shmem_inode_info *info, pgoff_t index) |
| 1768 | { |
Hugh Dickins | ddc1a5c | 2023-10-19 13:39:08 -0700 | [diff] [blame] | 1769 | struct mempolicy *mpol; |
| 1770 | pgoff_t ilx; |
Kefeng Wang | 6f77546 | 2024-05-15 15:07:09 +0800 | [diff] [blame] | 1771 | struct folio *folio; |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1772 | |
Kefeng Wang | 6f77546 | 2024-05-15 15:07:09 +0800 | [diff] [blame] | 1773 | mpol = shmem_get_pgoff_policy(info, index, order, &ilx); |
| 1774 | folio = folio_alloc_mpol(gfp, order, mpol, ilx, numa_node_id()); |
Hugh Dickins | ddc1a5c | 2023-10-19 13:39:08 -0700 | [diff] [blame] | 1775 | mpol_cond_put(mpol); |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1776 | |
Kefeng Wang | 6f77546 | 2024-05-15 15:07:09 +0800 | [diff] [blame] | 1777 | return folio; |
Mel Gorman | 18a2f37 | 2012-12-05 14:01:41 -0800 | [diff] [blame] | 1778 | } |
| 1779 | |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1780 | static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf, |
| 1781 | gfp_t gfp, struct inode *inode, pgoff_t index, |
| 1782 | struct mm_struct *fault_mm, unsigned long orders) |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1783 | { |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 1784 | struct address_space *mapping = inode->i_mapping; |
Mike Rapoport | 0f07969 | 2017-09-06 16:22:59 -0700 | [diff] [blame] | 1785 | struct shmem_inode_info *info = SHMEM_I(inode); |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1786 | unsigned long suitable_orders = 0; |
| 1787 | struct folio *folio = NULL; |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 1788 | long pages; |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1789 | int error, order; |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1790 | |
Matthew Wilcox (Oracle) | 396bcc5 | 2020-04-06 20:04:35 -0700 | [diff] [blame] | 1791 | if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1792 | orders = 0; |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1793 | |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1794 | if (orders > 0) { |
Baolin Wang | 0bedf001 | 2024-07-22 13:43:17 +0800 | [diff] [blame] | 1795 | suitable_orders = shmem_suitable_orders(inode, vmf, |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1796 | mapping, index, orders); |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 1797 | |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1798 | order = highest_order(suitable_orders); |
| 1799 | while (suitable_orders) { |
| 1800 | pages = 1UL << order; |
| 1801 | index = round_down(index, pages); |
| 1802 | folio = shmem_alloc_folio(gfp, order, info, index); |
| 1803 | if (folio) |
| 1804 | goto allocated; |
| 1805 | |
| 1806 | if (pages == HPAGE_PMD_NR) |
| 1807 | count_vm_event(THP_FILE_FALLBACK); |
Ryan Roberts | 63d9866 | 2024-07-10 10:55:01 +0100 | [diff] [blame] | 1808 | count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK); |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1809 | order = next_order(&suitable_orders, order); |
| 1810 | } |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 1811 | } else { |
| 1812 | pages = 1; |
Kefeng Wang | 6f77546 | 2024-05-15 15:07:09 +0800 | [diff] [blame] | 1813 | folio = shmem_alloc_folio(gfp, 0, info, index); |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 1814 | } |
| 1815 | if (!folio) |
| 1816 | return ERR_PTR(-ENOMEM); |
| 1817 | |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 1818 | allocated: |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 1819 | __folio_set_locked(folio); |
| 1820 | __folio_set_swapbacked(folio); |
| 1821 | |
| 1822 | gfp &= GFP_RECLAIM_MASK; |
| 1823 | error = mem_cgroup_charge(folio, fault_mm, gfp); |
| 1824 | if (error) { |
| 1825 | if (xa_find(&mapping->i_pages, &index, |
| 1826 | index + pages - 1, XA_PRESENT)) { |
| 1827 | error = -EEXIST; |
Baolin Wang | 66f4458 | 2024-06-11 18:11:10 +0800 | [diff] [blame] | 1828 | } else if (pages > 1) { |
| 1829 | if (pages == HPAGE_PMD_NR) { |
| 1830 | count_vm_event(THP_FILE_FALLBACK); |
| 1831 | count_vm_event(THP_FILE_FALLBACK_CHARGE); |
| 1832 | } |
Ryan Roberts | 63d9866 | 2024-07-10 10:55:01 +0100 | [diff] [blame] | 1833 | count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK); |
| 1834 | count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE); |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 1835 | } |
| 1836 | goto unlock; |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 1837 | } |
Mel Gorman | 18a2f37 | 2012-12-05 14:01:41 -0800 | [diff] [blame] | 1838 | |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 1839 | error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp); |
| 1840 | if (error) |
| 1841 | goto unlock; |
| 1842 | |
| 1843 | error = shmem_inode_acct_blocks(inode, pages); |
| 1844 | if (error) { |
| 1845 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
| 1846 | long freed; |
| 1847 | /* |
| 1848 | * Try to reclaim some space by splitting a few |
| 1849 | * large folios beyond i_size on the filesystem. |
| 1850 | */ |
Hugh Dickins | 1544405 | 2024-08-25 16:25:39 -0700 | [diff] [blame] | 1851 | shmem_unused_huge_shrink(sbinfo, NULL, pages); |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 1852 | /* |
| 1853 | * And do a shmem_recalc_inode() to account for freed pages: |
| 1854 | * except our folio is there in cache, so not quite balanced. |
| 1855 | */ |
| 1856 | spin_lock(&info->lock); |
| 1857 | freed = pages + info->alloced - info->swapped - |
| 1858 | READ_ONCE(mapping->nrpages); |
| 1859 | if (freed > 0) |
| 1860 | info->alloced -= freed; |
| 1861 | spin_unlock(&info->lock); |
| 1862 | if (freed > 0) |
| 1863 | shmem_inode_unacct_blocks(inode, freed); |
| 1864 | error = shmem_inode_acct_blocks(inode, pages); |
| 1865 | if (error) { |
| 1866 | filemap_remove_folio(folio); |
| 1867 | goto unlock; |
| 1868 | } |
| 1869 | } |
| 1870 | |
| 1871 | shmem_recalc_inode(inode, pages, 0); |
| 1872 | folio_add_lru(folio); |
| 1873 | return folio; |
| 1874 | |
| 1875 | unlock: |
| 1876 | folio_unlock(folio); |
| 1877 | folio_put(folio); |
| 1878 | return ERR_PTR(error); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1879 | } |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1880 | |
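The block-accounting retry above effectively repeats shmem_recalc_inode() by hand, compensating for the new folio already being in the page cache but not yet counted in info->alloced. A worked example with made-up numbers:

/*
 * pages            =   8    the order-3 folio just added to the cache
 * info->alloced    = 100    pages previously accounted to this inode
 * info->swapped    =  20    of those currently live in swap
 * mapping->nrpages =  84    pages in cache after shmem_unused_huge_shrink()
 *                           (includes our 8, minus 4 the shrinker released)
 *
 * freed = pages + alloced - swapped - nrpages
 *       =   8   +   100   -   20    -   84   = 4
 *
 * Those 4 blocks are unaccounted before shmem_inode_acct_blocks() is retried,
 * so the second attempt may now succeed.
 */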
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1881 | /* |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1882 | * When a page is moved from swapcache to shmem filecache (either by the |
Matthew Wilcox (Oracle) | fc26bab | 2022-09-02 20:46:17 +0100 | [diff] [blame] | 1883 | * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1884 | * shmem_unuse_inode()), it may have been read in earlier from swap, in |
| 1885 | * ignorance of the mapping it belongs to. If that mapping has special |
| 1886 | * constraints (like the gma500 GEM driver, which requires RAM below 4GB), |
| 1887 | * we may need to copy to a suitable page before moving to filecache. |
| 1888 | * |
| 1889 | * In a future release, this may well be extended to respect cpuset and |
| 1890 | * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); |
| 1891 | * but for now it is a simple matter of zone. |
| 1892 | */ |
Matthew Wilcox (Oracle) | 069d849 | 2022-05-12 20:23:04 -0700 | [diff] [blame] | 1893 | static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp) |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1894 | { |
Matthew Wilcox (Oracle) | 069d849 | 2022-05-12 20:23:04 -0700 | [diff] [blame] | 1895 | return folio_zonenum(folio) > gfp_zone(gfp); |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1896 | } |
| 1897 | |
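A hypothetical case in the spirit of the gma500 example above:

/*
 * A mapping restricted to RAM below 4GB might use a gfp such as
 * GFP_KERNEL | __GFP_DMA32, so gfp_zone(gfp) == ZONE_DMA32.  If swapin
 * happened to place the folio in ZONE_NORMAL, then
 *
 *	folio_zonenum(folio) > gfp_zone(gfp)
 *
 * and shmem_replace_folio() must copy it into a suitable zone.
 */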
Matthew Wilcox (Oracle) | 0d698e2 | 2022-09-02 20:46:14 +0100 | [diff] [blame] | 1898 | static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, |
Baolin Wang | 736f0e0 | 2024-08-12 15:42:07 +0800 | [diff] [blame] | 1899 | struct shmem_inode_info *info, pgoff_t index, |
| 1900 | struct vm_area_struct *vma) |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1901 | { |
Baolin Wang | 736f0e0 | 2024-08-12 15:42:07 +0800 | [diff] [blame] | 1902 | struct folio *new, *old = *foliop; |
| 1903 | swp_entry_t entry = old->swap; |
| 1904 | struct address_space *swap_mapping = swap_address_space(entry); |
| 1905 | pgoff_t swap_index = swap_cache_index(entry); |
| 1906 | XA_STATE(xas, &swap_mapping->i_pages, swap_index); |
| 1907 | int nr_pages = folio_nr_pages(old); |
| 1908 | int error = 0, i; |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1909 | |
| 1910 | /* |
| 1911 | * We have arrived here because our zones are constrained, so don't |
| 1912 | * limit chance of success by further cpuset and node constraints. |
| 1913 | */ |
| 1914 | gfp &= ~GFP_CONSTRAINT_MASK; |
Baolin Wang | 736f0e0 | 2024-08-12 15:42:07 +0800 | [diff] [blame] | 1915 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 1916 | if (nr_pages > 1) { |
| 1917 | gfp_t huge_gfp = vma_thp_gfp_mask(vma); |
| 1918 | |
| 1919 | gfp = limit_gfp_mask(huge_gfp, gfp); |
| 1920 | } |
| 1921 | #endif |
| 1922 | |
| 1923 | new = shmem_alloc_folio(gfp, folio_order(old), info, index); |
Matthew Wilcox (Oracle) | 907ea17 | 2022-09-02 20:46:04 +0100 | [diff] [blame] | 1924 | if (!new) |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1925 | return -ENOMEM; |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1926 | |
Baolin Wang | 736f0e0 | 2024-08-12 15:42:07 +0800 | [diff] [blame] | 1927 | folio_ref_add(new, nr_pages); |
Matthew Wilcox (Oracle) | 907ea17 | 2022-09-02 20:46:04 +0100 | [diff] [blame] | 1928 | folio_copy(new, old); |
| 1929 | flush_dcache_folio(new); |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1930 | |
Matthew Wilcox (Oracle) | 907ea17 | 2022-09-02 20:46:04 +0100 | [diff] [blame] | 1931 | __folio_set_locked(new); |
| 1932 | __folio_set_swapbacked(new); |
| 1933 | folio_mark_uptodate(new); |
David Hildenbrand | 3d2c908 | 2023-08-21 18:08:48 +0200 | [diff] [blame] | 1934 | new->swap = entry; |
Matthew Wilcox (Oracle) | 907ea17 | 2022-09-02 20:46:04 +0100 | [diff] [blame] | 1935 | folio_set_swapcache(new); |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1936 | |
Baolin Wang | 736f0e0 | 2024-08-12 15:42:07 +0800 | [diff] [blame] | 1937 | /* Swap cache still stores N entries instead of a high-order entry */ |
Matthew Wilcox | b93b016 | 2018-04-10 16:36:56 -0700 | [diff] [blame] | 1938 | xa_lock_irq(&swap_mapping->i_pages); |
Baolin Wang | 736f0e0 | 2024-08-12 15:42:07 +0800 | [diff] [blame] | 1939 | for (i = 0; i < nr_pages; i++) { |
| 1940 | void *item = xas_load(&xas); |
| 1941 | |
| 1942 | if (item != old) { |
| 1943 | error = -ENOENT; |
| 1944 | break; |
| 1945 | } |
| 1946 | |
| 1947 | xas_store(&xas, new); |
| 1948 | xas_next(&xas); |
| 1949 | } |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1950 | if (!error) { |
Baolin Wang | 9094b4a | 2024-06-13 16:21:19 +0800 | [diff] [blame] | 1951 | mem_cgroup_replace_folio(old, new); |
Baolin Wang | 736f0e0 | 2024-08-12 15:42:07 +0800 | [diff] [blame] | 1952 | __lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages); |
| 1953 | __lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages); |
| 1954 | __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages); |
| 1955 | __lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages); |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1956 | } |
Matthew Wilcox | b93b016 | 2018-04-10 16:36:56 -0700 | [diff] [blame] | 1957 | xa_unlock_irq(&swap_mapping->i_pages); |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1958 | |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1959 | if (unlikely(error)) { |
| 1960 | /* |
Matthew Wilcox (Oracle) | 32f51ea | 2024-08-21 20:34:37 +0100 | [diff] [blame] | 1961 | * Is this possible? I think not, now that our callers |
| 1962 | * check both the swapcache flag and folio->private |
| 1963 | * after getting the folio lock; but be defensive. |
| 1964 | * Reverse old to new for clear and free. |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1965 | */ |
Matthew Wilcox (Oracle) | 907ea17 | 2022-09-02 20:46:04 +0100 | [diff] [blame] | 1966 | old = new; |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1967 | } else { |
Matthew Wilcox (Oracle) | 907ea17 | 2022-09-02 20:46:04 +0100 | [diff] [blame] | 1968 | folio_add_lru(new); |
Matthew Wilcox (Oracle) | 0d698e2 | 2022-09-02 20:46:14 +0100 | [diff] [blame] | 1969 | *foliop = new; |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1970 | } |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1971 | |
Matthew Wilcox (Oracle) | 907ea17 | 2022-09-02 20:46:04 +0100 | [diff] [blame] | 1972 | folio_clear_swapcache(old); |
| 1973 | old->private = NULL; |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1974 | |
Matthew Wilcox (Oracle) | 907ea17 | 2022-09-02 20:46:04 +0100 | [diff] [blame] | 1975 | folio_unlock(old); |
Baolin Wang | 736f0e0 | 2024-08-12 15:42:07 +0800 | [diff] [blame] | 1976 | /* |
| 1977 | * The old folio has been removed from the swap cache; drop its |
| 1978 | * 'nr_pages' references, as well as the one temporary reference |
| 1979 | * taken from the swap cache. |
| 1980 | */ |
| 1981 | folio_put_refs(old, nr_pages + 1); |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1982 | return error; |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1983 | } |
| 1984 | |
Miaohe Lin | 6cec2b9 | 2022-05-19 20:50:29 +0800 | [diff] [blame] | 1985 | static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, |
| 1986 | struct folio *folio, swp_entry_t swap) |
| 1987 | { |
| 1988 | struct address_space *mapping = inode->i_mapping; |
Miaohe Lin | 6cec2b9 | 2022-05-19 20:50:29 +0800 | [diff] [blame] | 1989 | swp_entry_t swapin_error; |
| 1990 | void *old; |
Baolin Wang | 40ff2d1 | 2024-08-12 15:42:06 +0800 | [diff] [blame] | 1991 | int nr_pages; |
Miaohe Lin | 6cec2b9 | 2022-05-19 20:50:29 +0800 | [diff] [blame] | 1992 | |
Axel Rasmussen | af19487 | 2023-07-07 14:55:33 -0700 | [diff] [blame] | 1993 | swapin_error = make_poisoned_swp_entry(); |
Miaohe Lin | 6cec2b9 | 2022-05-19 20:50:29 +0800 | [diff] [blame] | 1994 | old = xa_cmpxchg_irq(&mapping->i_pages, index, |
| 1995 | swp_to_radix_entry(swap), |
| 1996 | swp_to_radix_entry(swapin_error), 0); |
| 1997 | if (old != swp_to_radix_entry(swap)) |
| 1998 | return; |
| 1999 | |
Baolin Wang | 40ff2d1 | 2024-08-12 15:42:06 +0800 | [diff] [blame] | 2000 | nr_pages = folio_nr_pages(folio); |
Miaohe Lin | 6cec2b9 | 2022-05-19 20:50:29 +0800 | [diff] [blame] | 2001 | folio_wait_writeback(folio); |
Matthew Wilcox (Oracle) | 75fa68a | 2022-06-17 18:50:19 +0100 | [diff] [blame] | 2002 | delete_from_swap_cache(folio); |
Miaohe Lin | 6cec2b9 | 2022-05-19 20:50:29 +0800 | [diff] [blame] | 2003 | /* |
Hugh Dickins | 3c1b752 | 2023-08-03 22:46:11 -0700 | [diff] [blame] | 2004 | * Don't treat a swapin error folio as alloced. Otherwise inode->i_blocks |
| 2005 | * won't be 0 when the inode is released and would thus trigger WARN_ON(i_blocks) |
| 2006 | * in shmem_evict_inode(). |
Miaohe Lin | 6cec2b9 | 2022-05-19 20:50:29 +0800 | [diff] [blame] | 2007 | */ |
Baolin Wang | 40ff2d1 | 2024-08-12 15:42:06 +0800 | [diff] [blame] | 2008 | shmem_recalc_inode(inode, -nr_pages, -nr_pages); |
| 2009 | swap_free_nr(swap, nr_pages); |
Miaohe Lin | 6cec2b9 | 2022-05-19 20:50:29 +0800 | [diff] [blame] | 2010 | } |
| 2011 | |
Baolin Wang | 12885cb | 2024-08-12 15:42:09 +0800 | [diff] [blame] | 2012 | static int shmem_split_large_entry(struct inode *inode, pgoff_t index, |
| 2013 | swp_entry_t swap, gfp_t gfp) |
| 2014 | { |
| 2015 | struct address_space *mapping = inode->i_mapping; |
| 2016 | XA_STATE_ORDER(xas, &mapping->i_pages, index, 0); |
| 2017 | void *alloced_shadow = NULL; |
| 2018 | int alloced_order = 0, i; |
| 2019 | |
| 2020 | /* Convert user data gfp flags to xarray node gfp flags */ |
| 2021 | gfp &= GFP_RECLAIM_MASK; |
| 2022 | |
| 2023 | for (;;) { |
| 2024 | int order = -1, split_order = 0; |
| 2025 | void *old = NULL; |
| 2026 | |
| 2027 | xas_lock_irq(&xas); |
| 2028 | old = xas_load(&xas); |
| 2029 | if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) { |
| 2030 | xas_set_err(&xas, -EEXIST); |
| 2031 | goto unlock; |
| 2032 | } |
| 2033 | |
| 2034 | order = xas_get_order(&xas); |
| 2035 | |
| 2036 | /* Swap entry may have changed before we re-acquire the lock */ |
| 2037 | if (alloced_order && |
| 2038 | (old != alloced_shadow || order != alloced_order)) { |
| 2039 | xas_destroy(&xas); |
| 2040 | alloced_order = 0; |
| 2041 | } |
| 2042 | |
| 2043 | /* Try to split large swap entry in pagecache */ |
| 2044 | if (order > 0) { |
| 2045 | if (!alloced_order) { |
| 2046 | split_order = order; |
| 2047 | goto unlock; |
| 2048 | } |
| 2049 | xas_split(&xas, old, order); |
| 2050 | |
| 2051 | /* |
| 2052 | * Re-set the swap entries after splitting: the swap |
| 2053 | * offsets of the original large entry are contiguous. |
| 2054 | */ |
| 2055 | for (i = 0; i < 1 << order; i++) { |
| 2056 | pgoff_t aligned_index = round_down(index, 1 << order); |
| 2057 | swp_entry_t tmp; |
| 2058 | |
| 2059 | tmp = swp_entry(swp_type(swap), swp_offset(swap) + i); |
| 2060 | __xa_store(&mapping->i_pages, aligned_index + i, |
| 2061 | swp_to_radix_entry(tmp), 0); |
| 2062 | } |
| 2063 | } |
| 2064 | |
| 2065 | unlock: |
| 2066 | xas_unlock_irq(&xas); |
| 2067 | |
| 2068 | /* split needed, alloc here and retry. */ |
| 2069 | if (split_order) { |
| 2070 | xas_split_alloc(&xas, old, split_order, gfp); |
| 2071 | if (xas_error(&xas)) |
| 2072 | goto error; |
| 2073 | alloced_shadow = old; |
| 2074 | alloced_order = split_order; |
| 2075 | xas_reset(&xas); |
| 2076 | continue; |
| 2077 | } |
| 2078 | |
| 2079 | if (!xas_nomem(&xas, gfp)) |
| 2080 | break; |
| 2081 | } |
| 2082 | |
| 2083 | error: |
| 2084 | if (xas_error(&xas)) |
| 2085 | return xas_error(&xas); |
| 2086 | |
| 2087 | return alloced_order; |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 2088 | } |
| 2089 | |
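An illustration of what shmem_split_large_entry() leaves in the page cache (offsets are made up):

/*
 * An order-2 swap entry covering indices 8..11, with swap offset O, is
 * split into four order-0 entries:
 *
 *	index  8 -> swp_entry(type, O + 0)
 *	index  9 -> swp_entry(type, O + 1)
 *	index 10 -> swp_entry(type, O + 2)
 *	index 11 -> swp_entry(type, O + 3)
 *
 * A later swapin at index 10 then operates on just that one entry, after
 * recomputing its swap value as shmem_swapin_folio() does below.
 */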
| 2090 | /* |
Miaohe Lin | 833de10 | 2022-05-30 19:58:41 +0800 | [diff] [blame] | 2091 | * Swap in the folio pointed to by *foliop. |
| 2092 | * Caller has to make sure that *foliop contains a valid swapped folio. |
| 2093 | * Returns 0 and the folio in *foliop on success. On failure, returns the |
| 2094 | * error code and NULL in *foliop. |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2095 | */ |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2096 | static int shmem_swapin_folio(struct inode *inode, pgoff_t index, |
| 2097 | struct folio **foliop, enum sgp_type sgp, |
Baolin Wang | 736f0e0 | 2024-08-12 15:42:07 +0800 | [diff] [blame] | 2098 | gfp_t gfp, struct vm_area_struct *vma, |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2099 | vm_fault_t *fault_type) |
| 2100 | { |
| 2101 | struct address_space *mapping = inode->i_mapping; |
Baolin Wang | 736f0e0 | 2024-08-12 15:42:07 +0800 | [diff] [blame] | 2102 | struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL; |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2103 | struct shmem_inode_info *info = SHMEM_I(inode); |
Kairui Song | cbc2bd9 | 2022-12-20 02:58:40 +0800 | [diff] [blame] | 2104 | struct swap_info_struct *si; |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2105 | struct folio *folio = NULL; |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2106 | swp_entry_t swap; |
Baolin Wang | 40ff2d1 | 2024-08-12 15:42:06 +0800 | [diff] [blame] | 2107 | int error, nr_pages; |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2108 | |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2109 | VM_BUG_ON(!*foliop || !xa_is_value(*foliop)); |
| 2110 | swap = radix_to_swp_entry(*foliop); |
| 2111 | *foliop = NULL; |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2112 | |
Axel Rasmussen | af19487 | 2023-07-07 14:55:33 -0700 | [diff] [blame] | 2113 | if (is_poisoned_swp_entry(swap)) |
Miaohe Lin | 6cec2b9 | 2022-05-19 20:50:29 +0800 | [diff] [blame] | 2114 | return -EIO; |
| 2115 | |
Kairui Song | cbc2bd9 | 2022-12-20 02:58:40 +0800 | [diff] [blame] | 2116 | si = get_swap_device(swap); |
| 2117 | if (!si) { |
| 2118 | if (!shmem_confirm_swap(mapping, index, swap)) |
| 2119 | return -EEXIST; |
| 2120 | else |
| 2121 | return -EINVAL; |
| 2122 | } |
| 2123 | |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2124 | /* Look it up and read it in... */ |
Matthew Wilcox (Oracle) | 5739a81 | 2022-09-02 20:46:16 +0100 | [diff] [blame] | 2125 | folio = swap_cache_get_folio(swap, NULL, 0); |
| 2126 | if (!folio) { |
Baolin Wang | 12885cb | 2024-08-12 15:42:09 +0800 | [diff] [blame] | 2127 | int split_order; |
| 2128 | |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2129 | /* Or update major stats only when swapin succeeds?? */ |
| 2130 | if (fault_type) { |
| 2131 | *fault_type |= VM_FAULT_MAJOR; |
| 2132 | count_vm_event(PGMAJFAULT); |
Hugh Dickins | 054a9f7 | 2023-09-29 20:31:27 -0700 | [diff] [blame] | 2133 | count_memcg_event_mm(fault_mm, PGMAJFAULT); |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2134 | } |
Baolin Wang | 12885cb | 2024-08-12 15:42:09 +0800 | [diff] [blame] | 2135 | |
| 2136 | /* |
| 2137 | * The swap device can currently only swap in order-0 folios, so |
| 2138 | * we should split the large swap entry stored in the page cache |
| 2139 | * if necessary. |
| 2140 | */ |
| 2141 | split_order = shmem_split_large_entry(inode, index, swap, gfp); |
| 2142 | if (split_order < 0) { |
| 2143 | error = split_order; |
| 2144 | goto failed; |
| 2145 | } |
| 2146 | |
| 2147 | /* |
| 2148 | * If the large swap entry has already been split, it is |
| 2149 | * necessary to recalculate the new swap entry based on |
| 2150 | * the old order alignment. |
| 2151 | */ |
| 2152 | if (split_order > 0) { |
| 2153 | pgoff_t offset = index - round_down(index, 1 << split_order); |
| 2154 | |
| 2155 | swap = swp_entry(swp_type(swap), swp_offset(swap) + offset); |
| 2156 | } |
| 2157 | |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2158 | /* Here we actually start the io */ |
Hugh Dickins | ddc1a5c | 2023-10-19 13:39:08 -0700 | [diff] [blame] | 2159 | folio = shmem_swapin_cluster(swap, gfp, info, index); |
Matthew Wilcox (Oracle) | 5739a81 | 2022-09-02 20:46:16 +0100 | [diff] [blame] | 2160 | if (!folio) { |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2161 | error = -ENOMEM; |
| 2162 | goto failed; |
| 2163 | } |
| 2164 | } |
| 2165 | |
Miaohe Lin | 833de10 | 2022-05-30 19:58:41 +0800 | [diff] [blame] | 2166 | /* We have to do this with folio locked to prevent races */ |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2167 | folio_lock(folio); |
| 2168 | if (!folio_test_swapcache(folio) || |
David Hildenbrand | 3d2c908 | 2023-08-21 18:08:48 +0200 | [diff] [blame] | 2169 | folio->swap.val != swap.val || |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2170 | !shmem_confirm_swap(mapping, index, swap)) { |
| 2171 | error = -EEXIST; |
| 2172 | goto unlock; |
| 2173 | } |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2174 | if (!folio_test_uptodate(folio)) { |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2175 | error = -EIO; |
| 2176 | goto failed; |
| 2177 | } |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2178 | folio_wait_writeback(folio); |
Baolin Wang | 40ff2d1 | 2024-08-12 15:42:06 +0800 | [diff] [blame] | 2179 | nr_pages = folio_nr_pages(folio); |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2180 | |
Steven Price | 8a84802 | 2020-05-13 16:37:49 +0100 | [diff] [blame] | 2181 | /* |
| 2182 | * Some architectures may have to restore extra metadata to the |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2183 | * folio after reading from swap. |
Steven Price | 8a84802 | 2020-05-13 16:37:49 +0100 | [diff] [blame] | 2184 | */ |
Barry Song | f238b8c | 2024-03-23 00:41:36 +1300 | [diff] [blame] | 2185 | arch_swap_restore(folio_swap(swap, folio), folio); |
Steven Price | 8a84802 | 2020-05-13 16:37:49 +0100 | [diff] [blame] | 2186 | |
Matthew Wilcox (Oracle) | 069d849 | 2022-05-12 20:23:04 -0700 | [diff] [blame] | 2187 | if (shmem_should_replace_folio(folio, gfp)) { |
Baolin Wang | 736f0e0 | 2024-08-12 15:42:07 +0800 | [diff] [blame] | 2188 | error = shmem_replace_folio(&folio, gfp, info, index, vma); |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2189 | if (error) |
| 2190 | goto failed; |
| 2191 | } |
| 2192 | |
Baolin Wang | 40ff2d1 | 2024-08-12 15:42:06 +0800 | [diff] [blame] | 2193 | error = shmem_add_to_page_cache(folio, mapping, |
| 2194 | round_down(index, nr_pages), |
Hugh Dickins | 054a9f7 | 2023-09-29 20:31:27 -0700 | [diff] [blame] | 2195 | swp_to_radix_entry(swap), gfp); |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2196 | if (error) |
| 2197 | goto failed; |
| 2198 | |
Baolin Wang | 40ff2d1 | 2024-08-12 15:42:06 +0800 | [diff] [blame] | 2199 | shmem_recalc_inode(inode, 0, -nr_pages); |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2200 | |
| 2201 | if (sgp == SGP_WRITE) |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2202 | folio_mark_accessed(folio); |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2203 | |
Matthew Wilcox (Oracle) | 75fa68a | 2022-06-17 18:50:19 +0100 | [diff] [blame] | 2204 | delete_from_swap_cache(folio); |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2205 | folio_mark_dirty(folio); |
Baolin Wang | 40ff2d1 | 2024-08-12 15:42:06 +0800 | [diff] [blame] | 2206 | swap_free_nr(swap, nr_pages); |
Kairui Song | cbc2bd9 | 2022-12-20 02:58:40 +0800 | [diff] [blame] | 2207 | put_swap_device(si); |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2208 | |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2209 | *foliop = folio; |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2210 | return 0; |
| 2211 | failed: |
| 2212 | if (!shmem_confirm_swap(mapping, index, swap)) |
| 2213 | error = -EEXIST; |
Miaohe Lin | 6cec2b9 | 2022-05-19 20:50:29 +0800 | [diff] [blame] | 2214 | if (error == -EIO) |
| 2215 | shmem_set_folio_swapin_error(inode, index, folio, swap); |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2216 | unlock: |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2217 | if (folio) { |
| 2218 | folio_unlock(folio); |
| 2219 | folio_put(folio); |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2220 | } |
Kairui Song | cbc2bd9 | 2022-12-20 02:58:40 +0800 | [diff] [blame] | 2221 | put_swap_device(si); |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2222 | |
| 2223 | return error; |
| 2224 | } |
| 2225 | |
| 2226 | /* |
Matthew Wilcox (Oracle) | fc26bab | 2022-09-02 20:46:17 +0100 | [diff] [blame] | 2227 | * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2228 | * |
| 2229 | * If we allocate a new one we do not mark it dirty. That's up to the |
| 2230 | * vm. If we swap it in we mark it dirty, since we also free the swap |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 2231 | * entry: a page cannot live in both the swap and page cache. |
| 2232 | * |
Hugh Dickins | e3e1a506 | 2023-09-29 20:26:53 -0700 | [diff] [blame] | 2233 | * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2234 | */ |
Matthew Wilcox (Oracle) | fc26bab | 2022-09-02 20:46:17 +0100 | [diff] [blame] | 2235 | static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 2236 | loff_t write_end, struct folio **foliop, enum sgp_type sgp, |
| 2237 | gfp_t gfp, struct vm_fault *vmf, vm_fault_t *fault_type) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2238 | { |
Hugh Dickins | e3e1a506 | 2023-09-29 20:26:53 -0700 | [diff] [blame] | 2239 | struct vm_area_struct *vma = vmf ? vmf->vma : NULL; |
Hugh Dickins | 054a9f7 | 2023-09-29 20:31:27 -0700 | [diff] [blame] | 2240 | struct mm_struct *fault_mm; |
Matthew Wilcox (Oracle) | b7dd44a | 2022-05-12 20:23:04 -0700 | [diff] [blame] | 2241 | struct folio *folio; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2242 | int error; |
Baolin Wang | 6beeab8 | 2024-07-22 13:43:19 +0800 | [diff] [blame] | 2243 | bool alloced; |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 2244 | unsigned long orders = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2245 | |
Christoph Hellwig | 1cd81fa | 2024-02-19 07:27:12 +0100 | [diff] [blame] | 2246 | if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping))) |
| 2247 | return -EINVAL; |
| 2248 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2249 | if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2250 | return -EFBIG; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2251 | repeat: |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 2252 | if (sgp <= SGP_CACHE && |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 2253 | ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2254 | return -EINVAL; |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2255 | |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 2256 | alloced = false; |
Hugh Dickins | 054a9f7 | 2023-09-29 20:31:27 -0700 | [diff] [blame] | 2257 | fault_mm = vma ? vma->vm_mm : NULL; |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2258 | |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 2259 | folio = filemap_get_entry(inode->i_mapping, index); |
Matthew Wilcox (Oracle) | b1d0ec3a | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2260 | if (folio && vma && userfaultfd_minor(vma)) { |
Christoph Hellwig | aaeb94e | 2023-03-07 15:34:08 +0100 | [diff] [blame] | 2261 | if (!xa_is_value(folio)) |
Matthew Wilcox (Oracle) | b1d0ec3a | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2262 | folio_put(folio); |
Axel Rasmussen | c949b09 | 2021-06-30 18:49:20 -0700 | [diff] [blame] | 2263 | *fault_type = handle_userfault(vmf, VM_UFFD_MINOR); |
| 2264 | return 0; |
| 2265 | } |
| 2266 | |
Matthew Wilcox (Oracle) | b1d0ec3a | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2267 | if (xa_is_value(folio)) { |
Matthew Wilcox (Oracle) | da08e9b | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2268 | error = shmem_swapin_folio(inode, index, &folio, |
Baolin Wang | 736f0e0 | 2024-08-12 15:42:07 +0800 | [diff] [blame] | 2269 | sgp, gfp, vma, fault_type); |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2270 | if (error == -EEXIST) |
| 2271 | goto repeat; |
| 2272 | |
Matthew Wilcox (Oracle) | fc26bab | 2022-09-02 20:46:17 +0100 | [diff] [blame] | 2273 | *foliop = folio; |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2274 | return error; |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 2275 | } |
| 2276 | |
Matthew Wilcox (Oracle) | b1d0ec3a | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2277 | if (folio) { |
Christoph Hellwig | aaeb94e | 2023-03-07 15:34:08 +0100 | [diff] [blame] | 2278 | folio_lock(folio); |
| 2279 | |
| 2280 | /* Has the folio been truncated or swapped out? */ |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 2281 | if (unlikely(folio->mapping != inode->i_mapping)) { |
Christoph Hellwig | aaeb94e | 2023-03-07 15:34:08 +0100 | [diff] [blame] | 2282 | folio_unlock(folio); |
| 2283 | folio_put(folio); |
| 2284 | goto repeat; |
| 2285 | } |
Hugh Dickins | acdd9f8e | 2021-09-02 14:54:34 -0700 | [diff] [blame] | 2286 | if (sgp == SGP_WRITE) |
Matthew Wilcox (Oracle) | b1d0ec3a | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2287 | folio_mark_accessed(folio); |
| 2288 | if (folio_test_uptodate(folio)) |
Hugh Dickins | acdd9f8e | 2021-09-02 14:54:34 -0700 | [diff] [blame] | 2289 | goto out; |
Matthew Wilcox (Oracle) | fc26bab | 2022-09-02 20:46:17 +0100 | [diff] [blame] | 2290 | /* fallocated folio */ |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 2291 | if (sgp != SGP_READ) |
| 2292 | goto clear; |
Matthew Wilcox (Oracle) | b1d0ec3a | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2293 | folio_unlock(folio); |
| 2294 | folio_put(folio); |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 2295 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2296 | |
| 2297 | /* |
Matthew Wilcox (Oracle) | fc26bab | 2022-09-02 20:46:17 +0100 | [diff] [blame] | 2298 | * SGP_READ: succeed on hole, with NULL folio, letting caller zero. |
| 2299 | * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail. |
Hugh Dickins | acdd9f8e | 2021-09-02 14:54:34 -0700 | [diff] [blame] | 2300 | */ |
Matthew Wilcox (Oracle) | fc26bab | 2022-09-02 20:46:17 +0100 | [diff] [blame] | 2301 | *foliop = NULL; |
Hugh Dickins | acdd9f8e | 2021-09-02 14:54:34 -0700 | [diff] [blame] | 2302 | if (sgp == SGP_READ) |
| 2303 | return 0; |
| 2304 | if (sgp == SGP_NOALLOC) |
| 2305 | return -ENOENT; |
| 2306 | |
| 2307 | /* |
| 2308 | * Fast cache lookup and swap lookup did not find it: allocate. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2309 | */ |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 2310 | |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2311 | if (vma && userfaultfd_missing(vma)) { |
| 2312 | *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); |
| 2313 | return 0; |
| 2314 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2315 | |
Baolin Wang | 6beeab8 | 2024-07-22 13:43:19 +0800 | [diff] [blame] | 2316 | /* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */ |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 2317 | orders = shmem_allowable_huge_orders(inode, vma, index, write_end, false); |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 2318 | if (orders > 0) { |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 2319 | gfp_t huge_gfp; |
Kees Cook | 27d80fa2 | 2020-04-06 20:07:51 -0700 | [diff] [blame] | 2320 | |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 2321 | huge_gfp = vma_thp_gfp_mask(vma); |
| 2322 | huge_gfp = limit_gfp_mask(huge_gfp, gfp); |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 2323 | folio = shmem_alloc_and_add_folio(vmf, huge_gfp, |
| 2324 | inode, index, fault_mm, orders); |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 2325 | if (!IS_ERR(folio)) { |
Baolin Wang | 3d95bc21 | 2024-06-11 18:11:06 +0800 | [diff] [blame] | 2326 | if (folio_test_pmd_mappable(folio)) |
| 2327 | count_vm_event(THP_FILE_ALLOC); |
Ryan Roberts | 63d9866 | 2024-07-10 10:55:01 +0100 | [diff] [blame] | 2328 | count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC); |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 2329 | goto alloced; |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2330 | } |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 2331 | if (PTR_ERR(folio) == -EEXIST) |
| 2332 | goto repeat; |
| 2333 | } |
| 2334 | |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 2335 | folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index, fault_mm, 0); |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 2336 | if (IS_ERR(folio)) { |
| 2337 | error = PTR_ERR(folio); |
| 2338 | if (error == -EEXIST) |
| 2339 | goto repeat; |
| 2340 | folio = NULL; |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2341 | goto unlock; |
| 2342 | } |
| 2343 | |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 2344 | alloced: |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2345 | alloced = true; |
Baolin Wang | e7a2ab7 | 2024-06-11 18:11:08 +0800 | [diff] [blame] | 2346 | if (folio_test_large(folio) && |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2347 | DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < |
Hugh Dickins | de5b852 | 2024-08-25 15:42:45 -0700 | [diff] [blame] | 2348 | folio_next_index(folio)) { |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 2349 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
| 2350 | struct shmem_inode_info *info = SHMEM_I(inode); |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2351 | /* |
Matthew Wilcox (Oracle) | fc26bab | 2022-09-02 20:46:17 +0100 | [diff] [blame] | 2352 | * Part of the large folio is beyond i_size: subject |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2353 | * to shrink under memory pressure. |
| 2354 | */ |
| 2355 | spin_lock(&sbinfo->shrinklist_lock); |
| 2356 | /* |
| 2357 | * _careful to defend against unlocked access to |
| 2358 | * ->shrink_list in shmem_unused_huge_shrink() |
| 2359 | */ |
| 2360 | if (list_empty_careful(&info->shrinklist)) { |
| 2361 | list_add_tail(&info->shrinklist, |
| 2362 | &sbinfo->shrinklist); |
| 2363 | sbinfo->shrinklist_len++; |
| 2364 | } |
| 2365 | spin_unlock(&sbinfo->shrinklist_lock); |
| 2366 | } |
| 2367 | |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 2368 | if (sgp == SGP_WRITE) |
| 2369 | folio_set_referenced(folio); |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2370 | /* |
Matthew Wilcox (Oracle) | fc26bab | 2022-09-02 20:46:17 +0100 | [diff] [blame] | 2371 | * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio. |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2372 | */ |
| 2373 | if (sgp == SGP_FALLOC) |
| 2374 | sgp = SGP_WRITE; |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 2375 | clear: |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2376 | /* |
Matthew Wilcox (Oracle) | fc26bab | 2022-09-02 20:46:17 +0100 | [diff] [blame] | 2377 | * Let SGP_WRITE caller clear ends if write does not fill folio; |
| 2378 | * but SGP_FALLOC on a folio fallocated earlier must initialize |
Vineeth Remanan Pillai | c5bf121 | 2019-03-05 15:46:58 -0800 | [diff] [blame] | 2379 | * it now, lest undo on failure cancel our earlier guarantee. |
| 2380 | */ |
Matthew Wilcox (Oracle) | b1d0ec3a | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2381 | if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) { |
| 2382 | long i, n = folio_nr_pages(folio); |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 2383 | |
Matthew Wilcox (Oracle) | b1d0ec3a | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2384 | for (i = 0; i < n; i++) |
| 2385 | clear_highpage(folio_page(folio, i)); |
| 2386 | flush_dcache_folio(folio); |
| 2387 | folio_mark_uptodate(folio); |
Hugh Dickins | 59a16ea | 2011-05-11 15:13:38 -0700 | [diff] [blame] | 2388 | } |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 2389 | |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 2390 | /* Perhaps the file has been truncated since we checked */ |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 2391 | if (sgp <= SGP_CACHE && |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2392 | ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 2393 | error = -EINVAL; |
Hugh Dickins | 267a4c7 | 2015-12-11 13:40:55 -0800 | [diff] [blame] | 2394 | goto unlock; |
Shaohua Li | ff36b801 | 2010-08-09 17:19:06 -0700 | [diff] [blame] | 2395 | } |
Matthew Wilcox (Oracle) | 63ec197 | 2020-10-13 16:51:38 -0700 | [diff] [blame] | 2396 | out: |
Matthew Wilcox (Oracle) | fc26bab | 2022-09-02 20:46:17 +0100 | [diff] [blame] | 2397 | *foliop = folio; |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 2398 | return 0; |
Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 2399 | |
Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 2400 | /* |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 2401 | * Error recovery. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2402 | */ |
Hugh Dickins | d189922 | 2012-07-11 14:02:47 -0700 | [diff] [blame] | 2403 | unlock: |
Hugh Dickins | 3022fd7 | 2023-09-29 20:32:40 -0700 | [diff] [blame] | 2404 | if (alloced) |
| 2405 | filemap_remove_folio(folio); |
| 2406 | shmem_recalc_inode(inode, 0, 0); |
Matthew Wilcox (Oracle) | b1d0ec3a | 2022-05-12 20:23:05 -0700 | [diff] [blame] | 2407 | if (folio) { |
| 2408 | folio_unlock(folio); |
| 2409 | folio_put(folio); |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 2410 | } |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 2411 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2412 | } |
| 2413 | |
Christoph Hellwig | d746860 | 2024-02-19 07:27:13 +0100 | [diff] [blame] | 2414 | /** |
| 2415 | * shmem_get_folio - find, and lock a shmem folio. |
| 2416 | * @inode: inode to search |
| 2417 | * @index: the page index. |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 2418 | * @write_end: end of a write, could extend inode size |
Christoph Hellwig | d746860 | 2024-02-19 07:27:13 +0100 | [diff] [blame] | 2419 | * @foliop: pointer to the folio if found |
| 2420 | * @sgp: SGP_* flags to control behavior |
| 2421 | * |
| 2422 | * Looks up the page cache entry at @inode & @index. If a folio is |
| 2423 | * present, it is returned locked with an increased refcount. |
| 2424 | * |
Christoph Hellwig | 9d8b367 | 2024-02-19 07:27:15 +0100 | [diff] [blame] | 2425 | * If the caller modifies data in the folio, it must call folio_mark_dirty() |
| 2426 | * before unlocking the folio to ensure that the folio is not reclaimed. |
| 2427 | * There is no need to reserve space before calling folio_mark_dirty(). |
| 2428 | * |
Christoph Hellwig | d746860 | 2024-02-19 07:27:13 +0100 | [diff] [blame] | 2429 | * When no folio is found, the behavior depends on @sgp: |
Akira Yokosawa | 8d4dd9d | 2024-02-27 14:06:48 +0900 | [diff] [blame] | 2430 | * - for SGP_READ, *@foliop is %NULL and 0 is returned |
| 2431 | * - for SGP_NOALLOC, *@foliop is %NULL and -ENOENT is returned |
Christoph Hellwig | d746860 | 2024-02-19 07:27:13 +0100 | [diff] [blame] | 2432 | * - for all other flags a new folio is allocated, inserted into the |
| 2433 | * page cache and returned locked in @foliop. |
| 2434 | * |
| 2435 | * Context: May sleep. |
| 2436 | * Return: 0 if successful, else a negative error code. |
| 2437 | */ |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 2438 | int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end, |
| 2439 | struct folio **foliop, enum sgp_type sgp) |
Matthew Wilcox (Oracle) | 4e1fc79 | 2022-09-02 20:46:20 +0100 | [diff] [blame] | 2440 | { |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 2441 | return shmem_get_folio_gfp(inode, index, write_end, foliop, sgp, |
Hugh Dickins | e3e1a506 | 2023-09-29 20:26:53 -0700 | [diff] [blame] | 2442 | mapping_gfp_mask(inode->i_mapping), NULL, NULL); |
Matthew Wilcox (Oracle) | 4e1fc79 | 2022-09-02 20:46:20 +0100 | [diff] [blame] | 2443 | } |
Christoph Hellwig | d746860 | 2024-02-19 07:27:13 +0100 | [diff] [blame] | 2444 | EXPORT_SYMBOL_GPL(shmem_get_folio); |
Matthew Wilcox (Oracle) | 4e1fc79 | 2022-09-02 20:46:20 +0100 | [diff] [blame] | 2445 | |
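A hedged caller sketch for this exported API; example_write_byte() is a hypothetical helper and assumes pos already lies within i_size:

static int example_write_byte(struct inode *inode, loff_t pos, u8 val)
{
	struct folio *folio;
	u8 *kaddr;
	int err;

	err = shmem_get_folio(inode, pos >> PAGE_SHIFT, pos + 1, &folio,
			      SGP_WRITE);
	if (err)
		return err;

	/* Write through a temporary kernel mapping of the target page. */
	kaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
	*kaddr = val;
	kunmap_local(kaddr);

	folio_mark_dirty(folio);	/* no space reservation needed first */
	folio_unlock(folio);
	folio_put(folio);
	return 0;
}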
Linus Torvalds | 10d20bd | 2016-12-05 12:10:29 -0800 | [diff] [blame] | 2446 | /* |
| 2447 | * This is like autoremove_wake_function, but it removes the wait queue |
| 2448 | * entry unconditionally - even if something else had already woken the |
| 2449 | * target. |
| 2450 | */ |
Hugh Dickins | f0a9ad1 | 2023-09-29 20:27:53 -0700 | [diff] [blame] | 2451 | static int synchronous_wake_function(wait_queue_entry_t *wait, |
| 2452 | unsigned int mode, int sync, void *key) |
Linus Torvalds | 10d20bd | 2016-12-05 12:10:29 -0800 | [diff] [blame] | 2453 | { |
| 2454 | int ret = default_wake_function(wait, mode, sync, key); |
Ingo Molnar | 2055da9 | 2017-06-20 12:06:46 +0200 | [diff] [blame] | 2455 | list_del_init(&wait->entry); |
Linus Torvalds | 10d20bd | 2016-12-05 12:10:29 -0800 | [diff] [blame] | 2456 | return ret; |
| 2457 | } |
| 2458 | |
Hugh Dickins | f0a9ad1 | 2023-09-29 20:27:53 -0700 | [diff] [blame] | 2459 | /* |
| 2460 | * Trinity finds that probing a hole which tmpfs is punching can |
| 2461 | * prevent the hole-punch from ever completing: which in turn |
| 2462 | * locks writers out with its hold on i_rwsem. So refrain from |
| 2463 | * faulting pages into the hole while it's being punched. Although |
| 2464 | * shmem_undo_range() does remove the additions, it may be unable to |
| 2465 | * keep up, as each new page needs its own unmap_mapping_range() call, |
| 2466 | * and the i_mmap tree grows ever slower to scan if new vmas are added. |
| 2467 | * |
| 2468 | * It does not matter if we sometimes reach this check just before the |
| 2469 | * hole-punch begins, so that one fault then races with the punch: |
| 2470 | * we just need to make racing faults a rare case. |
| 2471 | * |
| 2472 | * The implementation below would be much simpler if we just used a |
| 2473 | * standard mutex or completion: but we cannot take i_rwsem in fault, |
| 2474 | * and bloating every shmem inode for this unlikely case would be sad. |
| 2475 | */ |
| 2476 | static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode) |
| 2477 | { |
| 2478 | struct shmem_falloc *shmem_falloc; |
| 2479 | struct file *fpin = NULL; |
| 2480 | vm_fault_t ret = 0; |
| 2481 | |
| 2482 | spin_lock(&inode->i_lock); |
| 2483 | shmem_falloc = inode->i_private; |
| 2484 | if (shmem_falloc && |
| 2485 | shmem_falloc->waitq && |
| 2486 | vmf->pgoff >= shmem_falloc->start && |
| 2487 | vmf->pgoff < shmem_falloc->next) { |
| 2488 | wait_queue_head_t *shmem_falloc_waitq; |
| 2489 | DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); |
| 2490 | |
| 2491 | ret = VM_FAULT_NOPAGE; |
| 2492 | fpin = maybe_unlock_mmap_for_io(vmf, NULL); |
| 2493 | shmem_falloc_waitq = shmem_falloc->waitq; |
| 2494 | prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, |
| 2495 | TASK_UNINTERRUPTIBLE); |
| 2496 | spin_unlock(&inode->i_lock); |
| 2497 | schedule(); |
| 2498 | |
| 2499 | /* |
| 2500 | * shmem_falloc_waitq points into the shmem_fallocate() |
| 2501 | * stack of the hole-punching task: shmem_falloc_waitq |
| 2502 | * is usually invalid by the time we reach here, but |
| 2503 | * finish_wait() does not dereference it in that case; |
| 2504 | * though i_lock is still needed lest we race with wake_up_all(). |
| 2505 | */ |
| 2506 | spin_lock(&inode->i_lock); |
| 2507 | finish_wait(shmem_falloc_waitq, &shmem_fault_wait); |
| 2508 | } |
| 2509 | spin_unlock(&inode->i_lock); |
| 2510 | if (fpin) { |
| 2511 | fput(fpin); |
| 2512 | ret = VM_FAULT_RETRY; |
| 2513 | } |
| 2514 | return ret; |
| 2515 | } |
| 2516 | |
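| | /* |
| | * Hole-punch vs page-fault handshake (descriptive note): shmem_fallocate() |
| | * publishes a struct shmem_falloc in inode->i_private under i_lock before |
| | * unmapping and truncating the range, and clears it again (followed by a |
| | * wake_up_all() on its on-stack waitqueue) once the punch completes. |
| | * shmem_fault() below notices the non-NULL i_private and, via |
| | * shmem_falloc_wait(), sleeps on that waitqueue instead of refaulting |
| | * pages into the hole while it is still being punched. |
| | */ |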
Souptick Joarder | 20acce6 | 2018-06-07 17:09:17 -0700 | [diff] [blame] | 2517 | static vm_fault_t shmem_fault(struct vm_fault *vmf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2518 | { |
Hugh Dickins | f0a9ad1 | 2023-09-29 20:27:53 -0700 | [diff] [blame] | 2519 | struct inode *inode = file_inode(vmf->vma->vm_file); |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 2520 | gfp_t gfp = mapping_gfp_mask(inode->i_mapping); |
Matthew Wilcox (Oracle) | 68a5410 | 2022-09-02 20:46:18 +0100 | [diff] [blame] | 2521 | struct folio *folio = NULL; |
Hugh Dickins | f0a9ad1 | 2023-09-29 20:27:53 -0700 | [diff] [blame] | 2522 | vm_fault_t ret = 0; |
Souptick Joarder | 20acce6 | 2018-06-07 17:09:17 -0700 | [diff] [blame] | 2523 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2524 | |
Hugh Dickins | f00cdc6 | 2014-06-23 13:22:06 -0700 | [diff] [blame] | 2525 | /* |
| 2526 | * Trinity finds that probing a hole which tmpfs is punching can |
Hugh Dickins | f0a9ad1 | 2023-09-29 20:27:53 -0700 | [diff] [blame] | 2527 | * prevent the hole-punch from ever completing: noted in i_private. |
Hugh Dickins | f00cdc6 | 2014-06-23 13:22:06 -0700 | [diff] [blame] | 2528 | */ |
| 2529 | if (unlikely(inode->i_private)) { |
Hugh Dickins | f0a9ad1 | 2023-09-29 20:27:53 -0700 | [diff] [blame] | 2530 | ret = shmem_falloc_wait(vmf, inode); |
| 2531 | if (ret) |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 2532 | return ret; |
Hugh Dickins | f00cdc6 | 2014-06-23 13:22:06 -0700 | [diff] [blame] | 2533 | } |
| 2534 | |
Hugh Dickins | f0a9ad1 | 2023-09-29 20:27:53 -0700 | [diff] [blame] | 2535 | WARN_ON_ONCE(vmf->page != NULL); |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 2536 | err = shmem_get_folio_gfp(inode, vmf->pgoff, 0, &folio, SGP_CACHE, |
Hugh Dickins | e3e1a506 | 2023-09-29 20:26:53 -0700 | [diff] [blame] | 2537 | gfp, vmf, &ret); |
Souptick Joarder | 20acce6 | 2018-06-07 17:09:17 -0700 | [diff] [blame] | 2538 | if (err) |
| 2539 | return vmf_error(err); |
Hugh Dickins | f0a9ad1 | 2023-09-29 20:27:53 -0700 | [diff] [blame] | 2540 | if (folio) { |
Matthew Wilcox (Oracle) | 68a5410 | 2022-09-02 20:46:18 +0100 | [diff] [blame] | 2541 | vmf->page = folio_file_page(folio, vmf->pgoff); |
Hugh Dickins | f0a9ad1 | 2023-09-29 20:27:53 -0700 | [diff] [blame] | 2542 | ret |= VM_FAULT_LOCKED; |
| 2543 | } |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 2544 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2545 | } |
| 2546 | |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2547 | unsigned long shmem_get_unmapped_area(struct file *file, |
| 2548 | unsigned long uaddr, unsigned long len, |
| 2549 | unsigned long pgoff, unsigned long flags) |
| 2550 | { |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2551 | unsigned long addr; |
| 2552 | unsigned long offset; |
| 2553 | unsigned long inflated_len; |
| 2554 | unsigned long inflated_addr; |
| 2555 | unsigned long inflated_offset; |
Baolin Wang | 5a9dd10 | 2024-06-11 18:11:09 +0800 | [diff] [blame] | 2556 | unsigned long hpage_size; |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2557 | |
| 2558 | if (len > TASK_SIZE) |
| 2559 | return -ENOMEM; |
| 2560 | |
Rick Edgecombe | 529ce23 | 2024-03-25 19:16:44 -0700 | [diff] [blame] | 2561 | addr = mm_get_unmapped_area(current->mm, file, uaddr, len, pgoff, |
| 2562 | flags); |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2563 | |
Matthew Wilcox (Oracle) | 396bcc5 | 2020-04-06 20:04:35 -0700 | [diff] [blame] | 2564 | if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2565 | return addr; |
| 2566 | if (IS_ERR_VALUE(addr)) |
| 2567 | return addr; |
| 2568 | if (addr & ~PAGE_MASK) |
| 2569 | return addr; |
| 2570 | if (addr > TASK_SIZE - len) |
| 2571 | return addr; |
| 2572 | |
| 2573 | if (shmem_huge == SHMEM_HUGE_DENY) |
| 2574 | return addr; |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2575 | if (flags & MAP_FIXED) |
| 2576 | return addr; |
| 2577 | /* |
| 2578 | * Our priority is to support MAP_SHARED mapped hugely; |
| 2579 | * and support MAP_PRIVATE mapped hugely too, until it is COWed. |
Kirill A. Shutemov | 9915899 | 2020-01-13 16:29:13 -0800 | [diff] [blame] | 2580 | * But if caller specified an address hint and we allocated area there |
| 2581 | * successfully, respect that as before. |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2582 | */ |
Kirill A. Shutemov | 9915899 | 2020-01-13 16:29:13 -0800 | [diff] [blame] | 2583 | if (uaddr == addr) |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2584 | return addr; |
| 2585 | |
Baolin Wang | 5a9dd10 | 2024-06-11 18:11:09 +0800 | [diff] [blame] | 2586 | hpage_size = HPAGE_PMD_SIZE; |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2587 | if (shmem_huge != SHMEM_HUGE_FORCE) { |
| 2588 | struct super_block *sb; |
Baolin Wang | 5a9dd10 | 2024-06-11 18:11:09 +0800 | [diff] [blame] | 2589 | unsigned long __maybe_unused hpage_orders; |
| 2590 | int order = 0; |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2591 | |
| 2592 | if (file) { |
| 2593 | VM_BUG_ON(file->f_op != &shmem_file_operations); |
| 2594 | sb = file_inode(file)->i_sb; |
| 2595 | } else { |
| 2596 | /* |
| 2597 | * Called directly from mm/mmap.c, or drivers/char/mem.c |
| 2598 | * for "/dev/zero", to create a shared anonymous object. |
| 2599 | */ |
| 2600 | if (IS_ERR(shm_mnt)) |
| 2601 | return addr; |
| 2602 | sb = shm_mnt->mnt_sb; |
Baolin Wang | 5a9dd10 | 2024-06-11 18:11:09 +0800 | [diff] [blame] | 2603 | |
| 2604 | /* |
| 2605 | * Find the highest mTHP order enabled for anonymous shmem, to |
| 2606 | * choose a suitably aligned address for the mapping. |
| 2607 | */ |
| 2608 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 2609 | hpage_orders = READ_ONCE(huge_shmem_orders_always); |
| 2610 | hpage_orders |= READ_ONCE(huge_shmem_orders_within_size); |
| 2611 | hpage_orders |= READ_ONCE(huge_shmem_orders_madvise); |
| 2612 | if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER) |
| 2613 | hpage_orders |= READ_ONCE(huge_shmem_orders_inherit); |
| 2614 | |
| 2615 | if (hpage_orders > 0) { |
| 2616 | order = highest_order(hpage_orders); |
| 2617 | hpage_size = PAGE_SIZE << order; |
| 2618 | } |
| 2619 | #endif |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2620 | } |
Baolin Wang | 5a9dd10 | 2024-06-11 18:11:09 +0800 | [diff] [blame] | 2621 | if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER && !order) |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2622 | return addr; |
| 2623 | } |
| 2624 | |
Baolin Wang | 5a9dd10 | 2024-06-11 18:11:09 +0800 | [diff] [blame] | 2625 | if (len < hpage_size) |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2626 | return addr; |
| 2627 | |
Baolin Wang | 5a9dd10 | 2024-06-11 18:11:09 +0800 | [diff] [blame] | 2628 | offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1); |
| 2629 | if (offset && offset + len < 2 * hpage_size) |
| 2630 | return addr; |
| 2631 | if ((addr & (hpage_size - 1)) == offset) |
| 2632 | return addr; |
| 2633 | |
| 2634 | inflated_len = len + hpage_size - PAGE_SIZE; |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2635 | if (inflated_len > TASK_SIZE) |
| 2636 | return addr; |
| 2637 | if (inflated_len < len) |
| 2638 | return addr; |
| 2639 | |
Rick Edgecombe | 529ce23 | 2024-03-25 19:16:44 -0700 | [diff] [blame] | 2640 | inflated_addr = mm_get_unmapped_area(current->mm, NULL, uaddr, |
| 2641 | inflated_len, 0, flags); |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2642 | if (IS_ERR_VALUE(inflated_addr)) |
| 2643 | return addr; |
| 2644 | if (inflated_addr & ~PAGE_MASK) |
| 2645 | return addr; |
| 2646 | |
Baolin Wang | 5a9dd10 | 2024-06-11 18:11:09 +0800 | [diff] [blame] | 2647 | inflated_offset = inflated_addr & (hpage_size - 1); |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2648 | inflated_addr += offset - inflated_offset; |
| 2649 | if (inflated_offset > offset) |
Baolin Wang | 5a9dd10 | 2024-06-11 18:11:09 +0800 | [diff] [blame] | 2650 | inflated_addr += hpage_size; |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2651 | |
| 2652 | if (inflated_addr > TASK_SIZE - len) |
| 2653 | return addr; |
| 2654 | return inflated_addr; |
| 2655 | } |
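| | |
| | /* |
| | * Worked example of the "inflation" above (assuming 4K pages and |
| | * hpage_size = HPAGE_PMD_SIZE = 2M): for a 4M MAP_SHARED request at |
| | * pgoff 0, offset is 0, so we ask the arch for 4M + 2M - 4K of address |
| | * space and round the returned address up to the next 2M boundary; the |
| | * aligned start plus 4M still fits within the inflated area, and the |
| | * mapping can then be backed entirely by PMD-sized huge pages. |
| | */ |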
| 2656 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2657 | #ifdef CONFIG_NUMA |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2658 | static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2659 | { |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 2660 | struct inode *inode = file_inode(vma->vm_file); |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2661 | return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2662 | } |
| 2663 | |
Adrian Bunk | d8dc74f | 2007-10-16 01:26:26 -0700 | [diff] [blame] | 2664 | static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, |
Hugh Dickins | ddc1a5c | 2023-10-19 13:39:08 -0700 | [diff] [blame] | 2665 | unsigned long addr, pgoff_t *ilx) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2666 | { |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 2667 | struct inode *inode = file_inode(vma->vm_file); |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2668 | pgoff_t index; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2669 | |
Hugh Dickins | ddc1a5c | 2023-10-19 13:39:08 -0700 | [diff] [blame] | 2670 | /* |
| 2671 | * Bias interleave by inode number to distribute better across nodes; |
| 2672 | * but this interface is independent of which page order is used, so |
| 2673 | * it supplies only that bias, letting the caller apply the offset (adjusted |
| 2674 | * by page order, as in shmem_get_pgoff_policy() and get_vma_policy()). |
| 2675 | */ |
| 2676 | *ilx = inode->i_ino; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2677 | index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; |
| 2678 | return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2679 | } |
Hugh Dickins | ddc1a5c | 2023-10-19 13:39:08 -0700 | [diff] [blame] | 2680 | |
| 2681 | static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info, |
| 2682 | pgoff_t index, unsigned int order, pgoff_t *ilx) |
| 2683 | { |
| 2684 | struct mempolicy *mpol; |
| 2685 | |
| 2686 | /* Bias interleave by inode number to distribute better across nodes */ |
| 2687 | *ilx = info->vfs_inode.i_ino + (index >> order); |
| 2688 | |
| 2689 | mpol = mpol_shared_policy_lookup(&info->policy, index); |
| 2690 | return mpol ? mpol : get_task_policy(current); |
| 2691 | } |
| 2692 | #else |
| 2693 | static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info, |
| 2694 | pgoff_t index, unsigned int order, pgoff_t *ilx) |
| 2695 | { |
| 2696 | *ilx = 0; |
| 2697 | return NULL; |
| 2698 | } |
| 2699 | #endif /* CONFIG_NUMA */ |
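| | |
| | /* |
| | * Effect of the inode-number bias above, roughly: with an interleave |
| | * policy the node is chosen from this index, so two tmpfs files created |
| | * back to back start their interleave sequence on different nodes |
| | * instead of both beginning at the first node of the nodemask, which |
| | * spreads small files across the system more evenly. |
| | */ |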
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2700 | |
Alexey Gladkov | d7c9e99 | 2021-04-22 14:27:14 +0200 | [diff] [blame] | 2701 | int shmem_lock(struct file *file, int lock, struct ucounts *ucounts) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2702 | { |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 2703 | struct inode *inode = file_inode(file); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2704 | struct shmem_inode_info *info = SHMEM_I(inode); |
| 2705 | int retval = -ENOMEM; |
| 2706 | |
Hugh Dickins | ea0dfeb | 2020-04-20 18:14:14 -0700 | [diff] [blame] | 2707 | /* |
| 2708 | * What serializes the accesses to info->flags? |
| 2709 | * ipc_lock_object() when called from shmctl_do_lock(), |
| 2710 | * no serialization needed when called from shm_destroy(). |
| 2711 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2712 | if (lock && !(info->flags & VM_LOCKED)) { |
Alexey Gladkov | d7c9e99 | 2021-04-22 14:27:14 +0200 | [diff] [blame] | 2713 | if (!user_shm_lock(inode->i_size, ucounts)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2714 | goto out_nomem; |
| 2715 | info->flags |= VM_LOCKED; |
Lee Schermerhorn | 89e004ea | 2008-10-18 20:26:43 -0700 | [diff] [blame] | 2716 | mapping_set_unevictable(file->f_mapping); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2717 | } |
Alexey Gladkov | d7c9e99 | 2021-04-22 14:27:14 +0200 | [diff] [blame] | 2718 | if (!lock && (info->flags & VM_LOCKED) && ucounts) { |
| 2719 | user_shm_unlock(inode->i_size, ucounts); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2720 | info->flags &= ~VM_LOCKED; |
Lee Schermerhorn | 89e004ea | 2008-10-18 20:26:43 -0700 | [diff] [blame] | 2721 | mapping_clear_unevictable(file->f_mapping); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2722 | } |
| 2723 | retval = 0; |
Lee Schermerhorn | 89e004ea | 2008-10-18 20:26:43 -0700 | [diff] [blame] | 2724 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2725 | out_nomem: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2726 | return retval; |
| 2727 | } |
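| | |
| | /* |
| | * shmem_lock() above backs SysV shared memory locking: it is reached from |
| | * shmctl(id, SHM_LOCK/SHM_UNLOCK, NULL) via shmctl_do_lock(), and from |
| | * shm_destroy() when a locked segment goes away.  It does not pin pages; |
| | * it only marks the mapping unevictable and charges the segment size |
| | * against the caller's RLIMIT_MEMLOCK ucounts. |
| | */ |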
| 2728 | |
Adrian Bunk | 9b83a6a | 2007-02-28 20:11:03 -0800 | [diff] [blame] | 2729 | static int shmem_mmap(struct file *file, struct vm_area_struct *vma) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2730 | { |
Pasha Tatashin | d09e8ca | 2022-11-15 02:06:01 +0000 | [diff] [blame] | 2731 | struct inode *inode = file_inode(file); |
| 2732 | struct shmem_inode_info *info = SHMEM_I(inode); |
Peter Xu | 22247ef | 2021-05-14 17:27:04 -0700 | [diff] [blame] | 2733 | int ret; |
Joel Fernandes (Google) | ab3948f | 2019-03-05 15:47:54 -0800 | [diff] [blame] | 2734 | |
Lorenzo Stoakes | 28464bb | 2023-10-12 18:04:29 +0100 | [diff] [blame] | 2735 | ret = seal_check_write(info->seals, vma); |
Peter Xu | 22247ef | 2021-05-14 17:27:04 -0700 | [diff] [blame] | 2736 | if (ret) |
| 2737 | return ret; |
Joel Fernandes (Google) | ab3948f | 2019-03-05 15:47:54 -0800 | [diff] [blame] | 2738 | |
Catalin Marinas | 51b0bff | 2019-11-29 12:45:08 +0000 | [diff] [blame] | 2739 | /* arm64 - allow memory tagging on RAM-based files */ |
Suren Baghdasaryan | 1c71222 | 2023-01-26 11:37:49 -0800 | [diff] [blame] | 2740 | vm_flags_set(vma, VM_MTE_ALLOWED); |
Catalin Marinas | 51b0bff | 2019-11-29 12:45:08 +0000 | [diff] [blame] | 2741 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2742 | file_accessed(file); |
Pasha Tatashin | d09e8ca | 2022-11-15 02:06:01 +0000 | [diff] [blame] | 2743 | /* This is anonymous shared memory if it is unlinked at the time of mmap */ |
| 2744 | if (inode->i_nlink) |
| 2745 | vma->vm_ops = &shmem_vm_ops; |
| 2746 | else |
| 2747 | vma->vm_ops = &shmem_anon_vm_ops; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2748 | return 0; |
| 2749 | } |
| 2750 | |
Hugh Dickins | e88e0d3 | 2023-08-10 23:27:07 -0700 | [diff] [blame] | 2751 | static int shmem_file_open(struct inode *inode, struct file *file) |
| 2752 | { |
| 2753 | file->f_mode |= FMODE_CAN_ODIRECT; |
| 2754 | return generic_file_open(inode, file); |
| 2755 | } |
| 2756 | |
Hugh Dickins | cb24133 | 2022-08-10 21:51:09 -0700 | [diff] [blame] | 2757 | #ifdef CONFIG_TMPFS_XATTR |
| 2758 | static int shmem_initxattrs(struct inode *, const struct xattr *, void *); |
| 2759 | |
| 2760 | /* |
| 2761 | * chattr's fsflags are unrelated to extended attributes, |
| 2762 | * but tmpfs has chosen to enable them under the same config option. |
| 2763 | */ |
| 2764 | static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags) |
Theodore Ts'o | e408e69 | 2022-07-14 21:59:12 -0400 | [diff] [blame] | 2765 | { |
Hugh Dickins | cb24133 | 2022-08-10 21:51:09 -0700 | [diff] [blame] | 2766 | unsigned int i_flags = 0; |
| 2767 | |
| 2768 | if (fsflags & FS_NOATIME_FL) |
| 2769 | i_flags |= S_NOATIME; |
| 2770 | if (fsflags & FS_APPEND_FL) |
| 2771 | i_flags |= S_APPEND; |
| 2772 | if (fsflags & FS_IMMUTABLE_FL) |
| 2773 | i_flags |= S_IMMUTABLE; |
| 2774 | /* |
| 2775 | * But FS_NODUMP_FL does not require any action in i_flags. |
| 2776 | */ |
| 2777 | inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE); |
Theodore Ts'o | e408e69 | 2022-07-14 21:59:12 -0400 | [diff] [blame] | 2778 | } |
Hugh Dickins | cb24133 | 2022-08-10 21:51:09 -0700 | [diff] [blame] | 2779 | #else |
| 2780 | static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags) |
| 2781 | { |
| 2782 | } |
| 2783 | #define shmem_initxattrs NULL |
| 2784 | #endif |
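| | |
| | /* |
| | * The fsflags handled above normally arrive from userspace via chattr(1), |
| | * e.g. "chattr +i file" on tmpfs sets FS_IMMUTABLE_FL through the common |
| | * fileattr ioctl path (FS_IOC_SETFLAGS), and shmem_set_inode_flags() then |
| | * mirrors it into S_IMMUTABLE on the in-core inode. |
| | */ |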
Theodore Ts'o | e408e69 | 2022-07-14 21:59:12 -0400 | [diff] [blame] | 2785 | |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 2786 | static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode) |
| 2787 | { |
| 2788 | return &SHMEM_I(inode)->dir_offsets; |
| 2789 | } |
| 2790 | |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 2791 | static struct inode *__shmem_get_inode(struct mnt_idmap *idmap, |
| 2792 | struct super_block *sb, |
| 2793 | struct inode *dir, umode_t mode, |
| 2794 | dev_t dev, unsigned long flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2795 | { |
| 2796 | struct inode *inode; |
| 2797 | struct shmem_inode_info *info; |
| 2798 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
Chris Down | e809d5f | 2020-08-06 23:20:20 -0700 | [diff] [blame] | 2799 | ino_t ino; |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 2800 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2801 | |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 2802 | err = shmem_reserve_inode(sb, &ino); |
| 2803 | if (err) |
| 2804 | return ERR_PTR(err); |
| 2805 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2806 | inode = new_inode(sb); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 2807 | if (!inode) { |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 2808 | shmem_free_inode(sb, 0); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 2809 | return ERR_PTR(-ENOSPC); |
| 2810 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2811 | |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 2812 | inode->i_ino = ino; |
| 2813 | inode_init_owner(idmap, inode, dir, mode); |
| 2814 | inode->i_blocks = 0; |
Jeff Layton | cf2766b | 2023-10-04 14:53:07 -0400 | [diff] [blame] | 2815 | simple_inode_init_ts(inode); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 2816 | inode->i_generation = get_random_u32(); |
| 2817 | info = SHMEM_I(inode); |
| 2818 | memset(info, 0, (char *)inode - (char *)info); |
| 2819 | spin_lock_init(&info->lock); |
| 2820 | atomic_set(&info->stop_eviction, 0); |
| 2821 | info->seals = F_SEAL_SEAL; |
| 2822 | info->flags = flags & VM_NORESERVE; |
Jeff Layton | cf2766b | 2023-10-04 14:53:07 -0400 | [diff] [blame] | 2823 | info->i_crtime = inode_get_mtime(inode); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 2824 | info->fsflags = (dir == NULL) ? 0 : |
| 2825 | SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED; |
| 2826 | if (info->fsflags) |
| 2827 | shmem_set_inode_flags(inode, info->fsflags); |
| 2828 | INIT_LIST_HEAD(&info->shrinklist); |
| 2829 | INIT_LIST_HEAD(&info->swaplist); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 2830 | simple_xattrs_init(&info->xattrs); |
| 2831 | cache_no_acl(inode); |
Hugh Dickins | 9be7d5b | 2023-09-29 20:28:50 -0700 | [diff] [blame] | 2832 | if (sbinfo->noswap) |
| 2833 | mapping_set_unevictable(inode->i_mapping); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 2834 | mapping_set_large_folios(inode->i_mapping); |
Joel Fernandes (Google) | b45d71f | 2018-09-20 12:22:39 -0700 | [diff] [blame] | 2835 | |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 2836 | switch (mode & S_IFMT) { |
| 2837 | default: |
| 2838 | inode->i_op = &shmem_special_inode_operations; |
| 2839 | init_special_inode(inode, mode, dev); |
| 2840 | break; |
| 2841 | case S_IFREG: |
| 2842 | inode->i_mapping->a_ops = &shmem_aops; |
| 2843 | inode->i_op = &shmem_inode_operations; |
| 2844 | inode->i_fop = &shmem_file_operations; |
| 2845 | mpol_shared_policy_init(&info->policy, |
| 2846 | shmem_get_sbmpol(sbinfo)); |
| 2847 | break; |
| 2848 | case S_IFDIR: |
| 2849 | inc_nlink(inode); |
| 2850 | /* Some things misbehave if size == 0 on a directory */ |
| 2851 | inode->i_size = 2 * BOGO_DIRENT_SIZE; |
| 2852 | inode->i_op = &shmem_dir_inode_operations; |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 2853 | inode->i_fop = &simple_offset_dir_operations; |
| 2854 | simple_offset_init(shmem_get_offset_ctx(inode)); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 2855 | break; |
| 2856 | case S_IFLNK: |
| 2857 | /* |
| 2858 | * Must not load anything in the rbtree, |
| 2859 | * mpol_free_shared_policy will not be called. |
| 2860 | */ |
| 2861 | mpol_shared_policy_init(&info->policy, NULL); |
| 2862 | break; |
| 2863 | } |
| 2864 | |
| 2865 | lockdep_annotate_inode_mutex_key(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2866 | return inode; |
| 2867 | } |
| 2868 | |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 2869 | #ifdef CONFIG_TMPFS_QUOTA |
| 2870 | static struct inode *shmem_get_inode(struct mnt_idmap *idmap, |
| 2871 | struct super_block *sb, struct inode *dir, |
| 2872 | umode_t mode, dev_t dev, unsigned long flags) |
| 2873 | { |
| 2874 | int err; |
| 2875 | struct inode *inode; |
| 2876 | |
| 2877 | inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags); |
| 2878 | if (IS_ERR(inode)) |
| 2879 | return inode; |
| 2880 | |
| 2881 | err = dquot_initialize(inode); |
| 2882 | if (err) |
| 2883 | goto errout; |
| 2884 | |
| 2885 | err = dquot_alloc_inode(inode); |
| 2886 | if (err) { |
| 2887 | dquot_drop(inode); |
| 2888 | goto errout; |
| 2889 | } |
| 2890 | return inode; |
| 2891 | |
| 2892 | errout: |
| 2893 | inode->i_flags |= S_NOQUOTA; |
| 2894 | iput(inode); |
| 2895 | return ERR_PTR(err); |
| 2896 | } |
| 2897 | #else |
| 2898 | static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, |
| 2899 | struct super_block *sb, struct inode *dir, |
| 2900 | umode_t mode, dev_t dev, unsigned long flags) |
| 2901 | { |
| 2902 | return __shmem_get_inode(idmap, sb, dir, mode, dev, flags); |
| 2903 | } |
| 2904 | #endif /* CONFIG_TMPFS_QUOTA */ |
| 2905 | |
Axel Rasmussen | 3460f6e | 2021-06-30 18:49:17 -0700 | [diff] [blame] | 2906 | #ifdef CONFIG_USERFAULTFD |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 2907 | int shmem_mfill_atomic_pte(pmd_t *dst_pmd, |
Axel Rasmussen | 3460f6e | 2021-06-30 18:49:17 -0700 | [diff] [blame] | 2908 | struct vm_area_struct *dst_vma, |
| 2909 | unsigned long dst_addr, |
| 2910 | unsigned long src_addr, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 2911 | uffd_flags_t flags, |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 2912 | struct folio **foliop) |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 2913 | { |
| 2914 | struct inode *inode = file_inode(dst_vma->vm_file); |
| 2915 | struct shmem_inode_info *info = SHMEM_I(inode); |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 2916 | struct address_space *mapping = inode->i_mapping; |
| 2917 | gfp_t gfp = mapping_gfp_mask(mapping); |
| 2918 | pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 2919 | void *page_kaddr; |
Matthew Wilcox (Oracle) | b7dd44a | 2022-05-12 20:23:04 -0700 | [diff] [blame] | 2920 | struct folio *folio; |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 2921 | int ret; |
Axel Rasmussen | 3460f6e | 2021-06-30 18:49:17 -0700 | [diff] [blame] | 2922 | pgoff_t max_off; |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 2923 | |
Hugh Dickins | 4199f51 | 2023-09-29 20:30:03 -0700 | [diff] [blame] | 2924 | if (shmem_inode_acct_blocks(inode, 1)) { |
Axel Rasmussen | 7ed9d23 | 2021-05-14 17:27:19 -0700 | [diff] [blame] | 2925 | /* |
| 2926 | * We may have got a folio, returned -ENOENT triggering a retry, |
| 2927 | * and now we find ourselves with -ENOMEM. Release the folio, to |
| 2928 | * avoid a BUG_ON in our caller. |
| 2929 | */ |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 2930 | if (unlikely(*foliop)) { |
| 2931 | folio_put(*foliop); |
| 2932 | *foliop = NULL; |
Axel Rasmussen | 7ed9d23 | 2021-05-14 17:27:19 -0700 | [diff] [blame] | 2933 | } |
Axel Rasmussen | 7d64ae3 | 2021-06-30 18:49:31 -0700 | [diff] [blame] | 2934 | return -ENOMEM; |
Axel Rasmussen | 7ed9d23 | 2021-05-14 17:27:19 -0700 | [diff] [blame] | 2935 | } |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 2936 | |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 2937 | if (!*foliop) { |
Axel Rasmussen | 7d64ae3 | 2021-06-30 18:49:31 -0700 | [diff] [blame] | 2938 | ret = -ENOMEM; |
Kefeng Wang | 6f77546 | 2024-05-15 15:07:09 +0800 | [diff] [blame] | 2939 | folio = shmem_alloc_folio(gfp, 0, info, pgoff); |
Matthew Wilcox (Oracle) | 7a7256d | 2022-09-02 20:46:13 +0100 | [diff] [blame] | 2940 | if (!folio) |
Mike Rapoport | 0f07969 | 2017-09-06 16:22:59 -0700 | [diff] [blame] | 2941 | goto out_unacct_blocks; |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 2942 | |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 2943 | if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) { |
Matthew Wilcox (Oracle) | 7a7256d | 2022-09-02 20:46:13 +0100 | [diff] [blame] | 2944 | page_kaddr = kmap_local_folio(folio, 0); |
Ira Weiny | 5dc21f0 | 2022-10-25 15:01:08 -0700 | [diff] [blame] | 2945 | /* |
| 2946 | * The read mmap_lock is held here. Despite the |
| 2947 | * mmap_lock being read recursive a deadlock is still |
| 2948 | * possible if a writer has taken a lock. For example: |
| 2949 | * |
| 2950 | * process A thread 1 takes read lock on own mmap_lock |
| 2951 | * process A thread 2 calls mmap, blocks taking write lock |
| 2952 | * process B thread 1 takes page fault, read lock on own mmap lock |
| 2953 | * process B thread 2 calls mmap, blocks taking write lock |
| 2954 | * process A thread 1 blocks taking read lock on process B |
| 2955 | * process B thread 1 blocks taking read lock on process A |
| 2956 | * |
| 2957 | * Disable page faults to prevent potential deadlock |
| 2958 | * and retry the copy outside the mmap_lock. |
| 2959 | */ |
| 2960 | pagefault_disable(); |
Mike Rapoport | 8d10396 | 2017-09-06 16:23:02 -0700 | [diff] [blame] | 2961 | ret = copy_from_user(page_kaddr, |
| 2962 | (const void __user *)src_addr, |
| 2963 | PAGE_SIZE); |
Ira Weiny | 5dc21f0 | 2022-10-25 15:01:08 -0700 | [diff] [blame] | 2964 | pagefault_enable(); |
Matthew Wilcox (Oracle) | 7a7256d | 2022-09-02 20:46:13 +0100 | [diff] [blame] | 2965 | kunmap_local(page_kaddr); |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 2966 | |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 2967 | /* fallback to copy_from_user outside mmap_lock */ |
Mike Rapoport | 8d10396 | 2017-09-06 16:23:02 -0700 | [diff] [blame] | 2968 | if (unlikely(ret)) { |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 2969 | *foliop = folio; |
Axel Rasmussen | 7d64ae3 | 2021-06-30 18:49:31 -0700 | [diff] [blame] | 2970 | ret = -ENOENT; |
Mike Rapoport | 8d10396 | 2017-09-06 16:23:02 -0700 | [diff] [blame] | 2971 | /* don't free the page */ |
Axel Rasmussen | 7d64ae3 | 2021-06-30 18:49:31 -0700 | [diff] [blame] | 2972 | goto out_unacct_blocks; |
Mike Rapoport | 8d10396 | 2017-09-06 16:23:02 -0700 | [diff] [blame] | 2973 | } |
Muchun Song | 19b482c | 2022-03-22 14:42:05 -0700 | [diff] [blame] | 2974 | |
Matthew Wilcox (Oracle) | 7a7256d | 2022-09-02 20:46:13 +0100 | [diff] [blame] | 2975 | flush_dcache_folio(folio); |
Axel Rasmussen | 3460f6e | 2021-06-30 18:49:17 -0700 | [diff] [blame] | 2976 | } else { /* ZEROPAGE */ |
Matthew Wilcox (Oracle) | 7a7256d | 2022-09-02 20:46:13 +0100 | [diff] [blame] | 2977 | clear_user_highpage(&folio->page, dst_addr); |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 2978 | } |
| 2979 | } else { |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 2980 | folio = *foliop; |
Matthew Wilcox (Oracle) | 7a7256d | 2022-09-02 20:46:13 +0100 | [diff] [blame] | 2981 | VM_BUG_ON_FOLIO(folio_test_large(folio), folio); |
ZhangPeng | d7be6d7e | 2023-04-10 21:39:32 +0800 | [diff] [blame] | 2982 | *foliop = NULL; |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 2983 | } |
| 2984 | |
Matthew Wilcox (Oracle) | 7a7256d | 2022-09-02 20:46:13 +0100 | [diff] [blame] | 2985 | VM_BUG_ON(folio_test_locked(folio)); |
| 2986 | VM_BUG_ON(folio_test_swapbacked(folio)); |
| 2987 | __folio_set_locked(folio); |
| 2988 | __folio_set_swapbacked(folio); |
| 2989 | __folio_mark_uptodate(folio); |
Andrea Arcangeli | 9cc90c6 | 2017-02-22 15:43:49 -0800 | [diff] [blame] | 2990 | |
Andrea Arcangeli | e2a50c1 | 2018-11-30 14:09:37 -0800 | [diff] [blame] | 2991 | ret = -EFAULT; |
Andrea Arcangeli | e2a50c1 | 2018-11-30 14:09:37 -0800 | [diff] [blame] | 2992 | max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); |
Axel Rasmussen | 3460f6e | 2021-06-30 18:49:17 -0700 | [diff] [blame] | 2993 | if (unlikely(pgoff >= max_off)) |
Andrea Arcangeli | e2a50c1 | 2018-11-30 14:09:37 -0800 | [diff] [blame] | 2994 | goto out_release; |
| 2995 | |
Hugh Dickins | 054a9f7 | 2023-09-29 20:31:27 -0700 | [diff] [blame] | 2996 | ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp); |
| 2997 | if (ret) |
| 2998 | goto out_release; |
| 2999 | ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp); |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 3000 | if (ret) |
| 3001 | goto out_release; |
| 3002 | |
Axel Rasmussen | 61c5004 | 2023-03-14 15:12:48 -0700 | [diff] [blame] | 3003 | ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr, |
Axel Rasmussen | d971293 | 2023-03-14 15:12:49 -0700 | [diff] [blame] | 3004 | &folio->page, true, flags); |
Axel Rasmussen | 7d64ae3 | 2021-06-30 18:49:31 -0700 | [diff] [blame] | 3005 | if (ret) |
| 3006 | goto out_delete_from_cache; |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 3007 | |
Hugh Dickins | 3c1b752 | 2023-08-03 22:46:11 -0700 | [diff] [blame] | 3008 | shmem_recalc_inode(inode, 1, 0); |
Matthew Wilcox (Oracle) | 7a7256d | 2022-09-02 20:46:13 +0100 | [diff] [blame] | 3009 | folio_unlock(folio); |
Axel Rasmussen | 7d64ae3 | 2021-06-30 18:49:31 -0700 | [diff] [blame] | 3010 | return 0; |
| 3011 | out_delete_from_cache: |
Matthew Wilcox (Oracle) | 7a7256d | 2022-09-02 20:46:13 +0100 | [diff] [blame] | 3012 | filemap_remove_folio(folio); |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 3013 | out_release: |
Matthew Wilcox (Oracle) | 7a7256d | 2022-09-02 20:46:13 +0100 | [diff] [blame] | 3014 | folio_unlock(folio); |
| 3015 | folio_put(folio); |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 3016 | out_unacct_blocks: |
Mike Rapoport | 0f07969 | 2017-09-06 16:22:59 -0700 | [diff] [blame] | 3017 | shmem_inode_unacct_blocks(inode, 1); |
Axel Rasmussen | 7d64ae3 | 2021-06-30 18:49:31 -0700 | [diff] [blame] | 3018 | return ret; |
Mike Rapoport | 4c27fe4 | 2017-02-22 15:43:25 -0800 | [diff] [blame] | 3019 | } |
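| | |
| | /* |
| | * Note on the retry protocol above: if copy_from_user() faults while the |
| | * mmap_lock is held, the freshly allocated folio is handed back to the |
| | * caller in *foliop and -ENOENT is returned; the caller (mfill_atomic(), |
| | * servicing UFFDIO_COPY) drops the mmap_lock, performs the copy without |
| | * it, and retries, passing the same folio back in so the allocation is |
| | * not wasted. |
| | */ |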
Axel Rasmussen | 3460f6e | 2021-06-30 18:49:17 -0700 | [diff] [blame] | 3020 | #endif /* CONFIG_USERFAULTFD */ |
Mike Rapoport | 8d10396 | 2017-09-06 16:23:02 -0700 | [diff] [blame] | 3021 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3022 | #ifdef CONFIG_TMPFS |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 3023 | static const struct inode_operations shmem_symlink_inode_operations; |
Hugh Dickins | 69f07ec | 2011-08-03 16:21:26 -0700 | [diff] [blame] | 3024 | static const struct inode_operations shmem_short_symlink_operations; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3025 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3026 | static int |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 3027 | shmem_write_begin(struct file *file, struct address_space *mapping, |
Matthew Wilcox (Oracle) | 9d6b0cd | 2022-02-22 14:31:43 -0500 | [diff] [blame] | 3028 | loff_t pos, unsigned len, |
Matthew Wilcox (Oracle) | 1da8661 | 2024-07-15 14:24:01 -0400 | [diff] [blame] | 3029 | struct folio **foliop, void **fsdata) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3030 | { |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 3031 | struct inode *inode = mapping->host; |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 3032 | struct shmem_inode_info *info = SHMEM_I(inode); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3033 | pgoff_t index = pos >> PAGE_SHIFT; |
Matthew Wilcox (Oracle) | eff1f90 | 2022-09-02 20:46:22 +0100 | [diff] [blame] | 3034 | struct folio *folio; |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 3035 | int ret = 0; |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 3036 | |
Jan Kara | 9608703 | 2021-04-12 15:50:21 +0200 | [diff] [blame] | 3037 | /* i_rwsem is held by caller */ |
Joel Fernandes (Google) | ab3948f | 2019-03-05 15:47:54 -0800 | [diff] [blame] | 3038 | if (unlikely(info->seals & (F_SEAL_GROW | |
| 3039 | F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) { |
| 3040 | if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 3041 | return -EPERM; |
| 3042 | if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) |
| 3043 | return -EPERM; |
| 3044 | } |
| 3045 | |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 3046 | ret = shmem_get_folio(inode, index, pos + len, &folio, SGP_WRITE); |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 3047 | if (ret) |
| 3048 | return ret; |
| 3049 | |
Matthew Wilcox (Oracle) | 1da8661 | 2024-07-15 14:24:01 -0400 | [diff] [blame] | 3050 | if (folio_test_hwpoison(folio) || |
| 3051 | (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) { |
Matthew Wilcox (Oracle) | eff1f90 | 2022-09-02 20:46:22 +0100 | [diff] [blame] | 3052 | folio_unlock(folio); |
| 3053 | folio_put(folio); |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 3054 | return -EIO; |
| 3055 | } |
| 3056 | |
Matthew Wilcox (Oracle) | 1da8661 | 2024-07-15 14:24:01 -0400 | [diff] [blame] | 3057 | *foliop = folio; |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 3058 | return 0; |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 3059 | } |
| 3060 | |
| 3061 | static int |
| 3062 | shmem_write_end(struct file *file, struct address_space *mapping, |
| 3063 | loff_t pos, unsigned len, unsigned copied, |
Matthew Wilcox (Oracle) | a225800 | 2024-07-10 15:45:32 -0400 | [diff] [blame] | 3064 | struct folio *folio, void *fsdata) |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 3065 | { |
| 3066 | struct inode *inode = mapping->host; |
| 3067 | |
Hugh Dickins | d360244 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 3068 | if (pos + copied > inode->i_size) |
| 3069 | i_size_write(inode, pos + copied); |
| 3070 | |
Matthew Wilcox (Oracle) | 69bbb87 | 2023-01-12 13:10:31 +0000 | [diff] [blame] | 3071 | if (!folio_test_uptodate(folio)) { |
| 3072 | if (copied < folio_size(folio)) { |
| 3073 | size_t from = offset_in_folio(folio, pos); |
| 3074 | folio_zero_segments(folio, 0, from, |
| 3075 | from + copied, folio_size(folio)); |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 3076 | } |
Matthew Wilcox (Oracle) | 69bbb87 | 2023-01-12 13:10:31 +0000 | [diff] [blame] | 3077 | folio_mark_uptodate(folio); |
Hugh Dickins | ec9516f | 2012-05-29 15:06:39 -0700 | [diff] [blame] | 3078 | } |
Matthew Wilcox (Oracle) | 69bbb87 | 2023-01-12 13:10:31 +0000 | [diff] [blame] | 3079 | folio_mark_dirty(folio); |
| 3080 | folio_unlock(folio); |
| 3081 | folio_put(folio); |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 3082 | |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 3083 | return copied; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3084 | } |
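| | |
| | /* |
| | * The write_begin/write_end pair above is driven by generic_perform_write() |
| | * from shmem_file_write_iter(): roughly, for each chunk it calls |
| | * ->write_begin() to get a locked folio, copies the user data in, then |
| | * calls ->write_end(), which extends i_size if needed, zeroes any uncopied |
| | * part of a not-yet-uptodate folio before marking it uptodate, and finally |
| | * dirties, unlocks and releases the folio. |
| | */ |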
| 3085 | |
Al Viro | 2ba5bbe | 2014-04-02 20:00:02 -0400 | [diff] [blame] | 3086 | static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3087 | { |
Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 3088 | struct file *file = iocb->ki_filp; |
| 3089 | struct inode *inode = file_inode(file); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3090 | struct address_space *mapping = inode->i_mapping; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3091 | pgoff_t index; |
| 3092 | unsigned long offset; |
Geert Uytterhoeven | f7c1d07 | 2014-04-13 20:46:22 +0200 | [diff] [blame] | 3093 | int error = 0; |
Al Viro | cb66a7a | 2014-03-04 15:24:06 -0500 | [diff] [blame] | 3094 | ssize_t retval = 0; |
Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 3095 | loff_t *ppos = &iocb->ki_pos; |
Hugh Dickins | a0ee5ec | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 3096 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3097 | index = *ppos >> PAGE_SHIFT; |
| 3098 | offset = *ppos & ~PAGE_MASK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3099 | |
| 3100 | for (;;) { |
Matthew Wilcox (Oracle) | 4601e2f | 2022-09-02 20:46:23 +0100 | [diff] [blame] | 3101 | struct folio *folio = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3102 | struct page *page = NULL; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3103 | pgoff_t end_index; |
| 3104 | unsigned long nr, ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3105 | loff_t i_size = i_size_read(inode); |
| 3106 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3107 | end_index = i_size >> PAGE_SHIFT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3108 | if (index > end_index) |
| 3109 | break; |
| 3110 | if (index == end_index) { |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3111 | nr = i_size & ~PAGE_MASK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3112 | if (nr <= offset) |
| 3113 | break; |
| 3114 | } |
| 3115 | |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 3116 | error = shmem_get_folio(inode, index, 0, &folio, SGP_READ); |
Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 3117 | if (error) { |
| 3118 | if (error == -EINVAL) |
| 3119 | error = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3120 | break; |
| 3121 | } |
Matthew Wilcox (Oracle) | 4601e2f | 2022-09-02 20:46:23 +0100 | [diff] [blame] | 3122 | if (folio) { |
| 3123 | folio_unlock(folio); |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 3124 | |
Matthew Wilcox (Oracle) | 4601e2f | 2022-09-02 20:46:23 +0100 | [diff] [blame] | 3125 | page = folio_file_page(folio, index); |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 3126 | if (PageHWPoison(page)) { |
Matthew Wilcox (Oracle) | 4601e2f | 2022-09-02 20:46:23 +0100 | [diff] [blame] | 3127 | folio_put(folio); |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 3128 | error = -EIO; |
| 3129 | break; |
| 3130 | } |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 3131 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3132 | |
| 3133 | /* |
| 3134 | * We must re-evaluate i_size after getting the folio, since reads (unlike writes) |
Jan Kara | 9608703 | 2021-04-12 15:50:21 +0200 | [diff] [blame] | 3135 | * are called without i_rwsem protection against truncate |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3136 | */ |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3137 | nr = PAGE_SIZE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3138 | i_size = i_size_read(inode); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3139 | end_index = i_size >> PAGE_SHIFT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3140 | if (index == end_index) { |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3141 | nr = i_size & ~PAGE_MASK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3142 | if (nr <= offset) { |
Matthew Wilcox (Oracle) | 4601e2f | 2022-09-02 20:46:23 +0100 | [diff] [blame] | 3143 | if (folio) |
| 3144 | folio_put(folio); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3145 | break; |
| 3146 | } |
| 3147 | } |
| 3148 | nr -= offset; |
| 3149 | |
Matthew Wilcox (Oracle) | 4601e2f | 2022-09-02 20:46:23 +0100 | [diff] [blame] | 3150 | if (folio) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3151 | /* |
| 3152 | * If users can be writing to this page using arbitrary |
| 3153 | * virtual addresses, take care about potential aliasing |
| 3154 | * before reading the page on the kernel side. |
| 3155 | */ |
| 3156 | if (mapping_writably_mapped(mapping)) |
| 3157 | flush_dcache_page(page); |
| 3158 | /* |
| 3159 | * Mark the page accessed if we read the beginning. |
| 3160 | */ |
| 3161 | if (!offset) |
Matthew Wilcox (Oracle) | 4601e2f | 2022-09-02 20:46:23 +0100 | [diff] [blame] | 3162 | folio_mark_accessed(folio); |
Hugh Dickins | 1bdec44b | 2022-04-14 19:13:27 -0700 | [diff] [blame] | 3163 | /* |
| 3164 | * Ok, we have the page, and it's up-to-date, so |
| 3165 | * now we can copy it to user space... |
| 3166 | */ |
| 3167 | ret = copy_page_to_iter(page, offset, nr, to); |
Matthew Wilcox (Oracle) | 4601e2f | 2022-09-02 20:46:23 +0100 | [diff] [blame] | 3168 | folio_put(folio); |
Hugh Dickins | 1bdec44b | 2022-04-14 19:13:27 -0700 | [diff] [blame] | 3169 | |
Al Viro | fcb14cb | 2022-05-22 14:59:25 -0400 | [diff] [blame] | 3170 | } else if (user_backed_iter(to)) { |
Hugh Dickins | 1bdec44b | 2022-04-14 19:13:27 -0700 | [diff] [blame] | 3171 | /* |
| 3172 | * copy_to_user() tends to be so well optimized, and |
| 3173 | * clear_user() so much less so, that it is noticeably |
| 3174 | * faster to copy the zero page than to clear the user buffer. |
| 3175 | */ |
| 3176 | ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to); |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 3177 | } else { |
Hugh Dickins | 1bdec44b | 2022-04-14 19:13:27 -0700 | [diff] [blame] | 3178 | /* |
| 3179 | * But submitting the same page twice in a row to |
| 3180 | * splice() - or others? - can result in confusion: |
| 3181 | * so don't attempt that optimization on pipes etc. |
| 3182 | */ |
| 3183 | ret = iov_iter_zero(nr, to); |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 3184 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3185 | |
Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 3186 | retval += ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3187 | offset += ret; |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3188 | index += offset >> PAGE_SHIFT; |
| 3189 | offset &= ~PAGE_MASK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3190 | |
Al Viro | 2ba5bbe | 2014-04-02 20:00:02 -0400 | [diff] [blame] | 3191 | if (!iov_iter_count(to)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3192 | break; |
Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 3193 | if (ret < nr) { |
| 3194 | error = -EFAULT; |
| 3195 | break; |
| 3196 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3197 | cond_resched(); |
| 3198 | } |
| 3199 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3200 | *ppos = ((loff_t) index << PAGE_SHIFT) + offset; |
Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 3201 | file_accessed(file); |
| 3202 | return retval ? retval : error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3203 | } |
| 3204 | |
Hugh Dickins | e88e0d3 | 2023-08-10 23:27:07 -0700 | [diff] [blame] | 3205 | static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from) |
| 3206 | { |
| 3207 | struct file *file = iocb->ki_filp; |
| 3208 | struct inode *inode = file->f_mapping->host; |
| 3209 | ssize_t ret; |
| 3210 | |
| 3211 | inode_lock(inode); |
| 3212 | ret = generic_write_checks(iocb, from); |
| 3213 | if (ret <= 0) |
| 3214 | goto unlock; |
| 3215 | ret = file_remove_privs(file); |
| 3216 | if (ret) |
| 3217 | goto unlock; |
| 3218 | ret = file_update_time(file); |
| 3219 | if (ret) |
| 3220 | goto unlock; |
| 3221 | ret = generic_perform_write(iocb, from); |
| 3222 | unlock: |
| 3223 | inode_unlock(inode); |
| 3224 | return ret; |
| 3225 | } |
| 3226 | |
David Howells | bd194b1 | 2023-05-22 14:49:56 +0100 | [diff] [blame] | 3227 | static bool zero_pipe_buf_get(struct pipe_inode_info *pipe, |
| 3228 | struct pipe_buffer *buf) |
| 3229 | { |
| 3230 | return true; |
| 3231 | } |
| 3232 | |
| 3233 | static void zero_pipe_buf_release(struct pipe_inode_info *pipe, |
| 3234 | struct pipe_buffer *buf) |
| 3235 | { |
| 3236 | } |
| 3237 | |
| 3238 | static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe, |
| 3239 | struct pipe_buffer *buf) |
| 3240 | { |
| 3241 | return false; |
| 3242 | } |
| 3243 | |
| 3244 | static const struct pipe_buf_operations zero_pipe_buf_ops = { |
| 3245 | .release = zero_pipe_buf_release, |
| 3246 | .try_steal = zero_pipe_buf_try_steal, |
| 3247 | .get = zero_pipe_buf_get, |
| 3248 | }; |
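| | |
| | /* |
| | * These pipe_buf operations can all be trivial because the buffer only |
| | * ever wraps the global ZERO_PAGE(0): ->release() has no reference of its |
| | * own to drop, ->try_steal() must refuse so the shared zero page is never |
| | * handed over for reuse, and ->get() can succeed unconditionally. |
| | */ |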
| 3249 | |
| 3250 | static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe, |
| 3251 | loff_t fpos, size_t size) |
| 3252 | { |
| 3253 | size_t offset = fpos & ~PAGE_MASK; |
| 3254 | |
| 3255 | size = min_t(size_t, size, PAGE_SIZE - offset); |
| 3256 | |
| 3257 | if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { |
| 3258 | struct pipe_buffer *buf = pipe_head_buf(pipe); |
| 3259 | |
| 3260 | *buf = (struct pipe_buffer) { |
| 3261 | .ops = &zero_pipe_buf_ops, |
| 3262 | .page = ZERO_PAGE(0), |
| 3263 | .offset = offset, |
| 3264 | .len = size, |
| 3265 | }; |
| 3266 | pipe->head++; |
| 3267 | } |
| 3268 | |
| 3269 | return size; |
| 3270 | } |
| 3271 | |
| 3272 | static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, |
| 3273 | struct pipe_inode_info *pipe, |
| 3274 | size_t len, unsigned int flags) |
| 3275 | { |
| 3276 | struct inode *inode = file_inode(in); |
| 3277 | struct address_space *mapping = inode->i_mapping; |
| 3278 | struct folio *folio = NULL; |
| 3279 | size_t total_spliced = 0, used, npages, n, part; |
| 3280 | loff_t isize; |
| 3281 | int error = 0; |
| 3282 | |
| 3283 | /* Work out how much data we can actually add into the pipe */ |
| 3284 | used = pipe_occupancy(pipe->head, pipe->tail); |
| 3285 | npages = max_t(ssize_t, pipe->max_usage - used, 0); |
| 3286 | len = min_t(size_t, len, npages * PAGE_SIZE); |
| 3287 | |
| 3288 | do { |
| 3289 | if (*ppos >= i_size_read(inode)) |
| 3290 | break; |
| 3291 | |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 3292 | error = shmem_get_folio(inode, *ppos / PAGE_SIZE, 0, &folio, |
Hugh Dickins | fa59895 | 2023-07-23 14:05:54 -0700 | [diff] [blame] | 3293 | SGP_READ); |
David Howells | bd194b1 | 2023-05-22 14:49:56 +0100 | [diff] [blame] | 3294 | if (error) { |
| 3295 | if (error == -EINVAL) |
| 3296 | error = 0; |
| 3297 | break; |
| 3298 | } |
| 3299 | if (folio) { |
| 3300 | folio_unlock(folio); |
| 3301 | |
Hugh Dickins | fa59895 | 2023-07-23 14:05:54 -0700 | [diff] [blame] | 3302 | if (folio_test_hwpoison(folio) || |
| 3303 | (folio_test_large(folio) && |
| 3304 | folio_test_has_hwpoisoned(folio))) { |
David Howells | bd194b1 | 2023-05-22 14:49:56 +0100 | [diff] [blame] | 3305 | error = -EIO; |
| 3306 | break; |
| 3307 | } |
| 3308 | } |
| 3309 | |
| 3310 | /* |
| 3311 | * i_size must be checked after we know the pages are Uptodate. |
| 3312 | * |
| 3313 | * Checking i_size only after the folio is known uptodate lets us |
| 3314 | * calculate the correct value for "part", so that the zero-filled |
| 3315 | * tail of the folio is not spliced into the pipe (unless |
| 3316 | * another truncate extends the file - this is desired though). |
| 3317 | */ |
| 3318 | isize = i_size_read(inode); |
| 3319 | if (unlikely(*ppos >= isize)) |
| 3320 | break; |
| 3321 | part = min_t(loff_t, isize - *ppos, len); |
| 3322 | |
| 3323 | if (folio) { |
| 3324 | /* |
| 3325 | * If users can be writing to this page using arbitrary |
| 3326 | * virtual addresses, take care about potential aliasing |
| 3327 | * before reading the page on the kernel side. |
| 3328 | */ |
| 3329 | if (mapping_writably_mapped(mapping)) |
| 3330 | flush_dcache_folio(folio); |
| 3331 | folio_mark_accessed(folio); |
| 3332 | /* |
| 3333 | * Ok, we have the page, and it's up-to-date, so we can |
| 3334 | * now splice it into the pipe. |
| 3335 | */ |
| 3336 | n = splice_folio_into_pipe(pipe, folio, *ppos, part); |
| 3337 | folio_put(folio); |
| 3338 | folio = NULL; |
| 3339 | } else { |
Hugh Dickins | fa59895 | 2023-07-23 14:05:54 -0700 | [diff] [blame] | 3340 | n = splice_zeropage_into_pipe(pipe, *ppos, part); |
David Howells | bd194b1 | 2023-05-22 14:49:56 +0100 | [diff] [blame] | 3341 | } |
| 3342 | |
| 3343 | if (!n) |
| 3344 | break; |
| 3345 | len -= n; |
| 3346 | total_spliced += n; |
| 3347 | *ppos += n; |
| 3348 | in->f_ra.prev_pos = *ppos; |
| 3349 | if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) |
| 3350 | break; |
| 3351 | |
| 3352 | cond_resched(); |
| 3353 | } while (len); |
| 3354 | |
| 3355 | if (folio) |
| 3356 | folio_put(folio); |
| 3357 | |
| 3358 | file_accessed(in); |
| 3359 | return total_spliced ? total_spliced : error; |
| 3360 | } |
| 3361 | |
Andrew Morton | 965c8e5 | 2012-12-17 15:59:39 -0800 | [diff] [blame] | 3362 | static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 3363 | { |
| 3364 | struct address_space *mapping = file->f_mapping; |
| 3365 | struct inode *inode = mapping->host; |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 3366 | |
Andrew Morton | 965c8e5 | 2012-12-17 15:59:39 -0800 | [diff] [blame] | 3367 | if (whence != SEEK_DATA && whence != SEEK_HOLE) |
| 3368 | return generic_file_llseek_size(file, offset, whence, |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 3369 | MAX_LFS_FILESIZE, i_size_read(inode)); |
Matthew Wilcox (Oracle) | 41139aa | 2021-02-25 17:15:48 -0800 | [diff] [blame] | 3370 | if (offset < 0) |
| 3371 | return -ENXIO; |
| 3372 | |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 3373 | inode_lock(inode); |
Jan Kara | 9608703 | 2021-04-12 15:50:21 +0200 | [diff] [blame] | 3374 | /* We're holding i_rwsem so we can access i_size directly */ |
Matthew Wilcox (Oracle) | 41139aa | 2021-02-25 17:15:48 -0800 | [diff] [blame] | 3375 | offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); |
Hugh Dickins | 387aae6 | 2013-08-04 11:30:25 -0700 | [diff] [blame] | 3376 | if (offset >= 0) |
| 3377 | offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 3378 | inode_unlock(inode); |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 3379 | return offset; |
| 3380 | } |
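| | |
| | /* |
| | * SEEK_HOLE/SEEK_DATA above let userspace walk the populated extents of a |
| | * sparse tmpfs file, e.g. (illustrative): |
| | * |
| | *	off_t data = lseek(fd, 0, SEEK_DATA); |
| | *	off_t hole = lseek(fd, data, SEEK_HOLE); |
| | * |
| | * which find the first offset backed by a page (or swap entry) and the |
| | * end of that data run; ranges never written are reported as holes. |
| | */ |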
| 3381 | |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 3382 | static long shmem_fallocate(struct file *file, int mode, loff_t offset, |
| 3383 | loff_t len) |
| 3384 | { |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 3385 | struct inode *inode = file_inode(file); |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 3386 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 3387 | struct shmem_inode_info *info = SHMEM_I(inode); |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 3388 | struct shmem_falloc shmem_falloc; |
Hugh Dickins | d144bf6 | 2021-09-02 14:54:21 -0700 | [diff] [blame] | 3389 | pgoff_t start, index, end, undo_fallocend; |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 3390 | int error; |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 3391 | |
Hugh Dickins | 13ace4d | 2014-06-23 13:22:03 -0700 | [diff] [blame] | 3392 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) |
| 3393 | return -EOPNOTSUPP; |
| 3394 | |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 3395 | inode_lock(inode); |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 3396 | |
| 3397 | if (mode & FALLOC_FL_PUNCH_HOLE) { |
| 3398 | struct address_space *mapping = file->f_mapping; |
| 3399 | loff_t unmap_start = round_up(offset, PAGE_SIZE); |
| 3400 | loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 3401 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 3402 | |
Jan Kara | 9608703 | 2021-04-12 15:50:21 +0200 | [diff] [blame] | 3403 | /* protected by i_rwsem */ |
Joel Fernandes (Google) | ab3948f | 2019-03-05 15:47:54 -0800 | [diff] [blame] | 3404 | if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 3405 | error = -EPERM; |
| 3406 | goto out; |
| 3407 | } |
| 3408 | |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 3409 | shmem_falloc.waitq = &shmem_falloc_waitq; |
Chen Jun | aa71ecd | 2019-11-30 17:58:11 -0800 | [diff] [blame] | 3410 | shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT; |
Hugh Dickins | f00cdc6 | 2014-06-23 13:22:06 -0700 | [diff] [blame] | 3411 | shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; |
| 3412 | spin_lock(&inode->i_lock); |
| 3413 | inode->i_private = &shmem_falloc; |
| 3414 | spin_unlock(&inode->i_lock); |
| 3415 | |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 3416 | if ((u64)unmap_end > (u64)unmap_start) |
| 3417 | unmap_mapping_range(mapping, unmap_start, |
| 3418 | 1 + unmap_end - unmap_start, 0); |
| 3419 | shmem_truncate_range(inode, offset, offset + len - 1); |
| 3420 | /* No need to unmap again: hole-punching leaves COWed pages */ |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 3421 | |
| 3422 | spin_lock(&inode->i_lock); |
| 3423 | inode->i_private = NULL; |
| 3424 | wake_up_all(&shmem_falloc_waitq); |
Ingo Molnar | 2055da9 | 2017-06-20 12:06:46 +0200 | [diff] [blame] | 3425 | WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 3426 | spin_unlock(&inode->i_lock); |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 3427 | error = 0; |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 3428 | goto out; |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 3429 | } |
| 3430 | |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 3431 | /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ |
| 3432 | error = inode_newsize_ok(inode, offset + len); |
| 3433 | if (error) |
| 3434 | goto out; |
| 3435 | |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 3436 | if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { |
| 3437 | error = -EPERM; |
| 3438 | goto out; |
| 3439 | } |
| 3440 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3441 | start = offset >> PAGE_SHIFT; |
| 3442 | end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 3443 | /* Try to avoid a swapstorm if len is impossible to satisfy */ |
| 3444 | if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { |
| 3445 | error = -ENOSPC; |
| 3446 | goto out; |
| 3447 | } |
| 3448 | |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 3449 | shmem_falloc.waitq = NULL; |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 3450 | shmem_falloc.start = start; |
| 3451 | shmem_falloc.next = start; |
| 3452 | shmem_falloc.nr_falloced = 0; |
| 3453 | shmem_falloc.nr_unswapped = 0; |
| 3454 | spin_lock(&inode->i_lock); |
| 3455 | inode->i_private = &shmem_falloc; |
| 3456 | spin_unlock(&inode->i_lock); |
| 3457 | |
Hugh Dickins | d144bf6 | 2021-09-02 14:54:21 -0700 | [diff] [blame] | 3458 | /* |
| 3459 | * info->fallocend is only relevant when huge pages might be |
| 3460 | * involved: to prevent split_huge_page() freeing fallocated |
| 3461 | * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size. |
| 3462 | */ |
| 3463 | undo_fallocend = info->fallocend; |
| 3464 | if (info->fallocend < end) |
| 3465 | info->fallocend = end; |
| 3466 | |
Hugh Dickins | 050dcb5 | 2021-09-02 14:54:18 -0700 | [diff] [blame] | 3467 | for (index = start; index < end; ) { |
Matthew Wilcox (Oracle) | b0802b2 | 2022-09-02 20:46:24 +0100 | [diff] [blame] | 3468 | struct folio *folio; |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 3469 | |
| 3470 | /* |
Mikulas Patocka | ca86a5d | 2024-05-16 00:10:44 +0200 | [diff] [blame] | 3471 | * Check for fatal signal so that we abort early in OOM |
| 3472 | * situations. We don't want to abort in case of non-fatal |
| 3473 | * signals as large fallocate can take noticeable time and |
| 3474 | * e.g. periodic timers may result in fallocate constantly |
| 3475 | * restarting. |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 3476 | */ |
Mikulas Patocka | ca86a5d | 2024-05-16 00:10:44 +0200 | [diff] [blame] | 3477 | if (fatal_signal_pending(current)) |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 3478 | error = -EINTR; |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 3479 | else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) |
| 3480 | error = -ENOMEM; |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 3481 | else |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 3482 | error = shmem_get_folio(inode, index, offset + len, |
| 3483 | &folio, SGP_FALLOC); |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 3484 | if (error) { |
Hugh Dickins | d144bf6 | 2021-09-02 14:54:21 -0700 | [diff] [blame] | 3485 | info->fallocend = undo_fallocend; |
Matthew Wilcox (Oracle) | b0802b2 | 2022-09-02 20:46:24 +0100 | [diff] [blame] | 3486 | /* Remove the !uptodate folios we added */ |
Hugh Dickins | 7f55656 | 2016-07-10 16:46:32 -0700 | [diff] [blame] | 3487 | if (index > start) { |
| 3488 | shmem_undo_range(inode, |
| 3489 | (loff_t)start << PAGE_SHIFT, |
| 3490 | ((loff_t)index << PAGE_SHIFT) - 1, true); |
| 3491 | } |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 3492 | goto undone; |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 3493 | } |
| 3494 | |
Hugh Dickins | 050dcb5 | 2021-09-02 14:54:18 -0700 | [diff] [blame] | 3495 | /* |
| 3496 | * Here is a more important optimization than it appears: |
Matthew Wilcox (Oracle) | b0802b2 | 2022-09-02 20:46:24 +0100 | [diff] [blame] | 3497 | * a second SGP_FALLOC on the same large folio will clear it, |
| 3498 | * making it uptodate and un-undoable if we fail later. |
Hugh Dickins | 050dcb5 | 2021-09-02 14:54:18 -0700 | [diff] [blame] | 3499 | */ |
Matthew Wilcox (Oracle) | b0802b2 | 2022-09-02 20:46:24 +0100 | [diff] [blame] | 3500 | index = folio_next_index(folio); |
| 3501 | /* Beware 32-bit wraparound */ |
| 3502 | if (!index) |
| 3503 | index--; |
Hugh Dickins | 050dcb5 | 2021-09-02 14:54:18 -0700 | [diff] [blame] | 3504 | |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 3505 | /* |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 3506 | * Inform shmem_writepage() how far we have reached. |
| 3507 | * No need for lock or barrier: we have the page lock. |
| 3508 | */ |
Matthew Wilcox (Oracle) | b0802b2 | 2022-09-02 20:46:24 +0100 | [diff] [blame] | 3509 | if (!folio_test_uptodate(folio)) |
Hugh Dickins | 050dcb5 | 2021-09-02 14:54:18 -0700 | [diff] [blame] | 3510 | shmem_falloc.nr_falloced += index - shmem_falloc.next; |
| 3511 | shmem_falloc.next = index; |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 3512 | |
| 3513 | /* |
Matthew Wilcox (Oracle) | b0802b2 | 2022-09-02 20:46:24 +0100 | [diff] [blame] | 3514 | * If !uptodate, leave it that way so that freeable folios |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 3515 | * can be recognized if we need to rollback on error later. |
Matthew Wilcox (Oracle) | b0802b2 | 2022-09-02 20:46:24 +0100 | [diff] [blame] | 3516 | * But mark it dirty so that memory pressure will swap rather |
| 3517 | * than free the folios we are allocating (and SGP_CACHE folios |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 3518 | * might still be clean: we now need to mark those dirty too). |
| 3519 | */ |
Matthew Wilcox (Oracle) | b0802b2 | 2022-09-02 20:46:24 +0100 | [diff] [blame] | 3520 | folio_mark_dirty(folio); |
| 3521 | folio_unlock(folio); |
| 3522 | folio_put(folio); |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 3523 | cond_resched(); |
| 3524 | } |
| 3525 | |
| 3526 | if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) |
| 3527 | i_size_write(inode, offset + len); |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 3528 | undone: |
| 3529 | spin_lock(&inode->i_lock); |
| 3530 | inode->i_private = NULL; |
| 3531 | spin_unlock(&inode->i_lock); |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 3532 | out: |
Hugh Dickins | 15f242b | 2022-08-10 21:55:36 -0700 | [diff] [blame] | 3533 | if (!error) |
| 3534 | file_modified(file); |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 3535 | inode_unlock(inode); |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 3536 | return error; |
| 3537 | } |
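
/*
 * Illustrative userspace sketch (not part of shmem.c): the two fallocate()
 * modes handled above, exercised on a tmpfs file. The path and sizes are
 * arbitrary examples.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	int fd = open("/dev/shm/example", O_RDWR | O_CREAT, 0600);
 *
 *	// Preallocate 1MiB without changing i_size (the SGP_FALLOC loop above):
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *
 *	// Punch a hole over the first 64KiB (the shmem_truncate_range() path):
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 64 << 10);
 */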
| 3538 | |
David Howells | 726c334 | 2006-06-23 02:02:58 -0700 | [diff] [blame] | 3539 | static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3540 | { |
David Howells | 726c334 | 2006-06-23 02:02:58 -0700 | [diff] [blame] | 3541 | struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3542 | |
| 3543 | buf->f_type = TMPFS_MAGIC; |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3544 | buf->f_bsize = PAGE_SIZE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3545 | buf->f_namelen = NAME_MAX; |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3546 | if (sbinfo->max_blocks) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3547 | buf->f_blocks = sbinfo->max_blocks; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3548 | buf->f_bavail = |
| 3549 | buf->f_bfree = sbinfo->max_blocks - |
| 3550 | percpu_counter_sum(&sbinfo->used_blocks); |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3551 | } |
| 3552 | if (sbinfo->max_inodes) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3553 | buf->f_files = sbinfo->max_inodes; |
Hugh Dickins | e07c469 | 2023-08-08 21:32:21 -0700 | [diff] [blame] | 3554 | buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3555 | } |
| 3556 | /* else leave those fields 0 like simple_statfs */ |
Amir Goldstein | 59cda49 | 2021-03-22 19:39:44 +0200 | [diff] [blame] | 3557 | |
| 3558 | buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b); |
| 3559 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3560 | return 0; |
| 3561 | } |
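
/*
 * Illustrative userspace sketch (not part of shmem.c): how the fields
 * filled in by shmem_statfs() above read back. On a size-limited tmpfs,
 * f_blocks/f_bfree are counted in f_bsize (PAGE_SIZE) units; an unlimited
 * mount leaves them zero, as simple_statfs() would.
 *
 *	#include <sys/statvfs.h>
 *	#include <stdio.h>
 *
 *	struct statvfs st;
 *	if (statvfs("/dev/shm", &st) == 0)
 *		printf("bsize %lu, free blocks %llu, free inodes %llu\n",
 *		       st.f_bsize, (unsigned long long)st.f_bfree,
 *		       (unsigned long long)st.f_ffree);
 */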
| 3562 | |
| 3563 | /* |
| 3564 | * File creation. Allocate an inode, and we're done. |
| 3565 | */ |
| 3566 | static int |
Christian Brauner | 5ebb29b | 2023-01-13 12:49:16 +0100 | [diff] [blame] | 3567 | shmem_mknod(struct mnt_idmap *idmap, struct inode *dir, |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 3568 | struct dentry *dentry, umode_t mode, dev_t dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3569 | { |
Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 3570 | struct inode *inode; |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 3571 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3572 | |
Giuseppe Scrivano | 7a80e5b | 2023-01-20 10:43:46 +0100 | [diff] [blame] | 3573 | inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 3574 | if (IS_ERR(inode)) |
| 3575 | return PTR_ERR(inode); |
Mimi Zohar | 37ec43c | 2013-04-14 09:21:47 -0400 | [diff] [blame] | 3576 | |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 3577 | error = simple_acl_create(dir, inode); |
| 3578 | if (error) |
| 3579 | goto out_iput; |
Hugh Dickins | 9be7d5b | 2023-09-29 20:28:50 -0700 | [diff] [blame] | 3580 | error = security_inode_init_security(inode, dir, &dentry->d_name, |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 3581 | shmem_initxattrs, NULL); |
| 3582 | if (error && error != -EOPNOTSUPP) |
| 3583 | goto out_iput; |
| 3584 | |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 3585 | error = simple_offset_add(shmem_get_offset_ctx(dir), dentry); |
| 3586 | if (error) |
| 3587 | goto out_iput; |
| 3588 | |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 3589 | dir->i_size += BOGO_DIRENT_SIZE; |
Jeff Layton | cf2766b | 2023-10-04 14:53:07 -0400 | [diff] [blame] | 3590 | inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 3591 | inode_inc_iversion(dir); |
| 3592 | d_instantiate(dentry, inode); |
| 3593 | dget(dentry); /* Extra count - pin the dentry in core */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3594 | return error; |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 3595 | |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 3596 | out_iput: |
| 3597 | iput(inode); |
| 3598 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3599 | } |
| 3600 | |
Al Viro | 60545d0 | 2013-06-07 01:20:27 -0400 | [diff] [blame] | 3601 | static int |
Christian Brauner | 011e2b7 | 2023-01-13 12:49:18 +0100 | [diff] [blame] | 3602 | shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir, |
Miklos Szeredi | 863f144 | 2022-09-24 07:00:00 +0200 | [diff] [blame] | 3603 | struct file *file, umode_t mode) |
Al Viro | 60545d0 | 2013-06-07 01:20:27 -0400 | [diff] [blame] | 3604 | { |
| 3605 | struct inode *inode; |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 3606 | int error; |
Al Viro | 60545d0 | 2013-06-07 01:20:27 -0400 | [diff] [blame] | 3607 | |
Giuseppe Scrivano | 7a80e5b | 2023-01-20 10:43:46 +0100 | [diff] [blame] | 3608 | inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 3609 | if (IS_ERR(inode)) { |
| 3610 | error = PTR_ERR(inode); |
| 3611 | goto err_out; |
Al Viro | 60545d0 | 2013-06-07 01:20:27 -0400 | [diff] [blame] | 3612 | } |
Hugh Dickins | 9be7d5b | 2023-09-29 20:28:50 -0700 | [diff] [blame] | 3613 | error = security_inode_init_security(inode, dir, NULL, |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 3614 | shmem_initxattrs, NULL); |
| 3615 | if (error && error != -EOPNOTSUPP) |
| 3616 | goto out_iput; |
| 3617 | error = simple_acl_create(dir, inode); |
| 3618 | if (error) |
| 3619 | goto out_iput; |
| 3620 | d_tmpfile(file, inode); |
| 3621 | |
| 3622 | err_out: |
Miklos Szeredi | 863f144 | 2022-09-24 07:00:00 +0200 | [diff] [blame] | 3623 | return finish_open_simple(file, error); |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 3624 | out_iput: |
| 3625 | iput(inode); |
| 3626 | return error; |
Al Viro | 60545d0 | 2013-06-07 01:20:27 -0400 | [diff] [blame] | 3627 | } |
| 3628 | |
Christian Brauner | c54bd91 | 2023-01-13 12:49:15 +0100 | [diff] [blame] | 3629 | static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir, |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 3630 | struct dentry *dentry, umode_t mode) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3631 | { |
| 3632 | int error; |
| 3633 | |
Giuseppe Scrivano | 7a80e5b | 2023-01-20 10:43:46 +0100 | [diff] [blame] | 3634 | error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0); |
| 3635 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3636 | return error; |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 3637 | inc_nlink(dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3638 | return 0; |
| 3639 | } |
| 3640 | |
Christian Brauner | 6c960e6 | 2023-01-13 12:49:13 +0100 | [diff] [blame] | 3641 | static int shmem_create(struct mnt_idmap *idmap, struct inode *dir, |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 3642 | struct dentry *dentry, umode_t mode, bool excl) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3643 | { |
Giuseppe Scrivano | 7a80e5b | 2023-01-20 10:43:46 +0100 | [diff] [blame] | 3644 | return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3645 | } |
| 3646 | |
| 3647 | /* |
| 3648 | * Link a file. |
| 3649 | */ |
Hugh Dickins | 9be7d5b | 2023-09-29 20:28:50 -0700 | [diff] [blame] | 3650 | static int shmem_link(struct dentry *old_dentry, struct inode *dir, |
| 3651 | struct dentry *dentry) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3652 | { |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 3653 | struct inode *inode = d_inode(old_dentry); |
Darrick J. Wong | 29b00e6 | 2019-02-22 22:35:32 -0800 | [diff] [blame] | 3654 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3655 | |
| 3656 | /* |
| 3657 | * No ordinary (disk based) filesystem counts links as inodes; |
| 3658 | * but each new link needs a new dentry, pinning lowmem, and |
| 3659 | * tmpfs dentries cannot be pruned until they are unlinked. |
Darrick J. Wong | 1062af9 | 2019-02-21 08:48:09 -0800 | [diff] [blame] | 3660 | * But if an O_TMPFILE file is linked into the tmpfs, the |
| 3661 | * first link must skip that, to get the accounting right. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3662 | */ |
Darrick J. Wong | 1062af9 | 2019-02-21 08:48:09 -0800 | [diff] [blame] | 3663 | if (inode->i_nlink) { |
Chris Down | e809d5f | 2020-08-06 23:20:20 -0700 | [diff] [blame] | 3664 | ret = shmem_reserve_inode(inode->i_sb, NULL); |
Darrick J. Wong | 1062af9 | 2019-02-21 08:48:09 -0800 | [diff] [blame] | 3665 | if (ret) |
| 3666 | goto out; |
| 3667 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3668 | |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 3669 | ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry); |
| 3670 | if (ret) { |
| 3671 | if (inode->i_nlink) |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 3672 | shmem_free_inode(inode->i_sb, 0); |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 3673 | goto out; |
| 3674 | } |
| 3675 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3676 | dir->i_size += BOGO_DIRENT_SIZE; |
Jeff Layton | cf2766b | 2023-10-04 14:53:07 -0400 | [diff] [blame] | 3677 | inode_set_mtime_to_ts(dir, |
| 3678 | inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode))); |
Jeff Layton | 36f05ca | 2022-09-09 09:00:31 -0400 | [diff] [blame] | 3679 | inode_inc_iversion(dir); |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 3680 | inc_nlink(inode); |
Al Viro | 7de9c6ee | 2010-10-23 11:11:40 -0400 | [diff] [blame] | 3681 | ihold(inode); /* New dentry reference */ |
Hugh Dickins | 9be7d5b | 2023-09-29 20:28:50 -0700 | [diff] [blame] | 3682 | dget(dentry); /* Extra pinning count for the created dentry */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3683 | d_instantiate(dentry, inode); |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 3684 | out: |
| 3685 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3686 | } |
| 3687 | |
| 3688 | static int shmem_unlink(struct inode *dir, struct dentry *dentry) |
| 3689 | { |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 3690 | struct inode *inode = d_inode(dentry); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3691 | |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 3692 | if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 3693 | shmem_free_inode(inode->i_sb, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3694 | |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 3695 | simple_offset_remove(shmem_get_offset_ctx(dir), dentry); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3696 | |
| 3697 | dir->i_size -= BOGO_DIRENT_SIZE; |
Jeff Layton | cf2766b | 2023-10-04 14:53:07 -0400 | [diff] [blame] | 3698 | inode_set_mtime_to_ts(dir, |
| 3699 | inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode))); |
Jeff Layton | 36f05ca | 2022-09-09 09:00:31 -0400 | [diff] [blame] | 3700 | inode_inc_iversion(dir); |
Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 3701 | drop_nlink(inode); |
Hugh Dickins | 9be7d5b | 2023-09-29 20:28:50 -0700 | [diff] [blame] | 3702 | dput(dentry); /* Undo the count from "create" - does all the work */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3703 | return 0; |
| 3704 | } |
| 3705 | |
| 3706 | static int shmem_rmdir(struct inode *dir, struct dentry *dentry) |
| 3707 | { |
Chuck Lever | ecba88a | 2024-02-17 15:23:54 -0500 | [diff] [blame] | 3708 | if (!simple_offset_empty(dentry)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3709 | return -ENOTEMPTY; |
| 3710 | |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 3711 | drop_nlink(d_inode(dentry)); |
Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 3712 | drop_nlink(dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3713 | return shmem_unlink(dir, dentry); |
| 3714 | } |
| 3715 | |
Christian Brauner | e18275a | 2023-01-13 12:49:17 +0100 | [diff] [blame] | 3716 | static int shmem_whiteout(struct mnt_idmap *idmap, |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 3717 | struct inode *old_dir, struct dentry *old_dentry) |
Miklos Szeredi | 46fdb79 | 2014-10-24 00:14:37 +0200 | [diff] [blame] | 3718 | { |
| 3719 | struct dentry *whiteout; |
| 3720 | int error; |
| 3721 | |
| 3722 | whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); |
| 3723 | if (!whiteout) |
| 3724 | return -ENOMEM; |
| 3725 | |
Giuseppe Scrivano | 7a80e5b | 2023-01-20 10:43:46 +0100 | [diff] [blame] | 3726 | error = shmem_mknod(idmap, old_dir, whiteout, |
Miklos Szeredi | 46fdb79 | 2014-10-24 00:14:37 +0200 | [diff] [blame] | 3727 | S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); |
| 3728 | dput(whiteout); |
| 3729 | if (error) |
| 3730 | return error; |
| 3731 | |
| 3732 | /* |
| 3733 | * Cheat and hash the whiteout while the old dentry is still in |
| 3734 | * place, instead of playing games with FS_RENAME_DOES_D_MOVE. |
| 3735 | * |
| 3736 | * d_lookup() will consistently find one of them at this point, |
| 3737 | * not sure which one, but that isn't even important. |
| 3738 | */ |
| 3739 | d_rehash(whiteout); |
| 3740 | return 0; |
| 3741 | } |
| 3742 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3743 | /* |
| 3744 | * The VFS layer already does all the dentry stuff for rename, |
| 3745 | * we just have to decrement the usage count for the target if |
| 3746 | * it exists so that the VFS layer correctly frees it when it |
| 3747 | * gets overwritten. |
| 3748 | */ |
Christian Brauner | e18275a | 2023-01-13 12:49:17 +0100 | [diff] [blame] | 3749 | static int shmem_rename2(struct mnt_idmap *idmap, |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 3750 | struct inode *old_dir, struct dentry *old_dentry, |
| 3751 | struct inode *new_dir, struct dentry *new_dentry, |
| 3752 | unsigned int flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3753 | { |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 3754 | struct inode *inode = d_inode(old_dentry); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3755 | int they_are_dirs = S_ISDIR(inode->i_mode); |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 3756 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3757 | |
Miklos Szeredi | 46fdb79 | 2014-10-24 00:14:37 +0200 | [diff] [blame] | 3758 | if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) |
Miklos Szeredi | 3b69ff5 | 2014-07-23 15:15:33 +0200 | [diff] [blame] | 3759 | return -EINVAL; |
| 3760 | |
Miklos Szeredi | 3745677 | 2014-07-23 15:15:34 +0200 | [diff] [blame] | 3761 | if (flags & RENAME_EXCHANGE) |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 3762 | return simple_offset_rename_exchange(old_dir, old_dentry, |
| 3763 | new_dir, new_dentry); |
Miklos Szeredi | 3745677 | 2014-07-23 15:15:34 +0200 | [diff] [blame] | 3764 | |
Chuck Lever | ecba88a | 2024-02-17 15:23:54 -0500 | [diff] [blame] | 3765 | if (!simple_offset_empty(new_dentry)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3766 | return -ENOTEMPTY; |
| 3767 | |
Miklos Szeredi | 46fdb79 | 2014-10-24 00:14:37 +0200 | [diff] [blame] | 3768 | if (flags & RENAME_WHITEOUT) { |
Giuseppe Scrivano | 7a80e5b | 2023-01-20 10:43:46 +0100 | [diff] [blame] | 3769 | error = shmem_whiteout(idmap, old_dir, old_dentry); |
Miklos Szeredi | 46fdb79 | 2014-10-24 00:14:37 +0200 | [diff] [blame] | 3770 | if (error) |
| 3771 | return error; |
| 3772 | } |
| 3773 | |
Chuck Lever | 5a1a25b | 2024-04-15 11:20:55 -0400 | [diff] [blame] | 3774 | error = simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry); |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 3775 | if (error) |
| 3776 | return error; |
| 3777 | |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 3778 | if (d_really_is_positive(new_dentry)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3779 | (void) shmem_unlink(new_dir, new_dentry); |
Miklos Szeredi | b928095 | 2014-09-24 17:56:17 +0200 | [diff] [blame] | 3780 | if (they_are_dirs) { |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 3781 | drop_nlink(d_inode(new_dentry)); |
Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 3782 | drop_nlink(old_dir); |
Miklos Szeredi | b928095 | 2014-09-24 17:56:17 +0200 | [diff] [blame] | 3783 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3784 | } else if (they_are_dirs) { |
Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 3785 | drop_nlink(old_dir); |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 3786 | inc_nlink(new_dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3787 | } |
| 3788 | |
| 3789 | old_dir->i_size -= BOGO_DIRENT_SIZE; |
| 3790 | new_dir->i_size += BOGO_DIRENT_SIZE; |
Jeff Layton | 944d0d9de | 2023-07-05 15:00:36 -0400 | [diff] [blame] | 3791 | simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry); |
Jeff Layton | 36f05ca | 2022-09-09 09:00:31 -0400 | [diff] [blame] | 3792 | inode_inc_iversion(old_dir); |
| 3793 | inode_inc_iversion(new_dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3794 | return 0; |
| 3795 | } |
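
/*
 * Illustrative userspace sketch (not part of shmem.c): the flags
 * dispatched by shmem_rename2() above correspond to renameat2(2).
 * Paths are arbitrary examples.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	// Atomically swap two tmpfs directory entries:
 *	renameat2(AT_FDCWD, "/dev/shm/a", AT_FDCWD, "/dev/shm/b",
 *		  RENAME_EXCHANGE);
 *
 *	// Replace the source with a whiteout (as overlayfs does):
 *	renameat2(AT_FDCWD, "/dev/shm/a", AT_FDCWD, "/dev/shm/c",
 *		  RENAME_WHITEOUT);
 */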
| 3796 | |
Christian Brauner | 7a77db9 | 2023-01-13 12:49:14 +0100 | [diff] [blame] | 3797 | static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir, |
Christian Brauner | 549c729 | 2021-01-21 14:19:43 +0100 | [diff] [blame] | 3798 | struct dentry *dentry, const char *symname) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3799 | { |
| 3800 | int error; |
| 3801 | int len; |
| 3802 | struct inode *inode; |
Matthew Wilcox (Oracle) | 7ad0414 | 2022-09-02 20:46:25 +0100 | [diff] [blame] | 3803 | struct folio *folio; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3804 | |
| 3805 | len = strlen(symname) + 1; |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3806 | if (len > PAGE_SIZE) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3807 | return -ENAMETOOLONG; |
| 3808 | |
Giuseppe Scrivano | 7a80e5b | 2023-01-20 10:43:46 +0100 | [diff] [blame] | 3809 | inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0, |
Joe Perches | 0825a6f | 2018-06-14 15:27:58 -0700 | [diff] [blame] | 3810 | VM_NORESERVE); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 3811 | if (IS_ERR(inode)) |
| 3812 | return PTR_ERR(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3813 | |
Mimi Zohar | 9d8f13b | 2011-06-06 15:29:25 -0400 | [diff] [blame] | 3814 | error = security_inode_init_security(inode, dir, &dentry->d_name, |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3815 | shmem_initxattrs, NULL); |
Chuck Lever | 23a31d87 | 2023-06-30 13:48:56 -0400 | [diff] [blame] | 3816 | if (error && error != -EOPNOTSUPP) |
| 3817 | goto out_iput; |
Stephen Smalley | 570bc1c | 2005-09-09 13:01:43 -0700 | [diff] [blame] | 3818 | |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 3819 | error = simple_offset_add(shmem_get_offset_ctx(dir), dentry); |
| 3820 | if (error) |
| 3821 | goto out_iput; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3822 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3823 | inode->i_size = len-1; |
Hugh Dickins | 69f07ec | 2011-08-03 16:21:26 -0700 | [diff] [blame] | 3824 | if (len <= SHORT_SYMLINK_LEN) { |
Al Viro | 3ed47db | 2016-01-22 18:08:52 -0500 | [diff] [blame] | 3825 | inode->i_link = kmemdup(symname, len, GFP_KERNEL); |
| 3826 | if (!inode->i_link) { |
Chuck Lever | 23a31d87 | 2023-06-30 13:48:56 -0400 | [diff] [blame] | 3827 | error = -ENOMEM; |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 3828 | goto out_remove_offset; |
Hugh Dickins | 69f07ec | 2011-08-03 16:21:26 -0700 | [diff] [blame] | 3829 | } |
| 3830 | inode->i_op = &shmem_short_symlink_operations; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3831 | } else { |
Al Viro | e8ecde2 | 2016-01-14 17:52:59 -0500 | [diff] [blame] | 3832 | inode_nohighmem(inode); |
Christoph Hellwig | e11381d | 2024-02-19 07:27:11 +0100 | [diff] [blame] | 3833 | inode->i_mapping->a_ops = &shmem_aops; |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 3834 | error = shmem_get_folio(inode, 0, 0, &folio, SGP_WRITE); |
Chuck Lever | 23a31d87 | 2023-06-30 13:48:56 -0400 | [diff] [blame] | 3835 | if (error) |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 3836 | goto out_remove_offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3837 | inode->i_op = &shmem_symlink_inode_operations; |
Matthew Wilcox (Oracle) | 7ad0414 | 2022-09-02 20:46:25 +0100 | [diff] [blame] | 3838 | memcpy(folio_address(folio), symname, len); |
| 3839 | folio_mark_uptodate(folio); |
| 3840 | folio_mark_dirty(folio); |
| 3841 | folio_unlock(folio); |
| 3842 | folio_put(folio); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3843 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3844 | dir->i_size += BOGO_DIRENT_SIZE; |
Jeff Layton | cf2766b | 2023-10-04 14:53:07 -0400 | [diff] [blame] | 3845 | inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); |
Jeff Layton | 36f05ca | 2022-09-09 09:00:31 -0400 | [diff] [blame] | 3846 | inode_inc_iversion(dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3847 | d_instantiate(dentry, inode); |
| 3848 | dget(dentry); |
| 3849 | return 0; |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 3850 | |
| 3851 | out_remove_offset: |
| 3852 | simple_offset_remove(shmem_get_offset_ctx(dir), dentry); |
Chuck Lever | 23a31d87 | 2023-06-30 13:48:56 -0400 | [diff] [blame] | 3853 | out_iput: |
| 3854 | iput(inode); |
| 3855 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3856 | } |
| 3857 | |
Al Viro | fceef39 | 2015-12-29 15:58:39 -0500 | [diff] [blame] | 3858 | static void shmem_put_link(void *arg) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3859 | { |
Matthew Wilcox (Oracle) | e4b5772 | 2022-09-02 20:46:26 +0100 | [diff] [blame] | 3860 | folio_mark_accessed(arg); |
| 3861 | folio_put(arg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3862 | } |
| 3863 | |
Hugh Dickins | 9be7d5b | 2023-09-29 20:28:50 -0700 | [diff] [blame] | 3864 | static const char *shmem_get_link(struct dentry *dentry, struct inode *inode, |
Al Viro | fceef39 | 2015-12-29 15:58:39 -0500 | [diff] [blame] | 3865 | struct delayed_call *done) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3866 | { |
Matthew Wilcox (Oracle) | e4b5772 | 2022-09-02 20:46:26 +0100 | [diff] [blame] | 3867 | struct folio *folio = NULL; |
Al Viro | 6b25539 | 2015-11-17 10:20:54 -0500 | [diff] [blame] | 3868 | int error; |
Matthew Wilcox (Oracle) | e4b5772 | 2022-09-02 20:46:26 +0100 | [diff] [blame] | 3869 | |
Al Viro | 6a6c990 | 2015-11-17 10:54:32 -0500 | [diff] [blame] | 3870 | if (!dentry) { |
Matthew Wilcox (Oracle) | e4b5772 | 2022-09-02 20:46:26 +0100 | [diff] [blame] | 3871 | folio = filemap_get_folio(inode->i_mapping, 0); |
Christoph Hellwig | 66dabbb | 2023-03-07 15:34:10 +0100 | [diff] [blame] | 3872 | if (IS_ERR(folio)) |
Al Viro | 6a6c990 | 2015-11-17 10:54:32 -0500 | [diff] [blame] | 3873 | return ERR_PTR(-ECHILD); |
Matthew Wilcox (Oracle) | 7459c14 | 2022-09-02 20:46:27 +0100 | [diff] [blame] | 3874 | if (PageHWPoison(folio_page(folio, 0)) || |
Matthew Wilcox (Oracle) | e4b5772 | 2022-09-02 20:46:26 +0100 | [diff] [blame] | 3875 | !folio_test_uptodate(folio)) { |
| 3876 | folio_put(folio); |
Al Viro | 6a6c990 | 2015-11-17 10:54:32 -0500 | [diff] [blame] | 3877 | return ERR_PTR(-ECHILD); |
| 3878 | } |
| 3879 | } else { |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 3880 | error = shmem_get_folio(inode, 0, 0, &folio, SGP_READ); |
Al Viro | 6a6c990 | 2015-11-17 10:54:32 -0500 | [diff] [blame] | 3881 | if (error) |
| 3882 | return ERR_PTR(error); |
Matthew Wilcox (Oracle) | e4b5772 | 2022-09-02 20:46:26 +0100 | [diff] [blame] | 3883 | if (!folio) |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 3884 | return ERR_PTR(-ECHILD); |
Matthew Wilcox (Oracle) | 7459c14 | 2022-09-02 20:46:27 +0100 | [diff] [blame] | 3885 | if (PageHWPoison(folio_page(folio, 0))) { |
Matthew Wilcox (Oracle) | e4b5772 | 2022-09-02 20:46:26 +0100 | [diff] [blame] | 3886 | folio_unlock(folio); |
| 3887 | folio_put(folio); |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 3888 | return ERR_PTR(-ECHILD); |
| 3889 | } |
Matthew Wilcox (Oracle) | e4b5772 | 2022-09-02 20:46:26 +0100 | [diff] [blame] | 3890 | folio_unlock(folio); |
Al Viro | 6a6c990 | 2015-11-17 10:54:32 -0500 | [diff] [blame] | 3891 | } |
Matthew Wilcox (Oracle) | e4b5772 | 2022-09-02 20:46:26 +0100 | [diff] [blame] | 3892 | set_delayed_call(done, shmem_put_link, folio); |
| 3893 | return folio_address(folio); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3894 | } |
| 3895 | |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3896 | #ifdef CONFIG_TMPFS_XATTR |
Theodore Ts'o | e408e69 | 2022-07-14 21:59:12 -0400 | [diff] [blame] | 3897 | |
| 3898 | static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa) |
| 3899 | { |
| 3900 | struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); |
| 3901 | |
| 3902 | fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE); |
| 3903 | |
| 3904 | return 0; |
| 3905 | } |
| 3906 | |
Christian Brauner | 8782a9a | 2023-01-13 12:49:21 +0100 | [diff] [blame] | 3907 | static int shmem_fileattr_set(struct mnt_idmap *idmap, |
Theodore Ts'o | e408e69 | 2022-07-14 21:59:12 -0400 | [diff] [blame] | 3908 | struct dentry *dentry, struct fileattr *fa) |
| 3909 | { |
| 3910 | struct inode *inode = d_inode(dentry); |
| 3911 | struct shmem_inode_info *info = SHMEM_I(inode); |
| 3912 | |
| 3913 | if (fileattr_has_fsx(fa)) |
| 3914 | return -EOPNOTSUPP; |
Hugh Dickins | cb24133 | 2022-08-10 21:51:09 -0700 | [diff] [blame] | 3915 | if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE) |
| 3916 | return -EOPNOTSUPP; |
Theodore Ts'o | e408e69 | 2022-07-14 21:59:12 -0400 | [diff] [blame] | 3917 | |
| 3918 | info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) | |
| 3919 | (fa->flags & SHMEM_FL_USER_MODIFIABLE); |
| 3920 | |
Hugh Dickins | cb24133 | 2022-08-10 21:51:09 -0700 | [diff] [blame] | 3921 | shmem_set_inode_flags(inode, info->fsflags); |
Jeff Layton | 6528733 | 2023-07-05 15:01:52 -0400 | [diff] [blame] | 3922 | inode_set_ctime_current(inode); |
Jeff Layton | 36f05ca | 2022-09-09 09:00:31 -0400 | [diff] [blame] | 3923 | inode_inc_iversion(inode); |
Theodore Ts'o | e408e69 | 2022-07-14 21:59:12 -0400 | [diff] [blame] | 3924 | return 0; |
| 3925 | } |
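
/*
 * Illustrative userspace sketch (not part of shmem.c): the fileattr hooks
 * above back the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls used by chattr(1)
 * and lsattr(1); only flags in SHMEM_FL_USER_MODIFIABLE may be changed.
 * The path is an arbitrary example.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/shm/example", O_RDONLY);
 *	int attr;
 *
 *	ioctl(fd, FS_IOC_GETFLAGS, &attr);
 *	attr |= FS_NOATIME_FL;			// e.g. chattr +A
 *	ioctl(fd, FS_IOC_SETFLAGS, &attr);
 */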
| 3926 | |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3927 | /* |
| 3928 | * Superblocks without xattr inode operations may get some security.* xattr |
| 3929 | * support from the LSM "for free". As soon as we have any other xattrs |
| 3930 | * like ACLs, we also need to implement the security.* handlers at |
| 3931 | * filesystem level, though. |
| 3932 | */ |
| 3933 | |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3934 | /* |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3935 | * Callback for security_inode_init_security() for acquiring xattrs. |
| 3936 | */ |
| 3937 | static int shmem_initxattrs(struct inode *inode, |
Hugh Dickins | 9be7d5b | 2023-09-29 20:28:50 -0700 | [diff] [blame] | 3938 | const struct xattr *xattr_array, void *fs_info) |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3939 | { |
| 3940 | struct shmem_inode_info *info = SHMEM_I(inode); |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 3941 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3942 | const struct xattr *xattr; |
Aristeu Rozanski | 38f3865 | 2012-08-23 16:53:28 -0400 | [diff] [blame] | 3943 | struct simple_xattr *new_xattr; |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 3944 | size_t ispace = 0; |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3945 | size_t len; |
| 3946 | |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 3947 | if (sbinfo->max_inodes) { |
| 3948 | for (xattr = xattr_array; xattr->name != NULL; xattr++) { |
| 3949 | ispace += simple_xattr_space(xattr->name, |
| 3950 | xattr->value_len + XATTR_SECURITY_PREFIX_LEN); |
| 3951 | } |
| 3952 | if (ispace) { |
| 3953 | raw_spin_lock(&sbinfo->stat_lock); |
| 3954 | if (sbinfo->free_ispace < ispace) |
| 3955 | ispace = 0; |
| 3956 | else |
| 3957 | sbinfo->free_ispace -= ispace; |
| 3958 | raw_spin_unlock(&sbinfo->stat_lock); |
| 3959 | if (!ispace) |
| 3960 | return -ENOSPC; |
| 3961 | } |
| 3962 | } |
| 3963 | |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3964 | for (xattr = xattr_array; xattr->name != NULL; xattr++) { |
Aristeu Rozanski | 38f3865 | 2012-08-23 16:53:28 -0400 | [diff] [blame] | 3965 | new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3966 | if (!new_xattr) |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 3967 | break; |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3968 | |
| 3969 | len = strlen(xattr->name) + 1; |
| 3970 | new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, |
Hugh Dickins | 572a3d1 | 2023-08-21 10:39:20 -0700 | [diff] [blame] | 3971 | GFP_KERNEL_ACCOUNT); |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3972 | if (!new_xattr->name) { |
Chengguang Xu | 3bef735 | 2020-07-23 21:15:14 -0700 | [diff] [blame] | 3973 | kvfree(new_xattr); |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 3974 | break; |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3975 | } |
| 3976 | |
| 3977 | memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, |
| 3978 | XATTR_SECURITY_PREFIX_LEN); |
| 3979 | memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, |
| 3980 | xattr->name, len); |
| 3981 | |
Christian Brauner | 3b4c7bc | 2022-11-04 13:52:42 +0100 | [diff] [blame] | 3982 | simple_xattr_add(&info->xattrs, new_xattr); |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3983 | } |
| 3984 | |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 3985 | if (xattr->name != NULL) { |
| 3986 | if (ispace) { |
| 3987 | raw_spin_lock(&sbinfo->stat_lock); |
| 3988 | sbinfo->free_ispace += ispace; |
| 3989 | raw_spin_unlock(&sbinfo->stat_lock); |
| 3990 | } |
| 3991 | simple_xattrs_free(&info->xattrs, NULL); |
| 3992 | return -ENOMEM; |
| 3993 | } |
| 3994 | |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3995 | return 0; |
| 3996 | } |
| 3997 | |
Andreas Gruenbacher | aa7c524 | 2015-12-02 14:44:38 +0100 | [diff] [blame] | 3998 | static int shmem_xattr_handler_get(const struct xattr_handler *handler, |
Al Viro | b296821 | 2016-04-10 20:48:24 -0400 | [diff] [blame] | 3999 | struct dentry *unused, struct inode *inode, |
| 4000 | const char *name, void *buffer, size_t size) |
Andreas Gruenbacher | aa7c524 | 2015-12-02 14:44:38 +0100 | [diff] [blame] | 4001 | { |
Al Viro | b296821 | 2016-04-10 20:48:24 -0400 | [diff] [blame] | 4002 | struct shmem_inode_info *info = SHMEM_I(inode); |
Andreas Gruenbacher | aa7c524 | 2015-12-02 14:44:38 +0100 | [diff] [blame] | 4003 | |
| 4004 | name = xattr_full_name(handler, name); |
| 4005 | return simple_xattr_get(&info->xattrs, name, buffer, size); |
| 4006 | } |
| 4007 | |
| 4008 | static int shmem_xattr_handler_set(const struct xattr_handler *handler, |
Christian Brauner | 39f60c1 | 2023-01-13 12:49:23 +0100 | [diff] [blame] | 4009 | struct mnt_idmap *idmap, |
Al Viro | 5930122 | 2016-05-27 10:19:30 -0400 | [diff] [blame] | 4010 | struct dentry *unused, struct inode *inode, |
| 4011 | const char *name, const void *value, |
| 4012 | size_t size, int flags) |
Andreas Gruenbacher | aa7c524 | 2015-12-02 14:44:38 +0100 | [diff] [blame] | 4013 | { |
Al Viro | 5930122 | 2016-05-27 10:19:30 -0400 | [diff] [blame] | 4014 | struct shmem_inode_info *info = SHMEM_I(inode); |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 4015 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
Hugh Dickins | 5de7597 | 2023-08-08 21:30:59 -0700 | [diff] [blame] | 4016 | struct simple_xattr *old_xattr; |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 4017 | size_t ispace = 0; |
Andreas Gruenbacher | aa7c524 | 2015-12-02 14:44:38 +0100 | [diff] [blame] | 4018 | |
| 4019 | name = xattr_full_name(handler, name); |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 4020 | if (value && sbinfo->max_inodes) { |
| 4021 | ispace = simple_xattr_space(name, size); |
| 4022 | raw_spin_lock(&sbinfo->stat_lock); |
| 4023 | if (sbinfo->free_ispace < ispace) |
| 4024 | ispace = 0; |
| 4025 | else |
| 4026 | sbinfo->free_ispace -= ispace; |
| 4027 | raw_spin_unlock(&sbinfo->stat_lock); |
| 4028 | if (!ispace) |
| 4029 | return -ENOSPC; |
| 4030 | } |
| 4031 | |
Hugh Dickins | 5de7597 | 2023-08-08 21:30:59 -0700 | [diff] [blame] | 4032 | old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags); |
| 4033 | if (!IS_ERR(old_xattr)) { |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 4034 | ispace = 0; |
| 4035 | if (old_xattr && sbinfo->max_inodes) |
| 4036 | ispace = simple_xattr_space(old_xattr->name, |
| 4037 | old_xattr->size); |
Hugh Dickins | 5de7597 | 2023-08-08 21:30:59 -0700 | [diff] [blame] | 4038 | simple_xattr_free(old_xattr); |
| 4039 | old_xattr = NULL; |
Jeff Layton | 6528733 | 2023-07-05 15:01:52 -0400 | [diff] [blame] | 4040 | inode_set_ctime_current(inode); |
Jeff Layton | 36f05ca | 2022-09-09 09:00:31 -0400 | [diff] [blame] | 4041 | inode_inc_iversion(inode); |
| 4042 | } |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 4043 | if (ispace) { |
| 4044 | raw_spin_lock(&sbinfo->stat_lock); |
| 4045 | sbinfo->free_ispace += ispace; |
| 4046 | raw_spin_unlock(&sbinfo->stat_lock); |
| 4047 | } |
Hugh Dickins | 5de7597 | 2023-08-08 21:30:59 -0700 | [diff] [blame] | 4048 | return PTR_ERR(old_xattr); |
Andreas Gruenbacher | aa7c524 | 2015-12-02 14:44:38 +0100 | [diff] [blame] | 4049 | } |
| 4050 | |
| 4051 | static const struct xattr_handler shmem_security_xattr_handler = { |
| 4052 | .prefix = XATTR_SECURITY_PREFIX, |
| 4053 | .get = shmem_xattr_handler_get, |
| 4054 | .set = shmem_xattr_handler_set, |
| 4055 | }; |
| 4056 | |
| 4057 | static const struct xattr_handler shmem_trusted_xattr_handler = { |
| 4058 | .prefix = XATTR_TRUSTED_PREFIX, |
| 4059 | .get = shmem_xattr_handler_get, |
| 4060 | .set = shmem_xattr_handler_set, |
| 4061 | }; |
| 4062 | |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 4063 | static const struct xattr_handler shmem_user_xattr_handler = { |
| 4064 | .prefix = XATTR_USER_PREFIX, |
| 4065 | .get = shmem_xattr_handler_get, |
| 4066 | .set = shmem_xattr_handler_set, |
| 4067 | }; |
| 4068 | |
Wedson Almeida Filho | 2f50286 | 2023-09-30 02:00:32 -0300 | [diff] [blame] | 4069 | static const struct xattr_handler * const shmem_xattr_handlers[] = { |
Andreas Gruenbacher | aa7c524 | 2015-12-02 14:44:38 +0100 | [diff] [blame] | 4070 | &shmem_security_xattr_handler, |
| 4071 | &shmem_trusted_xattr_handler, |
Hugh Dickins | 2daf18a | 2023-08-08 21:33:56 -0700 | [diff] [blame] | 4072 | &shmem_user_xattr_handler, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4073 | NULL |
| 4074 | }; |
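
/*
 * Illustrative userspace sketch (not part of shmem.c): the handlers above
 * serve the security.*, trusted.* and user.* namespaces on tmpfs.
 *
 *	#include <sys/xattr.h>
 *
 *	setxattr("/dev/shm/example", "user.note", "hello", 5, 0);
 *
 *	char buf[64];
 *	ssize_t len = getxattr("/dev/shm/example", "user.note", buf, sizeof(buf));
 */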
| 4075 | |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4076 | static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) |
| 4077 | { |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 4078 | struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); |
Andreas Gruenbacher | 786534b | 2015-12-02 14:44:39 +0100 | [diff] [blame] | 4079 | return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4080 | } |
| 4081 | #endif /* CONFIG_TMPFS_XATTR */ |
| 4082 | |
Hugh Dickins | 69f07ec | 2011-08-03 16:21:26 -0700 | [diff] [blame] | 4083 | static const struct inode_operations shmem_short_symlink_operations = { |
Xavier Roche | f7cd16a | 2022-03-22 14:39:55 -0700 | [diff] [blame] | 4084 | .getattr = shmem_getattr, |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4085 | .setattr = shmem_setattr, |
Al Viro | 6b25539 | 2015-11-17 10:20:54 -0500 | [diff] [blame] | 4086 | .get_link = simple_get_link, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4087 | #ifdef CONFIG_TMPFS_XATTR |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4088 | .listxattr = shmem_listxattr, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4089 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4090 | }; |
| 4091 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 4092 | static const struct inode_operations shmem_symlink_inode_operations = { |
Xavier Roche | f7cd16a | 2022-03-22 14:39:55 -0700 | [diff] [blame] | 4093 | .getattr = shmem_getattr, |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4094 | .setattr = shmem_setattr, |
Al Viro | 6b25539 | 2015-11-17 10:20:54 -0500 | [diff] [blame] | 4095 | .get_link = shmem_get_link, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4096 | #ifdef CONFIG_TMPFS_XATTR |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4097 | .listxattr = shmem_listxattr, |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 4098 | #endif |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4099 | }; |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 4100 | |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4101 | static struct dentry *shmem_get_parent(struct dentry *child) |
| 4102 | { |
| 4103 | return ERR_PTR(-ESTALE); |
| 4104 | } |
| 4105 | |
| 4106 | static int shmem_match(struct inode *ino, void *vfh) |
| 4107 | { |
| 4108 | __u32 *fh = vfh; |
| 4109 | __u64 inum = fh[2]; |
| 4110 | inum = (inum << 32) | fh[1]; |
| 4111 | return ino->i_ino == inum && fh[0] == ino->i_generation; |
| 4112 | } |
| 4113 | |
Amir Goldstein | 12ba780 | 2018-06-07 17:07:15 -0700 | [diff] [blame] | 4114 | /* Find any alias of inode, but prefer a hashed alias */ |
| 4115 | static struct dentry *shmem_find_alias(struct inode *inode) |
| 4116 | { |
| 4117 | struct dentry *alias = d_find_alias(inode); |
| 4118 | |
| 4119 | return alias ?: d_find_any_alias(inode); |
| 4120 | } |
| 4121 | |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 4122 | static struct dentry *shmem_fh_to_dentry(struct super_block *sb, |
| 4123 | struct fid *fid, int fh_len, int fh_type) |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4124 | { |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4125 | struct inode *inode; |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 4126 | struct dentry *dentry = NULL; |
Hugh Dickins | 35c2a7f | 2012-10-07 20:32:51 -0700 | [diff] [blame] | 4127 | u64 inum; |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4128 | |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 4129 | if (fh_len < 3) |
| 4130 | return NULL; |
| 4131 | |
Hugh Dickins | 35c2a7f | 2012-10-07 20:32:51 -0700 | [diff] [blame] | 4132 | inum = fid->raw[2]; |
| 4133 | inum = (inum << 32) | fid->raw[1]; |
| 4134 | |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 4135 | inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), |
| 4136 | shmem_match, fid->raw); |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4137 | if (inode) { |
Amir Goldstein | 12ba780 | 2018-06-07 17:07:15 -0700 | [diff] [blame] | 4138 | dentry = shmem_find_alias(inode); |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4139 | iput(inode); |
| 4140 | } |
| 4141 | |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 4142 | return dentry; |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4143 | } |
| 4144 | |
Al Viro | b0b0382 | 2012-04-02 14:34:06 -0400 | [diff] [blame] | 4145 | static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, |
| 4146 | struct inode *parent) |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4147 | { |
Aneesh Kumar K.V | 5fe0c23 | 2011-01-29 18:43:25 +0530 | [diff] [blame] | 4148 | if (*len < 3) { |
| 4149 | *len = 3; |
Namjae Jeon | 94e07a75 | 2013-02-17 15:48:11 +0900 | [diff] [blame] | 4150 | return FILEID_INVALID; |
Aneesh Kumar K.V | 5fe0c23 | 2011-01-29 18:43:25 +0530 | [diff] [blame] | 4151 | } |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4152 | |
Al Viro | 1d3382cb | 2010-10-23 15:19:20 -0400 | [diff] [blame] | 4153 | if (inode_unhashed(inode)) { |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4154 | /* Unfortunately insert_inode_hash is not idempotent, |
| 4155 | * so as we hash inodes here rather than at creation |
| 4156 | * time, we need a lock to ensure we only try |
| 4157 | * to do it once |
| 4158 | */ |
| 4159 | static DEFINE_SPINLOCK(lock); |
| 4160 | spin_lock(&lock); |
Al Viro | 1d3382cb | 2010-10-23 15:19:20 -0400 | [diff] [blame] | 4161 | if (inode_unhashed(inode)) |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4162 | __insert_inode_hash(inode, |
| 4163 | inode->i_ino + inode->i_generation); |
| 4164 | spin_unlock(&lock); |
| 4165 | } |
| 4166 | |
| 4167 | fh[0] = inode->i_generation; |
| 4168 | fh[1] = inode->i_ino; |
| 4169 | fh[2] = ((__u64)inode->i_ino) >> 32; |
| 4170 | |
| 4171 | *len = 3; |
| 4172 | return 1; |
| 4173 | } |
| 4174 | |
Christoph Hellwig | 3965516 | 2007-10-21 16:42:17 -0700 | [diff] [blame] | 4175 | static const struct export_operations shmem_export_ops = { |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4176 | .get_parent = shmem_get_parent, |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4177 | .encode_fh = shmem_encode_fh, |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 4178 | .fh_to_dentry = shmem_fh_to_dentry, |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4179 | }; |
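
/*
 * Illustrative userspace sketch (not part of shmem.c): the 3-word handle
 * built by shmem_encode_fh() above (generation, then the low and high
 * halves of i_ino) is obtained and reused like any other exportfs handle.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *
 *	struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
 *	int mount_id;
 *
 *	fh->handle_bytes = MAX_HANDLE_SZ;
 *	name_to_handle_at(AT_FDCWD, "/dev/shm/example", fh, &mount_id, 0);
 *	// ... later, typically from a privileged process holding an fd on
 *	// the mount: open_by_handle_at(mount_fd, fh, O_RDONLY);
 */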
| 4180 | |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4181 | enum shmem_param { |
| 4182 | Opt_gid, |
| 4183 | Opt_huge, |
| 4184 | Opt_mode, |
| 4185 | Opt_mpol, |
| 4186 | Opt_nr_blocks, |
| 4187 | Opt_nr_inodes, |
| 4188 | Opt_size, |
| 4189 | Opt_uid, |
Chris Down | ea3271f | 2020-08-06 23:20:25 -0700 | [diff] [blame] | 4190 | Opt_inode32, |
| 4191 | Opt_inode64, |
Luis Chamberlain | 2c6efe9 | 2023-03-09 15:05:45 -0800 | [diff] [blame] | 4192 | Opt_noswap, |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4193 | Opt_quota, |
| 4194 | Opt_usrquota, |
| 4195 | Opt_grpquota, |
Lukas Czerner | de4c0e7 | 2023-07-25 16:45:09 +0200 | [diff] [blame] | 4196 | Opt_usrquota_block_hardlimit, |
| 4197 | Opt_usrquota_inode_hardlimit, |
| 4198 | Opt_grpquota_block_hardlimit, |
| 4199 | Opt_grpquota_inode_hardlimit, |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4200 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4201 | |
Al Viro | 5eede62 | 2019-12-16 13:33:32 -0500 | [diff] [blame] | 4202 | static const struct constant_table shmem_param_enums_huge[] = { |
Al Viro | 2710c957a | 2019-09-06 22:12:08 -0400 | [diff] [blame] | 4203 | {"never", SHMEM_HUGE_NEVER }, |
| 4204 | {"always", SHMEM_HUGE_ALWAYS }, |
| 4205 | {"within_size", SHMEM_HUGE_WITHIN_SIZE }, |
| 4206 | {"advise", SHMEM_HUGE_ADVISE }, |
Al Viro | 2710c957a | 2019-09-06 22:12:08 -0400 | [diff] [blame] | 4207 | {} |
| 4208 | }; |
| 4209 | |
Al Viro | d7167b1 | 2019-09-07 07:23:15 -0400 | [diff] [blame] | 4210 | const struct fs_parameter_spec shmem_fs_parameters[] = { |
Eric Sandeen | 2ec0701 | 2024-06-27 19:38:12 -0500 | [diff] [blame] | 4211 | fsparam_gid ("gid", Opt_gid), |
Al Viro | 2710c957a | 2019-09-06 22:12:08 -0400 | [diff] [blame] | 4212 | fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge), |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4213 | fsparam_u32oct("mode", Opt_mode), |
| 4214 | fsparam_string("mpol", Opt_mpol), |
| 4215 | fsparam_string("nr_blocks", Opt_nr_blocks), |
| 4216 | fsparam_string("nr_inodes", Opt_nr_inodes), |
| 4217 | fsparam_string("size", Opt_size), |
Eric Sandeen | 2ec0701 | 2024-06-27 19:38:12 -0500 | [diff] [blame] | 4218 | fsparam_uid ("uid", Opt_uid), |
Chris Down | ea3271f | 2020-08-06 23:20:25 -0700 | [diff] [blame] | 4219 | fsparam_flag ("inode32", Opt_inode32), |
| 4220 | fsparam_flag ("inode64", Opt_inode64), |
Luis Chamberlain | 2c6efe9 | 2023-03-09 15:05:45 -0800 | [diff] [blame] | 4221 | fsparam_flag ("noswap", Opt_noswap), |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4222 | #ifdef CONFIG_TMPFS_QUOTA |
| 4223 | fsparam_flag ("quota", Opt_quota), |
| 4224 | fsparam_flag ("usrquota", Opt_usrquota), |
| 4225 | fsparam_flag ("grpquota", Opt_grpquota), |
Lukas Czerner | de4c0e7 | 2023-07-25 16:45:09 +0200 | [diff] [blame] | 4226 | fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit), |
| 4227 | fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit), |
| 4228 | fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit), |
| 4229 | fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit), |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4230 | #endif |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4231 | {} |
| 4232 | }; |
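
/*
 * Illustrative sketch (not part of shmem.c): the parameters above are what
 * a tmpfs mount accepts, e.g. from userspace via mount(2):
 *
 *	#include <sys/mount.h>
 *
 *	mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
 *	      "size=512m,nr_inodes=10k,mode=1777,huge=within_size");
 *
 * "size"/"nr_blocks" bound the block usage, "nr_inodes" the inode count,
 * and "huge" selects one of the policies in shmem_param_enums_huge[].
 */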
| 4233 | |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4234 | static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4235 | { |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4236 | struct shmem_options *ctx = fc->fs_private; |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4237 | struct fs_parse_result result; |
| 4238 | unsigned long long size; |
| 4239 | char *rest; |
| 4240 | int opt; |
Christian Brauner | 0200679 | 2023-08-01 18:17:04 +0200 | [diff] [blame] | 4241 | kuid_t kuid; |
| 4242 | kgid_t kgid; |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4243 | |
Al Viro | d7167b1 | 2019-09-07 07:23:15 -0400 | [diff] [blame] | 4244 | opt = fs_parse(fc, shmem_fs_parameters, param, &result); |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4245 | if (opt < 0) |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4246 | return opt; |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4247 | |
| 4248 | switch (opt) { |
| 4249 | case Opt_size: |
| 4250 | size = memparse(param->string, &rest); |
Al Viro | e04dc42 | 2019-09-08 19:20:12 -0400 | [diff] [blame] | 4251 | if (*rest == '%') { |
| 4252 | size <<= PAGE_SHIFT; |
| 4253 | size *= totalram_pages(); |
| 4254 | do_div(size, 100); |
| 4255 | rest++; |
| 4256 | } |
| 4257 | if (*rest) |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4258 | goto bad_value; |
Al Viro | e04dc42 | 2019-09-08 19:20:12 -0400 | [diff] [blame] | 4259 | ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE); |
| 4260 | ctx->seen |= SHMEM_SEEN_BLOCKS; |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4261 | break; |
| 4262 | case Opt_nr_blocks: |
| 4263 | ctx->blocks = memparse(param->string, &rest); |
Hugh Dickins | e07c469 | 2023-08-08 21:32:21 -0700 | [diff] [blame] | 4264 | if (*rest || ctx->blocks > LONG_MAX) |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4265 | goto bad_value; |
Al Viro | e04dc42 | 2019-09-08 19:20:12 -0400 | [diff] [blame] | 4266 | ctx->seen |= SHMEM_SEEN_BLOCKS; |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4267 | break; |
| 4268 | case Opt_nr_inodes: |
| 4269 | ctx->inodes = memparse(param->string, &rest); |
Hugh Dickins | e07c469 | 2023-08-08 21:32:21 -0700 | [diff] [blame] | 4270 | if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE) |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4271 | goto bad_value; |
Al Viro | e04dc42 | 2019-09-08 19:20:12 -0400 | [diff] [blame] | 4272 | ctx->seen |= SHMEM_SEEN_INODES; |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4273 | break; |
| 4274 | case Opt_mode: |
| 4275 | ctx->mode = result.uint_32 & 07777; |
| 4276 | break; |
| 4277 | case Opt_uid: |
Eric Sandeen | 2ec0701 | 2024-06-27 19:38:12 -0500 | [diff] [blame] | 4278 | kuid = result.uid; |
Christian Brauner | 0200679 | 2023-08-01 18:17:04 +0200 | [diff] [blame] | 4279 | |
| 4280 | /* |
| 4281 | * The requested uid must be representable in the |
| 4282 | * filesystem's idmapping. |
| 4283 | */ |
| 4284 | if (!kuid_has_mapping(fc->user_ns, kuid)) |
| 4285 | goto bad_value; |
| 4286 | |
| 4287 | ctx->uid = kuid; |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4288 | break; |
| 4289 | case Opt_gid: |
Eric Sandeen | 2ec0701 | 2024-06-27 19:38:12 -0500 | [diff] [blame] | 4290 | kgid = result.gid; |
Christian Brauner | 0200679 | 2023-08-01 18:17:04 +0200 | [diff] [blame] | 4291 | |
| 4292 | /* |
| 4293 | * The requested gid must be representable in the |
| 4294 | * filesystem's idmapping. |
| 4295 | */ |
| 4296 | if (!kgid_has_mapping(fc->user_ns, kgid)) |
| 4297 | goto bad_value; |
| 4298 | |
| 4299 | ctx->gid = kgid; |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4300 | break; |
| 4301 | case Opt_huge: |
| 4302 | ctx->huge = result.uint_32; |
| 4303 | if (ctx->huge != SHMEM_HUGE_NEVER && |
Matthew Wilcox (Oracle) | 396bcc5 | 2020-04-06 20:04:35 -0700 | [diff] [blame] | 4304 | !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4305 | has_transparent_hugepage())) |
| 4306 | goto unsupported_parameter; |
Al Viro | e04dc42 | 2019-09-08 19:20:12 -0400 | [diff] [blame] | 4307 | ctx->seen |= SHMEM_SEEN_HUGE; |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4308 | break; |
| 4309 | case Opt_mpol: |
| 4310 | if (IS_ENABLED(CONFIG_NUMA)) { |
| 4311 | mpol_put(ctx->mpol); |
| 4312 | ctx->mpol = NULL; |
| 4313 | if (mpol_parse_str(param->string, &ctx->mpol)) |
| 4314 | goto bad_value; |
| 4315 | break; |
| 4316 | } |
| 4317 | goto unsupported_parameter; |
Chris Down | ea3271f | 2020-08-06 23:20:25 -0700 | [diff] [blame] | 4318 | case Opt_inode32: |
| 4319 | ctx->full_inums = false; |
| 4320 | ctx->seen |= SHMEM_SEEN_INUMS; |
| 4321 | break; |
| 4322 | case Opt_inode64: |
| 4323 | if (sizeof(ino_t) < 8) { |
| 4324 | return invalfc(fc, |
| 4325 | "Cannot use inode64 with <64bit inums in kernel\n"); |
| 4326 | } |
| 4327 | ctx->full_inums = true; |
| 4328 | ctx->seen |= SHMEM_SEEN_INUMS; |
| 4329 | break; |
Luis Chamberlain | 2c6efe9 | 2023-03-09 15:05:45 -0800 | [diff] [blame] | 4330 | case Opt_noswap: |
Christian Brauner | 01106e1 | 2023-04-20 10:57:43 +0200 | [diff] [blame] | 4331 | if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) { |
| 4332 | return invalfc(fc, |
| 4333 | "Turning off swap in unprivileged tmpfs mounts unsupported"); |
| 4334 | } |
Luis Chamberlain | 2c6efe9 | 2023-03-09 15:05:45 -0800 | [diff] [blame] | 4335 | ctx->noswap = true; |
| 4336 | ctx->seen |= SHMEM_SEEN_NOSWAP; |
| 4337 | break; |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4338 | case Opt_quota: |
| 4339 | if (fc->user_ns != &init_user_ns) |
| 4340 | return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported"); |
| 4341 | ctx->seen |= SHMEM_SEEN_QUOTA; |
| 4342 | ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP); |
| 4343 | break; |
| 4344 | case Opt_usrquota: |
| 4345 | if (fc->user_ns != &init_user_ns) |
| 4346 | return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported"); |
| 4347 | ctx->seen |= SHMEM_SEEN_QUOTA; |
| 4348 | ctx->quota_types |= QTYPE_MASK_USR; |
| 4349 | break; |
| 4350 | case Opt_grpquota: |
| 4351 | if (fc->user_ns != &init_user_ns) |
| 4352 | return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported"); |
| 4353 | ctx->seen |= SHMEM_SEEN_QUOTA; |
| 4354 | ctx->quota_types |= QTYPE_MASK_GRP; |
| 4355 | break; |
Lukas Czerner | de4c0e7 | 2023-07-25 16:45:09 +0200 | [diff] [blame] | 4356 | case Opt_usrquota_block_hardlimit: |
| 4357 | size = memparse(param->string, &rest); |
| 4358 | if (*rest || !size) |
| 4359 | goto bad_value; |
| 4360 | if (size > SHMEM_QUOTA_MAX_SPC_LIMIT) |
| 4361 | return invalfc(fc, |
| 4362 | "User quota block hardlimit too large."); |
| 4363 | ctx->qlimits.usrquota_bhardlimit = size; |
| 4364 | break; |
| 4365 | case Opt_grpquota_block_hardlimit: |
| 4366 | size = memparse(param->string, &rest); |
| 4367 | if (*rest || !size) |
| 4368 | goto bad_value; |
| 4369 | if (size > SHMEM_QUOTA_MAX_SPC_LIMIT) |
| 4370 | return invalfc(fc, |
| 4371 | "Group quota block hardlimit too large."); |
| 4372 | ctx->qlimits.grpquota_bhardlimit = size; |
| 4373 | break; |
| 4374 | case Opt_usrquota_inode_hardlimit: |
| 4375 | size = memparse(param->string, &rest); |
| 4376 | if (*rest || !size) |
| 4377 | goto bad_value; |
| 4378 | if (size > SHMEM_QUOTA_MAX_INO_LIMIT) |
| 4379 | return invalfc(fc, |
| 4380 | "User quota inode hardlimit too large."); |
| 4381 | ctx->qlimits.usrquota_ihardlimit = size; |
| 4382 | break; |
| 4383 | case Opt_grpquota_inode_hardlimit: |
| 4384 | size = memparse(param->string, &rest); |
| 4385 | if (*rest || !size) |
| 4386 | goto bad_value; |
| 4387 | if (size > SHMEM_QUOTA_MAX_INO_LIMIT) |
| 4388 | return invalfc(fc, |
| 4389 | "Group quota inode hardlimit too large."); |
| 4390 | ctx->qlimits.grpquota_ihardlimit = size; |
| 4391 | break; |
Al Viro | e04dc42 | 2019-09-08 19:20:12 -0400 | [diff] [blame] | 4392 | } |
| 4393 | return 0; |
| 4394 | |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4395 | unsupported_parameter: |
Al Viro | f35aa2b | 2019-12-21 21:35:55 -0500 | [diff] [blame] | 4396 | return invalfc(fc, "Unsupported parameter '%s'", param->key); |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4397 | bad_value: |
Al Viro | f35aa2b | 2019-12-21 21:35:55 -0500 | [diff] [blame] | 4398 | return invalfc(fc, "Bad value for '%s'", param->key); |
Al Viro | e04dc42 | 2019-09-08 19:20:12 -0400 | [diff] [blame] | 4399 | } |
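/*
 * Worked example of the Opt_size percentage arithmetic above (a plain
 * userspace sketch, not kernel code): "size=50%" leaves 50 in 'size',
 * which is shifted to bytes, scaled by total RAM, and turned back into
 * PAGE_SIZE blocks.  With 4 KiB pages and 4,194,304 pages of RAM
 * (16 GiB): 50 << 12 = 204800, * 4194304 = 858993459200,
 * / 100 = 8589934592, DIV_ROUND_UP by 4096 = 2097152 blocks = 8 GiB.
 */
static unsigned long long percent_to_blocks(unsigned long long percent,
					    unsigned long long total_pages,
					    unsigned long long page_size)
{
	unsigned long long size = percent * page_size;	/* size <<= PAGE_SHIFT */

	size *= total_pages;		/* size *= totalram_pages() */
	size /= 100;			/* do_div(size, 100) */
	return (size + page_size - 1) / page_size;	/* DIV_ROUND_UP(size, PAGE_SIZE) */
}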
| 4400 | |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4401 | static int shmem_parse_options(struct fs_context *fc, void *data) |
Al Viro | e04dc42 | 2019-09-08 19:20:12 -0400 | [diff] [blame] | 4402 | { |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4403 | char *options = data; |
| 4404 | |
Al Viro | 33f37c6 | 2019-10-09 22:48:01 -0400 | [diff] [blame] | 4405 | if (options) { |
| 4406 | int err = security_sb_eat_lsm_opts(options, &fc->security); |
| 4407 | if (err) |
| 4408 | return err; |
| 4409 | } |
| 4410 | |
Hugh Dickins | b00dc3a | 2006-02-21 23:49:47 +0000 | [diff] [blame] | 4411 | while (options != NULL) { |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4412 | char *this_char = options; |
Hugh Dickins | b00dc3a | 2006-02-21 23:49:47 +0000 | [diff] [blame] | 4413 | for (;;) { |
| 4414 | /* |
| 4415 | * NUL-terminate this option: unfortunately, |
| 4416 | * mount options form a comma-separated list, |
| 4417 | * but mpol's nodelist may also contain commas. |
| 4418 | */ |
| 4419 | options = strchr(options, ','); |
| 4420 | if (options == NULL) |
| 4421 | break; |
| 4422 | options++; |
| 4423 | if (!isdigit(*options)) { |
| 4424 | options[-1] = '\0'; |
| 4425 | break; |
| 4426 | } |
| 4427 | } |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4428 | if (*this_char) { |
Zhiyuan Dai | 68d68ff | 2021-05-04 18:40:12 -0700 | [diff] [blame] | 4429 | char *value = strchr(this_char, '='); |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4430 | size_t len = 0; |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4431 | int err; |
| 4432 | |
| 4433 | if (value) { |
| 4434 | *value++ = '\0'; |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4435 | len = strlen(value); |
Al Viro | 626c392 | 2019-09-08 20:28:06 -0400 | [diff] [blame] | 4436 | } |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4437 | err = vfs_parse_fs_string(fc, this_char, value, len); |
| 4438 | if (err < 0) |
| 4439 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4440 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4441 | } |
| 4442 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4443 | } |
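/*
 * Illustrative sketch of the monolithic path handled above (userspace,
 * not part of shmem.c): mount(2) hands over a single comma-separated
 * string, and the loop above only splits on a comma whose next character
 * is not a digit.  The nodelist in "mpol=bind:0,2" therefore stays
 * intact, while ",size=10m" is split off and passed to
 * vfs_parse_fs_string().
 */
#include <sys/mount.h>

static int mount_tmpfs_legacy(const char *target)
{
	/* Splits into "mpol=bind:0,2" and "size=10m", not at the first comma. */
	return mount("tmpfs", target, "tmpfs", 0, "mpol=bind:0,2,size=10m");
}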
| 4444 | |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4445 | /* |
| 4446 | * Reconfigure a shmem filesystem. |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4447 | */ |
| 4448 | static int shmem_reconfigure(struct fs_context *fc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4449 | { |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4450 | struct shmem_options *ctx = fc->fs_private; |
| 4451 | struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); |
Hugh Dickins | e07c469 | 2023-08-08 21:32:21 -0700 | [diff] [blame] | 4452 | unsigned long used_isp; |
Sebastian Andrzej Siewior | bf11b9a | 2021-09-02 14:54:03 -0700 | [diff] [blame] | 4453 | struct mempolicy *mpol = NULL; |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4454 | const char *err; |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 4455 | |
Sebastian Andrzej Siewior | bf11b9a | 2021-09-02 14:54:03 -0700 | [diff] [blame] | 4456 | raw_spin_lock(&sbinfo->stat_lock); |
Hugh Dickins | e07c469 | 2023-08-08 21:32:21 -0700 | [diff] [blame] | 4457 | used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace; |
ZhaoLong Wang | 0c98c8e | 2022-06-29 20:43:24 +0800 | [diff] [blame] | 4458 | |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4459 | if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { |
| 4460 | if (!sbinfo->max_blocks) { |
| 4461 | err = "Cannot retroactively limit size"; |
Al Viro | 0b5071d | 2019-09-08 18:49:18 -0400 | [diff] [blame] | 4462 | goto out; |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4463 | } |
Al Viro | 0b5071d | 2019-09-08 18:49:18 -0400 | [diff] [blame] | 4464 | if (percpu_counter_compare(&sbinfo->used_blocks, |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4465 | ctx->blocks) > 0) { |
| 4466 | err = "Too small a size for current use"; |
Al Viro | 0b5071d | 2019-09-08 18:49:18 -0400 | [diff] [blame] | 4467 | goto out; |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4468 | } |
Al Viro | 0b5071d | 2019-09-08 18:49:18 -0400 | [diff] [blame] | 4469 | } |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4470 | if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) { |
| 4471 | if (!sbinfo->max_inodes) { |
| 4472 | err = "Cannot retroactively limit inodes"; |
Al Viro | 0b5071d | 2019-09-08 18:49:18 -0400 | [diff] [blame] | 4473 | goto out; |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4474 | } |
Hugh Dickins | e07c469 | 2023-08-08 21:32:21 -0700 | [diff] [blame] | 4475 | if (ctx->inodes * BOGO_INODE_SIZE < used_isp) { |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4476 | err = "Too few inodes for current use"; |
Al Viro | 0b5071d | 2019-09-08 18:49:18 -0400 | [diff] [blame] | 4477 | goto out; |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4478 | } |
Al Viro | 0b5071d | 2019-09-08 18:49:18 -0400 | [diff] [blame] | 4479 | } |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 4480 | |
Chris Down | ea3271f | 2020-08-06 23:20:25 -0700 | [diff] [blame] | 4481 | if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums && |
| 4482 | sbinfo->next_ino > UINT_MAX) { |
| 4483 | err = "Current inum too high to switch to 32-bit inums"; |
| 4484 | goto out; |
| 4485 | } |
Luis Chamberlain | 2c6efe9 | 2023-03-09 15:05:45 -0800 | [diff] [blame] | 4486 | if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) { |
| 4487 | err = "Cannot disable swap on remount"; |
| 4488 | goto out; |
| 4489 | } |
| 4490 | if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) { |
| 4491 | err = "Cannot enable swap on remount if it was disabled on first mount"; |
| 4492 | goto out; |
| 4493 | } |
Chris Down | ea3271f | 2020-08-06 23:20:25 -0700 | [diff] [blame] | 4494 | |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4495 | if (ctx->seen & SHMEM_SEEN_QUOTA && |
| 4496 | !sb_any_quota_loaded(fc->root->d_sb)) { |
| 4497 | err = "Cannot enable quota on remount"; |
| 4498 | goto out; |
| 4499 | } |
| 4500 | |
Lukas Czerner | de4c0e7 | 2023-07-25 16:45:09 +0200 | [diff] [blame] | 4501 | #ifdef CONFIG_TMPFS_QUOTA |
| 4502 | #define CHANGED_LIMIT(name) \ |
| 4503 | (ctx->qlimits.name## hardlimit && \ |
| 4504 | (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit)) |
| 4505 | |
| 4506 | if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) || |
| 4507 | CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) { |
| 4508 | err = "Cannot change global quota limit on remount"; |
| 4509 | goto out; |
| 4510 | } |
| 4511 | #endif /* CONFIG_TMPFS_QUOTA */ |
| 4512 | |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4513 | if (ctx->seen & SHMEM_SEEN_HUGE) |
| 4514 | sbinfo->huge = ctx->huge; |
Chris Down | ea3271f | 2020-08-06 23:20:25 -0700 | [diff] [blame] | 4515 | if (ctx->seen & SHMEM_SEEN_INUMS) |
| 4516 | sbinfo->full_inums = ctx->full_inums; |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4517 | if (ctx->seen & SHMEM_SEEN_BLOCKS) |
| 4518 | sbinfo->max_blocks = ctx->blocks; |
| 4519 | if (ctx->seen & SHMEM_SEEN_INODES) { |
| 4520 | sbinfo->max_inodes = ctx->inodes; |
Hugh Dickins | e07c469 | 2023-08-08 21:32:21 -0700 | [diff] [blame] | 4521 | sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp; |
Al Viro | 0b5071d | 2019-09-08 18:49:18 -0400 | [diff] [blame] | 4522 | } |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 4523 | |
Greg Thelen | 5f00110 | 2013-02-22 16:36:01 -0800 | [diff] [blame] | 4524 | /* |
| 4525 | * Preserve previous mempolicy unless mpol remount option was specified. |
| 4526 | */ |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4527 | if (ctx->mpol) { |
Sebastian Andrzej Siewior | bf11b9a | 2021-09-02 14:54:03 -0700 | [diff] [blame] | 4528 | mpol = sbinfo->mpol; |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4529 | sbinfo->mpol = ctx->mpol; /* transfers initial ref */ |
| 4530 | ctx->mpol = NULL; |
Greg Thelen | 5f00110 | 2013-02-22 16:36:01 -0800 | [diff] [blame] | 4531 | } |
Luis Chamberlain | 2c6efe9 | 2023-03-09 15:05:45 -0800 | [diff] [blame] | 4532 | |
| 4533 | if (ctx->noswap) |
| 4534 | sbinfo->noswap = true; |
| 4535 | |
Sebastian Andrzej Siewior | bf11b9a | 2021-09-02 14:54:03 -0700 | [diff] [blame] | 4536 | raw_spin_unlock(&sbinfo->stat_lock); |
| 4537 | mpol_put(mpol); |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4538 | return 0; |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 4539 | out: |
Sebastian Andrzej Siewior | bf11b9a | 2021-09-02 14:54:03 -0700 | [diff] [blame] | 4540 | raw_spin_unlock(&sbinfo->stat_lock); |
Al Viro | f35aa2b | 2019-12-21 21:35:55 -0500 | [diff] [blame] | 4541 | return invalfc(fc, "%s", err); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4542 | } |
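/*
 * Sketch of the remount path (userspace, not part of shmem.c):
 * MS_REMOUNT re-enters shmem_reconfigure() above, so growing "size" on a
 * live, size-limited mount succeeds, while e.g. adding "noswap" to a
 * mount created without it is rejected with -EINVAL and the
 * "Cannot disable swap on remount" message.
 */
#include <sys/mount.h>

static int grow_tmpfs(const char *target)
{
	/* Accepted as long as the new limit still covers the blocks in use. */
	return mount(NULL, target, NULL, MS_REMOUNT, "size=2g");
}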
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 4543 | |
Al Viro | 34c80b1 | 2011-12-08 21:32:45 -0500 | [diff] [blame] | 4544 | static int shmem_show_options(struct seq_file *seq, struct dentry *root) |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 4545 | { |
Al Viro | 34c80b1 | 2011-12-08 21:32:45 -0500 | [diff] [blame] | 4546 | struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); |
Tu Jinjiang | 283ebde | 2023-05-25 11:16:40 +0800 | [diff] [blame] | 4547 | struct mempolicy *mpol; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 4548 | |
| 4549 | if (sbinfo->max_blocks != shmem_default_max_blocks()) |
ZhangPeng | b91742d | 2023-08-04 09:25:56 +0800 | [diff] [blame] | 4550 | seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks)); |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 4551 | if (sbinfo->max_inodes != shmem_default_max_inodes()) |
| 4552 | seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); |
Joe Perches | 0825a6f | 2018-06-14 15:27:58 -0700 | [diff] [blame] | 4553 | if (sbinfo->mode != (0777 | S_ISVTX)) |
Al Viro | 09208d1 | 2011-07-26 03:15:03 -0400 | [diff] [blame] | 4554 | seq_printf(seq, ",mode=%03ho", sbinfo->mode); |
Eric W. Biederman | 8751e03 | 2012-02-07 16:46:12 -0800 | [diff] [blame] | 4555 | if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) |
| 4556 | seq_printf(seq, ",uid=%u", |
| 4557 | from_kuid_munged(&init_user_ns, sbinfo->uid)); |
| 4558 | if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) |
| 4559 | seq_printf(seq, ",gid=%u", |
| 4560 | from_kgid_munged(&init_user_ns, sbinfo->gid)); |
Chris Down | ea3271f | 2020-08-06 23:20:25 -0700 | [diff] [blame] | 4561 | |
| 4562 | /* |
| 4563 | * Showing inode{64,32} might be useful even if it's the system default, |
| 4564 | * since then people don't have to resort to checking both here and |
| 4565 | * /proc/config.gz to confirm 64-bit inums were successfully applied |
| 4566 | * (which may not even exist if IKCONFIG_PROC isn't enabled). |
| 4567 | * |
| 4568 | * We hide it when inode64 isn't the default and we are using 32-bit |
| 4569 | * inodes, since that probably just means the feature isn't even under |
| 4570 | * consideration. |
| 4571 | * |
| 4572 | * As such: |
| 4573 | * |
| 4574 | *                    +-----------------+-----------------+ |
| 4575 | *                    | TMPFS_INODE64=y | TMPFS_INODE64=n | |
| 4576 | * +------------------+-----------------+-----------------+ |
| 4577 | * | full_inums=true  | show            | show            | |
| 4578 | * | full_inums=false | show            | hide            | |
| 4579 | * +------------------+-----------------+-----------------+ |
| 4580 | * |
| 4581 | */ |
| 4582 | if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums) |
| 4583 | seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32)); |
Matthew Wilcox (Oracle) | 396bcc5 | 2020-04-06 20:04:35 -0700 | [diff] [blame] | 4584 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 4585 | /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ |
| 4586 | if (sbinfo->huge) |
| 4587 | seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); |
| 4588 | #endif |
Tu Jinjiang | 283ebde | 2023-05-25 11:16:40 +0800 | [diff] [blame] | 4589 | mpol = shmem_get_sbmpol(sbinfo); |
| 4590 | shmem_show_mpol(seq, mpol); |
| 4591 | mpol_put(mpol); |
Luis Chamberlain | 2c6efe9 | 2023-03-09 15:05:45 -0800 | [diff] [blame] | 4592 | if (sbinfo->noswap) |
| 4593 | seq_printf(seq, ",noswap"); |
Jan Kara | b4d3de5 | 2024-01-29 13:01:31 +0100 | [diff] [blame] | 4594 | #ifdef CONFIG_TMPFS_QUOTA |
| 4595 | if (sb_has_quota_active(root->d_sb, USRQUOTA)) |
| 4596 | seq_printf(seq, ",usrquota"); |
| 4597 | if (sb_has_quota_active(root->d_sb, GRPQUOTA)) |
| 4598 | seq_printf(seq, ",grpquota"); |
| 4599 | if (sbinfo->qlimits.usrquota_bhardlimit) |
| 4600 | seq_printf(seq, ",usrquota_block_hardlimit=%lld", |
| 4601 | sbinfo->qlimits.usrquota_bhardlimit); |
| 4602 | if (sbinfo->qlimits.grpquota_bhardlimit) |
| 4603 | seq_printf(seq, ",grpquota_block_hardlimit=%lld", |
| 4604 | sbinfo->qlimits.grpquota_bhardlimit); |
| 4605 | if (sbinfo->qlimits.usrquota_ihardlimit) |
| 4606 | seq_printf(seq, ",usrquota_inode_hardlimit=%lld", |
| 4607 | sbinfo->qlimits.usrquota_ihardlimit); |
| 4608 | if (sbinfo->qlimits.grpquota_ihardlimit) |
| 4609 | seq_printf(seq, ",grpquota_inode_hardlimit=%lld", |
| 4610 | sbinfo->qlimits.grpquota_ihardlimit); |
| 4611 | #endif |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 4612 | return 0; |
| 4613 | } |
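/*
 * Userspace sketch (not part of shmem.c): the string built by
 * shmem_show_options() above is what getmntent() returns in mnt_opts for
 * a tmpfs entry in /proc/mounts, e.g. something like
 * "rw,size=1024k,nr_inodes=1024" (exact contents depend on the mount).
 */
#include <mntent.h>
#include <stdio.h>
#include <string.h>

static void dump_tmpfs_options(void)
{
	FILE *f = setmntent("/proc/mounts", "r");
	struct mntent *m;

	if (!f)
		return;
	while ((m = getmntent(f)) != NULL)
		if (strcmp(m->mnt_type, "tmpfs") == 0)
			printf("%s: %s\n", m->mnt_dir, m->mnt_opts);
	endmntent(f);
}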
David Herrmann | 9183df2 | 2014-08-08 14:25:29 -0700 | [diff] [blame] | 4614 | |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 4615 | #endif /* CONFIG_TMPFS */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4616 | |
| 4617 | static void shmem_put_super(struct super_block *sb) |
| 4618 | { |
Hugh Dickins | 602586a | 2010-08-17 15:23:56 -0700 | [diff] [blame] | 4619 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
| 4620 | |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4621 | #ifdef CONFIG_TMPFS_QUOTA |
| 4622 | shmem_disable_quotas(sb); |
| 4623 | #endif |
Chris Down | e809d5f | 2020-08-06 23:20:20 -0700 | [diff] [blame] | 4624 | free_percpu(sbinfo->ino_batch); |
Hugh Dickins | 602586a | 2010-08-17 15:23:56 -0700 | [diff] [blame] | 4625 | percpu_counter_destroy(&sbinfo->used_blocks); |
Greg Thelen | 49cd0a5 | 2013-02-22 16:36:02 -0800 | [diff] [blame] | 4626 | mpol_put(sbinfo->mpol); |
Hugh Dickins | 602586a | 2010-08-17 15:23:56 -0700 | [diff] [blame] | 4627 | kfree(sbinfo); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4628 | sb->s_fs_info = NULL; |
| 4629 | } |
| 4630 | |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4631 | static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4632 | { |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4633 | struct shmem_options *ctx = fc->fs_private; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4634 | struct inode *inode; |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 4635 | struct shmem_sb_info *sbinfo; |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 4636 | int error = -ENOMEM; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 4637 | |
| 4638 | /* Round up to L1_CACHE_BYTES to resist false sharing */ |
Pekka Enberg | 425fbf0 | 2009-09-21 17:03:50 -0700 | [diff] [blame] | 4639 | sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 4640 | L1_CACHE_BYTES), GFP_KERNEL); |
| 4641 | if (!sbinfo) |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 4642 | return error; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 4643 | |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 4644 | sb->s_fs_info = sbinfo; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4645 | |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 4646 | #ifdef CONFIG_TMPFS |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4647 | /* |
| 4648 | * By default we only allow half of the physical RAM per |
| 4649 | * tmpfs instance, limiting inodes to one per page of lowmem; |
| 4650 | * but the internal instance is left unlimited. |
| 4651 | */ |
Linus Torvalds | 1751e8a | 2017-11-27 13:05:09 -0800 | [diff] [blame] | 4652 | if (!(sb->s_flags & SB_KERNMOUNT)) { |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4653 | if (!(ctx->seen & SHMEM_SEEN_BLOCKS)) |
| 4654 | ctx->blocks = shmem_default_max_blocks(); |
| 4655 | if (!(ctx->seen & SHMEM_SEEN_INODES)) |
| 4656 | ctx->inodes = shmem_default_max_inodes(); |
Chris Down | ea3271f | 2020-08-06 23:20:25 -0700 | [diff] [blame] | 4657 | if (!(ctx->seen & SHMEM_SEEN_INUMS)) |
| 4658 | ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64); |
Luis Chamberlain | 2c6efe9 | 2023-03-09 15:05:45 -0800 | [diff] [blame] | 4659 | sbinfo->noswap = ctx->noswap; |
Al Viro | ca4e051 | 2013-08-31 12:57:10 -0400 | [diff] [blame] | 4660 | } else { |
Linus Torvalds | 1751e8a | 2017-11-27 13:05:09 -0800 | [diff] [blame] | 4661 | sb->s_flags |= SB_NOUSER; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4662 | } |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 4663 | sb->s_export_op = &shmem_export_ops; |
Jeff Layton | 36f05ca | 2022-09-09 09:00:31 -0400 | [diff] [blame] | 4664 | sb->s_flags |= SB_NOSEC | SB_I_VERSION; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4665 | #else |
Linus Torvalds | 1751e8a | 2017-11-27 13:05:09 -0800 | [diff] [blame] | 4666 | sb->s_flags |= SB_NOUSER; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4667 | #endif |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4668 | sbinfo->max_blocks = ctx->blocks; |
Hugh Dickins | e07c469 | 2023-08-08 21:32:21 -0700 | [diff] [blame] | 4669 | sbinfo->max_inodes = ctx->inodes; |
| 4670 | sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE; |
Chris Down | e809d5f | 2020-08-06 23:20:20 -0700 | [diff] [blame] | 4671 | if (sb->s_flags & SB_KERNMOUNT) { |
| 4672 | sbinfo->ino_batch = alloc_percpu(ino_t); |
| 4673 | if (!sbinfo->ino_batch) |
| 4674 | goto failed; |
| 4675 | } |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4676 | sbinfo->uid = ctx->uid; |
| 4677 | sbinfo->gid = ctx->gid; |
Chris Down | ea3271f | 2020-08-06 23:20:25 -0700 | [diff] [blame] | 4678 | sbinfo->full_inums = ctx->full_inums; |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4679 | sbinfo->mode = ctx->mode; |
| 4680 | sbinfo->huge = ctx->huge; |
| 4681 | sbinfo->mpol = ctx->mpol; |
| 4682 | ctx->mpol = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4683 | |
Sebastian Andrzej Siewior | bf11b9a | 2021-09-02 14:54:03 -0700 | [diff] [blame] | 4684 | raw_spin_lock_init(&sbinfo->stat_lock); |
Tejun Heo | 908c7f1 | 2014-09-08 09:51:29 +0900 | [diff] [blame] | 4685 | if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) |
Hugh Dickins | 602586a | 2010-08-17 15:23:56 -0700 | [diff] [blame] | 4686 | goto failed; |
Kirill A. Shutemov | 779750d | 2016-07-26 15:26:38 -0700 | [diff] [blame] | 4687 | spin_lock_init(&sbinfo->shrinklist_lock); |
| 4688 | INIT_LIST_HEAD(&sbinfo->shrinklist); |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 4689 | |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 4690 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 4691 | sb->s_blocksize = PAGE_SIZE; |
| 4692 | sb->s_blocksize_bits = PAGE_SHIFT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4693 | sb->s_magic = TMPFS_MAGIC; |
| 4694 | sb->s_op = &shmem_ops; |
Robin H. Johnson | cfd95a9 | 2006-06-12 21:50:25 +0100 | [diff] [blame] | 4695 | sb->s_time_gran = 1; |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4696 | #ifdef CONFIG_TMPFS_XATTR |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 4697 | sb->s_xattr = shmem_xattr_handlers; |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4698 | #endif |
| 4699 | #ifdef CONFIG_TMPFS_POSIX_ACL |
Linus Torvalds | 1751e8a | 2017-11-27 13:05:09 -0800 | [diff] [blame] | 4700 | sb->s_flags |= SB_POSIXACL; |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 4701 | #endif |
Kent Overstreet | a4af51c | 2024-02-06 21:56:15 -0500 | [diff] [blame] | 4702 | uuid_t uuid; |
| 4703 | uuid_gen(&uuid); |
| 4704 | super_set_uuid(sb, uuid.b, sizeof(uuid)); |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 4705 | |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4706 | #ifdef CONFIG_TMPFS_QUOTA |
| 4707 | if (ctx->seen & SHMEM_SEEN_QUOTA) { |
| 4708 | sb->dq_op = &shmem_quota_operations; |
| 4709 | sb->s_qcop = &dquot_quotactl_sysfile_ops; |
| 4710 | sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP; |
| 4711 | |
Lukas Czerner | de4c0e7 | 2023-07-25 16:45:09 +0200 | [diff] [blame] | 4712 | /* Copy the default limits from ctx into sbinfo */ |
| 4713 | memcpy(&sbinfo->qlimits, &ctx->qlimits, |
| 4714 | sizeof(struct shmem_quota_limits)); |
| 4715 | |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4716 | if (shmem_enable_quotas(sb, ctx->quota_types)) |
| 4717 | goto failed; |
| 4718 | } |
| 4719 | #endif /* CONFIG_TMPFS_QUOTA */ |
| 4720 | |
Hugh Dickins | 9be7d5b | 2023-09-29 20:28:50 -0700 | [diff] [blame] | 4721 | inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, |
| 4722 | S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 4723 | if (IS_ERR(inode)) { |
| 4724 | error = PTR_ERR(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4725 | goto failed; |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 4726 | } |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 4727 | inode->i_uid = sbinfo->uid; |
| 4728 | inode->i_gid = sbinfo->gid; |
Al Viro | 318ceed | 2012-02-12 22:08:01 -0500 | [diff] [blame] | 4729 | sb->s_root = d_make_root(inode); |
| 4730 | if (!sb->s_root) |
Al Viro | 48fde70 | 2012-01-08 22:15:13 -0500 | [diff] [blame] | 4731 | goto failed; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4732 | return 0; |
| 4733 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4734 | failed: |
| 4735 | shmem_put_super(sb); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 4736 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4737 | } |
| 4738 | |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4739 | static int shmem_get_tree(struct fs_context *fc) |
| 4740 | { |
| 4741 | return get_tree_nodev(fc, shmem_fill_super); |
| 4742 | } |
| 4743 | |
| 4744 | static void shmem_free_fc(struct fs_context *fc) |
| 4745 | { |
| 4746 | struct shmem_options *ctx = fc->fs_private; |
| 4747 | |
| 4748 | if (ctx) { |
| 4749 | mpol_put(ctx->mpol); |
| 4750 | kfree(ctx); |
| 4751 | } |
| 4752 | } |
| 4753 | |
| 4754 | static const struct fs_context_operations shmem_fs_context_ops = { |
| 4755 | .free = shmem_free_fc, |
| 4756 | .get_tree = shmem_get_tree, |
| 4757 | #ifdef CONFIG_TMPFS |
| 4758 | .parse_monolithic = shmem_parse_options, |
| 4759 | .parse_param = shmem_parse_one, |
| 4760 | .reconfigure = shmem_reconfigure, |
| 4761 | #endif |
| 4762 | }; |
| 4763 | |
Alexey Dobriyan | 68279f9 | 2023-10-11 19:55:00 +0300 | [diff] [blame] | 4764 | static struct kmem_cache *shmem_inode_cachep __ro_after_init; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4765 | |
| 4766 | static struct inode *shmem_alloc_inode(struct super_block *sb) |
| 4767 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 4768 | struct shmem_inode_info *info; |
Muchun Song | fd60b28 | 2022-03-22 14:41:03 -0700 | [diff] [blame] | 4769 | info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL); |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 4770 | if (!info) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4771 | return NULL; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 4772 | return &info->vfs_inode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4773 | } |
| 4774 | |
Al Viro | 74b1da5 | 2019-04-15 23:19:05 -0400 | [diff] [blame] | 4775 | static void shmem_free_in_core_inode(struct inode *inode) |
Nick Piggin | fa0d7e3d | 2011-01-07 17:49:49 +1100 | [diff] [blame] | 4776 | { |
Al Viro | 84e710d | 2016-04-15 00:58:55 -0400 | [diff] [blame] | 4777 | if (S_ISLNK(inode->i_mode)) |
| 4778 | kfree(inode->i_link); |
Nick Piggin | fa0d7e3d | 2011-01-07 17:49:49 +1100 | [diff] [blame] | 4779 | kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); |
| 4780 | } |
| 4781 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4782 | static void shmem_destroy_inode(struct inode *inode) |
| 4783 | { |
Al Viro | 09208d1 | 2011-07-26 03:15:03 -0400 | [diff] [blame] | 4784 | if (S_ISREG(inode->i_mode)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4785 | mpol_free_shared_policy(&SHMEM_I(inode)->policy); |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 4786 | if (S_ISDIR(inode->i_mode)) |
| 4787 | simple_offset_destroy(shmem_get_offset_ctx(inode)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4788 | } |
| 4789 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 4790 | static void shmem_init_inode(void *foo) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4791 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 4792 | struct shmem_inode_info *info = foo; |
| 4793 | inode_init_once(&info->vfs_inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4794 | } |
| 4795 | |
Alexey Dobriyan | 68279f9 | 2023-10-11 19:55:00 +0300 | [diff] [blame] | 4796 | static void __init shmem_init_inodecache(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4797 | { |
| 4798 | shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", |
| 4799 | sizeof(struct shmem_inode_info), |
Vladimir Davydov | 5d09705 | 2016-01-14 15:18:21 -0800 | [diff] [blame] | 4800 | 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4801 | } |
| 4802 | |
Alexey Dobriyan | 68279f9 | 2023-10-11 19:55:00 +0300 | [diff] [blame] | 4803 | static void __init shmem_destroy_inodecache(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4804 | { |
Alexey Dobriyan | 1a1d92c | 2006-09-27 01:49:40 -0700 | [diff] [blame] | 4805 | kmem_cache_destroy(shmem_inode_cachep); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4806 | } |
| 4807 | |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 4808 | /* Keep the page in page cache instead of truncating it */ |
Matthew Wilcox (Oracle) | af7628d | 2023-11-17 16:14:47 +0000 | [diff] [blame] | 4809 | static int shmem_error_remove_folio(struct address_space *mapping, |
| 4810 | struct folio *folio) |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 4811 | { |
| 4812 | return 0; |
| 4813 | } |
| 4814 | |
Christoph Hellwig | aefacb2 | 2024-02-19 07:27:10 +0100 | [diff] [blame] | 4815 | static const struct address_space_operations shmem_aops = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4816 | .writepage = shmem_writepage, |
Matthew Wilcox (Oracle) | 46de8b97 | 2022-02-09 20:22:13 +0000 | [diff] [blame] | 4817 | .dirty_folio = noop_dirty_folio, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4818 | #ifdef CONFIG_TMPFS |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 4819 | .write_begin = shmem_write_begin, |
| 4820 | .write_end = shmem_write_end, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4821 | #endif |
Andrew Morton | 1c93923 | 2014-10-09 15:27:59 -0700 | [diff] [blame] | 4822 | #ifdef CONFIG_MIGRATION |
Matthew Wilcox (Oracle) | 5418465 | 2022-06-06 10:27:41 -0400 | [diff] [blame] | 4823 | .migrate_folio = migrate_folio, |
Andrew Morton | 1c93923 | 2014-10-09 15:27:59 -0700 | [diff] [blame] | 4824 | #endif |
Matthew Wilcox (Oracle) | af7628d | 2023-11-17 16:14:47 +0000 | [diff] [blame] | 4825 | .error_remove_folio = shmem_error_remove_folio, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4826 | }; |
| 4827 | |
Helge Deller | 15ad7cd | 2006-12-06 20:40:36 -0800 | [diff] [blame] | 4828 | static const struct file_operations shmem_file_operations = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4829 | .mmap = shmem_mmap, |
Hugh Dickins | e88e0d3 | 2023-08-10 23:27:07 -0700 | [diff] [blame] | 4830 | .open = shmem_file_open, |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 4831 | .get_unmapped_area = shmem_get_unmapped_area, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4832 | #ifdef CONFIG_TMPFS |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 4833 | .llseek = shmem_file_llseek, |
Al Viro | 2ba5bbe | 2014-04-02 20:00:02 -0400 | [diff] [blame] | 4834 | .read_iter = shmem_file_read_iter, |
Hugh Dickins | e88e0d3 | 2023-08-10 23:27:07 -0700 | [diff] [blame] | 4835 | .write_iter = shmem_file_write_iter, |
Christoph Hellwig | 1b061d9 | 2010-05-26 17:53:41 +0200 | [diff] [blame] | 4836 | .fsync = noop_fsync, |
David Howells | bd194b1 | 2023-05-22 14:49:56 +0100 | [diff] [blame] | 4837 | .splice_read = shmem_file_splice_read, |
Al Viro | f6cb85d | 2014-04-05 04:38:56 -0400 | [diff] [blame] | 4838 | .splice_write = iter_file_splice_write, |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 4839 | .fallocate = shmem_fallocate, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4840 | #endif |
| 4841 | }; |
| 4842 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 4843 | static const struct inode_operations shmem_inode_operations = { |
Yu Zhao | 44a3022 | 2015-09-08 15:03:33 -0700 | [diff] [blame] | 4844 | .getattr = shmem_getattr, |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 4845 | .setattr = shmem_setattr, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4846 | #ifdef CONFIG_TMPFS_XATTR |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4847 | .listxattr = shmem_listxattr, |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 4848 | .set_acl = simple_set_acl, |
Theodore Ts'o | e408e69 | 2022-07-14 21:59:12 -0400 | [diff] [blame] | 4849 | .fileattr_get = shmem_fileattr_get, |
| 4850 | .fileattr_set = shmem_fileattr_set, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4851 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4852 | }; |
| 4853 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 4854 | static const struct inode_operations shmem_dir_inode_operations = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4855 | #ifdef CONFIG_TMPFS |
Xavier Roche | f7cd16a | 2022-03-22 14:39:55 -0700 | [diff] [blame] | 4856 | .getattr = shmem_getattr, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4857 | .create = shmem_create, |
| 4858 | .lookup = simple_lookup, |
| 4859 | .link = shmem_link, |
| 4860 | .unlink = shmem_unlink, |
| 4861 | .symlink = shmem_symlink, |
| 4862 | .mkdir = shmem_mkdir, |
| 4863 | .rmdir = shmem_rmdir, |
| 4864 | .mknod = shmem_mknod, |
Miklos Szeredi | 2773bf0 | 2016-09-27 11:03:58 +0200 | [diff] [blame] | 4865 | .rename = shmem_rename2, |
Al Viro | 60545d0 | 2013-06-07 01:20:27 -0400 | [diff] [blame] | 4866 | .tmpfile = shmem_tmpfile, |
Chuck Lever | a2e4595 | 2023-06-30 13:49:03 -0400 | [diff] [blame] | 4867 | .get_offset_ctx = shmem_get_offset_ctx, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4868 | #endif |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4869 | #ifdef CONFIG_TMPFS_XATTR |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4870 | .listxattr = shmem_listxattr, |
Theodore Ts'o | e408e69 | 2022-07-14 21:59:12 -0400 | [diff] [blame] | 4871 | .fileattr_get = shmem_fileattr_get, |
| 4872 | .fileattr_set = shmem_fileattr_set, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4873 | #endif |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 4874 | #ifdef CONFIG_TMPFS_POSIX_ACL |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 4875 | .setattr = shmem_setattr, |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 4876 | .set_acl = simple_set_acl, |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 4877 | #endif |
| 4878 | }; |
| 4879 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 4880 | static const struct inode_operations shmem_special_inode_operations = { |
Xavier Roche | f7cd16a | 2022-03-22 14:39:55 -0700 | [diff] [blame] | 4881 | .getattr = shmem_getattr, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4882 | #ifdef CONFIG_TMPFS_XATTR |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4883 | .listxattr = shmem_listxattr, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 4884 | #endif |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 4885 | #ifdef CONFIG_TMPFS_POSIX_ACL |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 4886 | .setattr = shmem_setattr, |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 4887 | .set_acl = simple_set_acl, |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 4888 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4889 | }; |
| 4890 | |
Hugh Dickins | 759b977 | 2007-03-05 00:30:28 -0800 | [diff] [blame] | 4891 | static const struct super_operations shmem_ops = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4892 | .alloc_inode = shmem_alloc_inode, |
Al Viro | 74b1da5 | 2019-04-15 23:19:05 -0400 | [diff] [blame] | 4893 | .free_inode = shmem_free_in_core_inode, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4894 | .destroy_inode = shmem_destroy_inode, |
| 4895 | #ifdef CONFIG_TMPFS |
| 4896 | .statfs = shmem_statfs, |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 4897 | .show_options = shmem_show_options, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4898 | #endif |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4899 | #ifdef CONFIG_TMPFS_QUOTA |
| 4900 | .get_dquots = shmem_get_dquots, |
| 4901 | #endif |
Al Viro | 1f895f7 | 2010-06-05 19:10:41 -0400 | [diff] [blame] | 4902 | .evict_inode = shmem_evict_inode, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4903 | .drop_inode = generic_delete_inode, |
| 4904 | .put_super = shmem_put_super, |
Matthew Wilcox (Oracle) | 396bcc5 | 2020-04-06 20:04:35 -0700 | [diff] [blame] | 4905 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
Kirill A. Shutemov | 779750d | 2016-07-26 15:26:38 -0700 | [diff] [blame] | 4906 | .nr_cached_objects = shmem_unused_huge_count, |
| 4907 | .free_cached_objects = shmem_unused_huge_scan, |
| 4908 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4909 | }; |
| 4910 | |
Alexey Dobriyan | f0f37e2f | 2009-09-27 22:29:37 +0400 | [diff] [blame] | 4911 | static const struct vm_operations_struct shmem_vm_ops = { |
Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 4912 | .fault = shmem_fault, |
Ning Qu | d7c1755 | 2014-04-07 15:37:24 -0700 | [diff] [blame] | 4913 | .map_pages = filemap_map_pages, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4914 | #ifdef CONFIG_NUMA |
| 4915 | .set_policy = shmem_set_policy, |
| 4916 | .get_policy = shmem_get_policy, |
| 4917 | #endif |
| 4918 | }; |
| 4919 | |
Pasha Tatashin | d09e8ca | 2022-11-15 02:06:01 +0000 | [diff] [blame] | 4920 | static const struct vm_operations_struct shmem_anon_vm_ops = { |
| 4921 | .fault = shmem_fault, |
| 4922 | .map_pages = filemap_map_pages, |
| 4923 | #ifdef CONFIG_NUMA |
| 4924 | .set_policy = shmem_set_policy, |
| 4925 | .get_policy = shmem_get_policy, |
| 4926 | #endif |
| 4927 | }; |
| 4928 | |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4929 | int shmem_init_fs_context(struct fs_context *fc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4930 | { |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4931 | struct shmem_options *ctx; |
| 4932 | |
| 4933 | ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL); |
| 4934 | if (!ctx) |
| 4935 | return -ENOMEM; |
| 4936 | |
| 4937 | ctx->mode = 0777 | S_ISVTX; |
| 4938 | ctx->uid = current_fsuid(); |
| 4939 | ctx->gid = current_fsgid(); |
| 4940 | |
| 4941 | fc->fs_private = ctx; |
| 4942 | fc->ops = &shmem_fs_context_ops; |
| 4943 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4944 | } |
| 4945 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 4946 | static struct file_system_type shmem_fs_type = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4947 | .owner = THIS_MODULE, |
| 4948 | .name = "tmpfs", |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4949 | .init_fs_context = shmem_init_fs_context, |
| 4950 | #ifdef CONFIG_TMPFS |
Al Viro | d7167b1 | 2019-09-07 07:23:15 -0400 | [diff] [blame] | 4951 | .parameters = shmem_fs_parameters, |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 4952 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4953 | .kill_sb = kill_litter_super, |
Christian Brauner | db58b5e | 2023-09-20 16:40:22 +0200 | [diff] [blame] | 4954 | .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4955 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4956 | |
Miaohe Lin | 9096bbe | 2022-04-28 23:15:58 -0700 | [diff] [blame] | 4957 | void __init shmem_init(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4958 | { |
| 4959 | int error; |
| 4960 | |
weiping zhang | 9a8ec03e | 2017-11-15 17:38:18 -0800 | [diff] [blame] | 4961 | shmem_init_inodecache(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4962 | |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4963 | #ifdef CONFIG_TMPFS_QUOTA |
Kemeng Shi | a838e5d | 2024-07-15 21:05:32 +0800 | [diff] [blame] | 4964 | register_quota_format(&shmem_quota_format); |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4965 | #endif |
| 4966 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 4967 | error = register_filesystem(&shmem_fs_type); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4968 | if (error) { |
Joe Perches | 1170532 | 2016-03-17 14:19:50 -0700 | [diff] [blame] | 4969 | pr_err("Could not register tmpfs\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4970 | goto out2; |
| 4971 | } |
Greg Kroah-Hartman | 95dc112 | 2005-06-20 21:15:16 -0700 | [diff] [blame] | 4972 | |
Al Viro | ca4e051 | 2013-08-31 12:57:10 -0400 | [diff] [blame] | 4973 | shm_mnt = kern_mount(&shmem_fs_type); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4974 | if (IS_ERR(shm_mnt)) { |
| 4975 | error = PTR_ERR(shm_mnt); |
Joe Perches | 1170532 | 2016-03-17 14:19:50 -0700 | [diff] [blame] | 4976 | pr_err("Could not kern_mount tmpfs\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4977 | goto out1; |
| 4978 | } |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 4979 | |
Matthew Wilcox (Oracle) | 396bcc5 | 2020-04-06 20:04:35 -0700 | [diff] [blame] | 4980 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
Kirill A. Shutemov | 435c0b87 | 2017-08-25 15:55:33 -0700 | [diff] [blame] | 4981 | if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 4982 | SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; |
| 4983 | else |
Hugh Dickins | 5e6e5a1 | 2021-09-02 14:54:37 -0700 | [diff] [blame] | 4984 | shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */ |
Baolin Wang | 4b98995 | 2024-06-11 18:11:07 +0800 | [diff] [blame] | 4985 | |
| 4986 | /* |
| 4987 | * Default to setting PMD-sized THP to inherit the global setting and |
| 4988 | * disable all other multi-size THPs. |
| 4989 | */ |
| 4990 | huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER); |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 4991 | #endif |
Miaohe Lin | 9096bbe | 2022-04-28 23:15:58 -0700 | [diff] [blame] | 4992 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4993 | |
| 4994 | out1: |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 4995 | unregister_filesystem(&shmem_fs_type); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4996 | out2: |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4997 | #ifdef CONFIG_TMPFS_QUOTA |
| 4998 | unregister_quota_format(&shmem_quota_format); |
Carlos Maiolino | e09764c | 2023-07-25 16:45:08 +0200 | [diff] [blame] | 4999 | #endif |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 5000 | shmem_destroy_inodecache(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5001 | shm_mnt = ERR_PTR(error); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5002 | } |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 5003 | |
Matthew Wilcox (Oracle) | 396bcc5 | 2020-04-06 20:04:35 -0700 | [diff] [blame] | 5004 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS) |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 5005 | static ssize_t shmem_enabled_show(struct kobject *kobj, |
Joe Perches | 79d4d38 | 2020-12-14 19:14:53 -0800 | [diff] [blame] | 5006 | struct kobj_attribute *attr, char *buf) |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 5007 | { |
Colin Ian King | 26083eb6 | 2019-11-30 17:58:04 -0800 | [diff] [blame] | 5008 | static const int values[] = { |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 5009 | SHMEM_HUGE_ALWAYS, |
| 5010 | SHMEM_HUGE_WITHIN_SIZE, |
| 5011 | SHMEM_HUGE_ADVISE, |
| 5012 | SHMEM_HUGE_NEVER, |
| 5013 | SHMEM_HUGE_DENY, |
| 5014 | SHMEM_HUGE_FORCE, |
| 5015 | }; |
Joe Perches | 79d4d38 | 2020-12-14 19:14:53 -0800 | [diff] [blame] | 5016 | int len = 0; |
| 5017 | int i; |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 5018 | |
Joe Perches | 79d4d38 | 2020-12-14 19:14:53 -0800 | [diff] [blame] | 5019 | for (i = 0; i < ARRAY_SIZE(values); i++) { |
| 5020 | len += sysfs_emit_at(buf, len, |
Hugh Dickins | 9be7d5b | 2023-09-29 20:28:50 -0700 | [diff] [blame] | 5021 | shmem_huge == values[i] ? "%s[%s]" : "%s%s", |
| 5022 | i ? " " : "", shmem_format_huge(values[i])); |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 5023 | } |
Joe Perches | 79d4d38 | 2020-12-14 19:14:53 -0800 | [diff] [blame] | 5024 | len += sysfs_emit_at(buf, len, "\n"); |
| 5025 | |
| 5026 | return len; |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 5027 | } |
| 5028 | |
| 5029 | static ssize_t shmem_enabled_store(struct kobject *kobj, |
| 5030 | struct kobj_attribute *attr, const char *buf, size_t count) |
| 5031 | { |
| 5032 | char tmp[16]; |
| 5033 | int huge; |
| 5034 | |
| 5035 | if (count + 1 > sizeof(tmp)) |
| 5036 | return -EINVAL; |
| 5037 | memcpy(tmp, buf, count); |
| 5038 | tmp[count] = '\0'; |
| 5039 | if (count && tmp[count - 1] == '\n') |
| 5040 | tmp[count - 1] = '\0'; |
| 5041 | |
| 5042 | huge = shmem_parse_huge(tmp); |
| 5043 | if (huge == -EINVAL) |
| 5044 | return -EINVAL; |
| 5045 | if (!has_transparent_hugepage() && |
| 5046 | huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) |
| 5047 | return -EINVAL; |
| 5048 | |
Baolin Wang | 4b98995 | 2024-06-11 18:11:07 +0800 | [diff] [blame] | 5049 | /* Do not override huge allocation policy with non-PMD sized mTHP */ |
| 5050 | if (huge == SHMEM_HUGE_FORCE && |
| 5051 | huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER)) |
| 5052 | return -EINVAL; |
| 5053 | |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 5054 | shmem_huge = huge; |
Kirill A. Shutemov | 435c0b87 | 2017-08-25 15:55:33 -0700 | [diff] [blame] | 5055 | if (shmem_huge > SHMEM_HUGE_DENY) |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 5056 | SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; |
| 5057 | return count; |
| 5058 | } |
| 5059 | |
Miaohe Lin | 4bfa8ad | 2022-03-22 14:40:04 -0700 | [diff] [blame] | 5060 | struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled); |
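/*
 * Userspace sketch (not part of shmem.c): the attribute defined above
 * surfaces as /sys/kernel/mm/transparent_hugepage/shmem_enabled; writing
 * any token accepted by shmem_parse_huge() updates the global shmem_huge
 * policy, e.g. "within_size" or "never".
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_global_shmem_huge(const char *policy)
{
	int fd = open("/sys/kernel/mm/transparent_hugepage/shmem_enabled",
		      O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, policy, strlen(policy));
	close(fd);
	return n < 0 ? -1 : 0;
}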
Baolin Wang | 4b98995 | 2024-06-11 18:11:07 +0800 | [diff] [blame] | 5061 | static DEFINE_SPINLOCK(huge_shmem_orders_lock); |
| 5062 | |
| 5063 | static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj, |
| 5064 | struct kobj_attribute *attr, char *buf) |
| 5065 | { |
| 5066 | int order = to_thpsize(kobj)->order; |
| 5067 | const char *output; |
| 5068 | |
| 5069 | if (test_bit(order, &huge_shmem_orders_always)) |
| 5070 | output = "[always] inherit within_size advise never"; |
| 5071 | else if (test_bit(order, &huge_shmem_orders_inherit)) |
| 5072 | output = "always [inherit] within_size advise never"; |
| 5073 | else if (test_bit(order, &huge_shmem_orders_within_size)) |
| 5074 | output = "always inherit [within_size] advise never"; |
| 5075 | else if (test_bit(order, &huge_shmem_orders_madvise)) |
| 5076 | output = "always inherit within_size [advise] never"; |
| 5077 | else |
| 5078 | output = "always inherit within_size advise [never]"; |
| 5079 | |
| 5080 | return sysfs_emit(buf, "%s\n", output); |
| 5081 | } |
| 5082 | |
| 5083 | static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj, |
| 5084 | struct kobj_attribute *attr, |
| 5085 | const char *buf, size_t count) |
| 5086 | { |
| 5087 | int order = to_thpsize(kobj)->order; |
| 5088 | ssize_t ret = count; |
| 5089 | |
| 5090 | if (sysfs_streq(buf, "always")) { |
| 5091 | spin_lock(&huge_shmem_orders_lock); |
| 5092 | clear_bit(order, &huge_shmem_orders_inherit); |
| 5093 | clear_bit(order, &huge_shmem_orders_madvise); |
| 5094 | clear_bit(order, &huge_shmem_orders_within_size); |
| 5095 | set_bit(order, &huge_shmem_orders_always); |
| 5096 | spin_unlock(&huge_shmem_orders_lock); |
| 5097 | } else if (sysfs_streq(buf, "inherit")) { |
| 5098 | /* Do not override huge allocation policy with non-PMD sized mTHP */ |
| 5099 | if (shmem_huge == SHMEM_HUGE_FORCE && |
| 5100 | order != HPAGE_PMD_ORDER) |
| 5101 | return -EINVAL; |
| 5102 | |
| 5103 | spin_lock(&huge_shmem_orders_lock); |
| 5104 | clear_bit(order, &huge_shmem_orders_always); |
| 5105 | clear_bit(order, &huge_shmem_orders_madvise); |
| 5106 | clear_bit(order, &huge_shmem_orders_within_size); |
| 5107 | set_bit(order, &huge_shmem_orders_inherit); |
| 5108 | spin_unlock(&huge_shmem_orders_lock); |
| 5109 | } else if (sysfs_streq(buf, "within_size")) { |
| 5110 | spin_lock(&huge_shmem_orders_lock); |
| 5111 | clear_bit(order, &huge_shmem_orders_always); |
| 5112 | clear_bit(order, &huge_shmem_orders_inherit); |
| 5113 | clear_bit(order, &huge_shmem_orders_madvise); |
| 5114 | set_bit(order, &huge_shmem_orders_within_size); |
| 5115 | spin_unlock(&huge_shmem_orders_lock); |
Bang Li | 843a2e2 | 2024-06-28 11:23:27 +0800 | [diff] [blame] | 5116 | } else if (sysfs_streq(buf, "advise")) { |
Baolin Wang | 4b98995 | 2024-06-11 18:11:07 +0800 | [diff] [blame] | 5117 | spin_lock(&huge_shmem_orders_lock); |
| 5118 | clear_bit(order, &huge_shmem_orders_always); |
| 5119 | clear_bit(order, &huge_shmem_orders_inherit); |
| 5120 | clear_bit(order, &huge_shmem_orders_within_size); |
| 5121 | set_bit(order, &huge_shmem_orders_madvise); |
| 5122 | spin_unlock(&huge_shmem_orders_lock); |
| 5123 | } else if (sysfs_streq(buf, "never")) { |
| 5124 | spin_lock(&huge_shmem_orders_lock); |
| 5125 | clear_bit(order, &huge_shmem_orders_always); |
| 5126 | clear_bit(order, &huge_shmem_orders_inherit); |
| 5127 | clear_bit(order, &huge_shmem_orders_within_size); |
| 5128 | clear_bit(order, &huge_shmem_orders_madvise); |
| 5129 | spin_unlock(&huge_shmem_orders_lock); |
| 5130 | } else { |
| 5131 | ret = -EINVAL; |
| 5132 | } |
| 5133 | |
| 5134 | return ret; |
| 5135 | } |
| 5136 | |
| 5137 | struct kobj_attribute thpsize_shmem_enabled_attr = |
| 5138 | __ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store); |
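/*
 * Illustrative note (not part of the original file): each mTHP size exposes
 * its own shmem_enabled attribute under the matching hugepages-<size>kB
 * directory.  Using 64kB purely as an assumed example (available sizes
 * depend on the architecture):
 *
 *	# cat /sys/kernel/mm/transparent_hugepage/hugepages-64kB/shmem_enabled
 *	always inherit within_size advise [never]
 *	# echo inherit > /sys/kernel/mm/transparent_hugepage/hugepages-64kB/shmem_enabled
 *
 * "inherit" makes that size follow the global shmem_enabled policy; as the
 * store handler above shows, it is refused for non-PMD orders while the
 * global policy is "force".
 */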
Matthew Wilcox (Oracle) | 396bcc5 | 2020-04-06 20:04:35 -0700 | [diff] [blame] | 5139 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ |
Kirill A. Shutemov | f3f0e1d | 2016-07-26 15:26:32 -0700 | [diff] [blame] | 5140 | |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 5141 | #else /* !CONFIG_SHMEM */ |
| 5142 | |
| 5143 | /* |
| 5144 | * tiny-shmem: simple shmemfs and tmpfs using ramfs code |
| 5145 | * |
| 5146 | * This is intended for small systems where the benefits of the full |
| 5147 | * shmem code (swap-backed and resource-limited) are outweighed by |
| 5148 | * their complexity. On systems without swap this code should be |
| 5149 | * effectively equivalent, but much lighter weight. |
| 5150 | */ |
| 5151 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 5152 | static struct file_system_type shmem_fs_type = { |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 5153 | .name = "tmpfs", |
David Howells | f323562 | 2019-03-25 16:38:31 +0000 | [diff] [blame] | 5154 | .init_fs_context = ramfs_init_fs_context, |
Al Viro | d7167b1 | 2019-09-07 07:23:15 -0400 | [diff] [blame] | 5155 | .parameters = ramfs_fs_parameters, |
Roberto Sassu | 36ce9d7 | 2023-06-07 18:15:23 +0200 | [diff] [blame] | 5156 | .kill_sb = ramfs_kill_sb, |
Eric W. Biederman | 2b8576c | 2013-01-25 16:32:10 -0800 | [diff] [blame] | 5157 | .fs_flags = FS_USERNS_MOUNT, |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 5158 | }; |
| 5159 | |
Miaohe Lin | 9096bbe | 2022-04-28 23:15:58 -0700 | [diff] [blame] | 5160 | void __init shmem_init(void) |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 5161 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 5162 | BUG_ON(register_filesystem(&shmem_fs_type) != 0); |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 5163 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 5164 | shm_mnt = kern_mount(&shmem_fs_type); |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 5165 | BUG_ON(IS_ERR(shm_mnt)); |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 5166 | } |
| 5167 | |
Christoph Hellwig | 10a9c49 | 2022-01-21 22:14:57 -0800 | [diff] [blame] | 5168 | int shmem_unuse(unsigned int type) |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 5169 | { |
| 5170 | return 0; |
| 5171 | } |
| 5172 | |
Alexey Gladkov | d7c9e99 | 2021-04-22 14:27:14 +0200 | [diff] [blame] | 5173 | int shmem_lock(struct file *file, int lock, struct ucounts *ucounts) |
Hugh Dickins | 3f96b79 | 2009-09-21 17:03:37 -0700 | [diff] [blame] | 5174 | { |
| 5175 | return 0; |
| 5176 | } |
| 5177 | |
Hugh Dickins | 2451326 | 2012-01-20 14:34:21 -0800 | [diff] [blame] | 5178 | void shmem_unlock_mapping(struct address_space *mapping) |
| 5179 | { |
| 5180 | } |
| 5181 | |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 5182 | #ifdef CONFIG_MMU |
| 5183 | unsigned long shmem_get_unmapped_area(struct file *file, |
| 5184 | unsigned long addr, unsigned long len, |
| 5185 | unsigned long pgoff, unsigned long flags) |
| 5186 | { |
Rick Edgecombe | 529ce23 | 2024-03-25 19:16:44 -0700 | [diff] [blame] | 5187 | return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags); |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 5188 | } |
| 5189 | #endif |
| 5190 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 5191 | void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 5192 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 5193 | truncate_inode_pages_range(inode->i_mapping, lstart, lend); |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 5194 | } |
| 5195 | EXPORT_SYMBOL_GPL(shmem_truncate_range); |
| 5196 | |
Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 5197 | #define shmem_vm_ops generic_file_vm_ops |
Pasha Tatashin | d09e8ca | 2022-11-15 02:06:01 +0000 | [diff] [blame] | 5198 | #define shmem_anon_vm_ops generic_file_vm_ops |
Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 5199 | #define shmem_file_operations ramfs_file_operations |
Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 5200 | #define shmem_acct_size(flags, size) 0 |
| 5201 | #define shmem_unacct_size(flags, size) do {} while (0) |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 5202 | |
Hugh Dickins | 9be7d5b | 2023-09-29 20:28:50 -0700 | [diff] [blame] | 5203 | static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, |
| 5204 | struct super_block *sb, struct inode *dir, |
| 5205 | umode_t mode, dev_t dev, unsigned long flags) |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 5206 | { |
| 5207 | struct inode *inode = ramfs_get_inode(sb, dir, mode, dev); |
| 5208 | return inode ? inode : ERR_PTR(-ENOSPC); |
| 5209 | } |
| 5210 | |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 5211 | #endif /* CONFIG_SHMEM */ |
| 5212 | |
| 5213 | /* common code */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5214 | |
Hugh Dickins | 9be7d5b | 2023-09-29 20:28:50 -0700 | [diff] [blame] | 5215 | static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, |
| 5216 | loff_t size, unsigned long flags, unsigned int i_flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5217 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5218 | struct inode *inode; |
Al Viro | 93dec2d | 2018-07-08 23:02:03 -0400 | [diff] [blame] | 5219 | struct file *res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5220 | |
Matthew Auld | 703321b | 2017-10-06 23:18:13 +0100 | [diff] [blame] | 5221 | if (IS_ERR(mnt)) |
| 5222 | return ERR_CAST(mnt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5223 | |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 5224 | if (size < 0 || size > MAX_LFS_FILESIZE) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5225 | return ERR_PTR(-EINVAL); |
| 5226 | |
| 5227 | if (shmem_acct_size(flags, size)) |
| 5228 | return ERR_PTR(-ENOMEM); |
| 5229 | |
Giuseppe Scrivano | 7a80e5b | 2023-01-20 10:43:46 +0100 | [diff] [blame] | 5230 | if (is_idmapped_mnt(mnt)) |
| 5231 | return ERR_PTR(-EINVAL); |
| 5232 | |
| 5233 | inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL, |
| 5234 | S_IFREG | S_IRWXUGO, 0, flags); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 5235 | if (IS_ERR(inode)) { |
Al Viro | dac2d1f | 2018-06-09 09:27:41 -0400 | [diff] [blame] | 5236 | shmem_unacct_size(flags, size); |
Carlos Maiolino | 7148066 | 2023-07-25 16:45:05 +0200 | [diff] [blame] | 5237 | return ERR_CAST(inode); |
Al Viro | dac2d1f | 2018-06-09 09:27:41 -0400 | [diff] [blame] | 5238 | } |
Eric Paris | c727709 | 2013-12-02 11:24:19 +0000 | [diff] [blame] | 5239 | inode->i_flags |= i_flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5240 | inode->i_size = size; |
Miklos Szeredi | 6d6b77f | 2011-10-28 14:13:28 +0200 | [diff] [blame] | 5241 | clear_nlink(inode); /* It is unlinked */ |
Al Viro | 26567cd | 2013-03-01 20:22:53 -0500 | [diff] [blame] | 5242 | res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); |
Al Viro | 93dec2d | 2018-07-08 23:02:03 -0400 | [diff] [blame] | 5243 | if (!IS_ERR(res)) |
| 5244 | res = alloc_file_pseudo(inode, mnt, name, O_RDWR, |
| 5245 | &shmem_file_operations); |
Al Viro | 26567cd | 2013-03-01 20:22:53 -0500 | [diff] [blame] | 5246 | if (IS_ERR(res)) |
Al Viro | 93dec2d | 2018-07-08 23:02:03 -0400 | [diff] [blame] | 5247 | iput(inode); |
Al Viro | 6b4d0b2 | 2013-02-14 21:37:26 -0500 | [diff] [blame] | 5248 | return res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5249 | } |
Eric Paris | c727709 | 2013-12-02 11:24:19 +0000 | [diff] [blame] | 5250 | |
| 5251 | /** |
| 5252 | * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be |
| 5253 | * kernel internal. There will be NO LSM permission checks against the |
| 5254 | * underlying inode. So users of this interface must do LSM checks at a |
Stephen Smalley | e1832f2 | 2015-08-06 15:46:55 -0700 | [diff] [blame] | 5255 | * higher layer. The users are the big_key and shm implementations. LSM |
| 5256 | * checks are provided at the key or shm level rather than the inode. |
Eric Paris | c727709 | 2013-12-02 11:24:19 +0000 | [diff] [blame] | 5257 | * @name: name for dentry (to be seen in /proc/<pid>/maps) |
| 5258 | * @size: size to be set for the file |
| 5259 | * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size |
| 5260 | */ |
| 5261 | struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags) |
| 5262 | { |
Matthew Auld | 703321b | 2017-10-06 23:18:13 +0100 | [diff] [blame] | 5263 | return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE); |
Eric Paris | c727709 | 2013-12-02 11:24:19 +0000 | [diff] [blame] | 5264 | } |
Christoph Hellwig | be9d936 | 2024-02-19 07:27:14 +0100 | [diff] [blame] | 5265 | EXPORT_SYMBOL_GPL(shmem_kernel_file_setup); |
Eric Paris | c727709 | 2013-12-02 11:24:19 +0000 | [diff] [blame] | 5266 | |
| 5267 | /** |
| 5268 | * shmem_file_setup - get an unlinked file living in tmpfs |
| 5269 | * @name: name for dentry (to be seen in /proc/<pid>/maps) |
| 5270 | * @size: size to be set for the file |
| 5271 | * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size |
| 5272 | */ |
| 5273 | struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) |
| 5274 | { |
Matthew Auld | 703321b | 2017-10-06 23:18:13 +0100 | [diff] [blame] | 5275 | return __shmem_file_setup(shm_mnt, name, size, flags, 0); |
Eric Paris | c727709 | 2013-12-02 11:24:19 +0000 | [diff] [blame] | 5276 | } |
Keith Packard | 395e0dd | 2008-06-20 00:08:06 -0700 | [diff] [blame] | 5277 | EXPORT_SYMBOL_GPL(shmem_file_setup); |
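/*
 * Illustrative sketch (not part of the original file): a typical in-kernel
 * user creates an unlinked tmpfs file, lets it grow up to the given size,
 * and drops it with fput() when done.  The helper name, object name and
 * size below are made up for the example.
 *
 *	static int example_alloc_shmem_buffer(struct file **filp)
 *	{
 *		struct file *file;
 *
 *		file = shmem_file_setup("example-buffer", SZ_1M, VM_NORESERVE);
 *		if (IS_ERR(file))
 *			return PTR_ERR(file);
 *		*filp = file;
 *		return 0;
 *	}
 *
 * The caller releases the file with fput().  shmem_kernel_file_setup() and
 * shmem_file_setup_with_mnt() follow the same pattern, differing only in the
 * S_PRIVATE flag (skipping LSM checks) and in the tmpfs mount used.
 */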
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5278 | |
Randy Dunlap | 4671181 | 2008-03-19 17:00:41 -0700 | [diff] [blame] | 5279 | /** |
Matthew Auld | 703321b | 2017-10-06 23:18:13 +0100 | [diff] [blame] | 5280 | * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs |
| 5281 | * @mnt: the tmpfs mount where the file will be created |
| 5282 | * @name: name for dentry (to be seen in /proc/<pid>/maps) |
| 5283 | * @size: size to be set for the file |
| 5284 | * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size |
| 5285 | */ |
| 5286 | struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name, |
| 5287 | loff_t size, unsigned long flags) |
| 5288 | { |
| 5289 | return __shmem_file_setup(mnt, name, size, flags, 0); |
| 5290 | } |
| 5291 | EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt); |
| 5292 | |
| 5293 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5294 | * shmem_zero_setup - setup a shared anonymous mapping |
Peter Collingbourne | 45e5530 | 2020-08-06 23:23:37 -0700 | [diff] [blame] | 5295 | * @vma: the vma to be mmapped, as prepared by do_mmap |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5296 | */ |
| 5297 | int shmem_zero_setup(struct vm_area_struct *vma) |
| 5298 | { |
| 5299 | struct file *file; |
| 5300 | loff_t size = vma->vm_end - vma->vm_start; |
| 5301 | |
Hugh Dickins | 66fc130 | 2015-06-14 09:48:09 -0700 | [diff] [blame] | 5302 | /* |
Michel Lespinasse | c1e8d7c | 2020-06-08 21:33:54 -0700 | [diff] [blame] | 5303 | * Cloning a new file under mmap_lock leads to a lock ordering conflict |
Hugh Dickins | 66fc130 | 2015-06-14 09:48:09 -0700 | [diff] [blame] | 5304 | * between XFS directory reading and selinux: since this file is only |
| 5305 | * accessible to the user through its mapping, use S_PRIVATE flag to |
| 5306 | * bypass file security, in the same way as shmem_kernel_file_setup(). |
| 5307 | */ |
Matthew Auld | 703321b | 2017-10-06 23:18:13 +0100 | [diff] [blame] | 5308 | file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5309 | if (IS_ERR(file)) |
| 5310 | return PTR_ERR(file); |
| 5311 | |
| 5312 | if (vma->vm_file) |
| 5313 | fput(vma->vm_file); |
| 5314 | vma->vm_file = file; |
Pasha Tatashin | d09e8ca | 2022-11-15 02:06:01 +0000 | [diff] [blame] | 5315 | vma->vm_ops = &shmem_anon_vm_ops; |
Kirill A. Shutemov | f3f0e1d | 2016-07-26 15:26:32 -0700 | [diff] [blame] | 5316 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5317 | return 0; |
| 5318 | } |
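/*
 * Illustrative note (not part of the original file): shmem_zero_setup() is
 * how a shared anonymous mapping gets its backing object.  A userspace call
 * such as:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * reaches this function from the mmap path with the vma already prepared,
 * and the unlinked "dev/zero" file created above then provides the pages.
 */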
Hugh Dickins | d9d90e5 | 2011-06-27 16:18:04 -0700 | [diff] [blame] | 5319 | |
| 5320 | /** |
Matthew Wilcox (Oracle) | f01b2b3 | 2023-02-06 16:25:20 +0000 | [diff] [blame] | 5321 | * shmem_read_folio_gfp - read into page cache, using specified page allocation flags. |
| 5322 | * @mapping: the folio's address_space |
| 5323 | * @index: the folio index |
Hugh Dickins | d9d90e5 | 2011-06-27 16:18:04 -0700 | [diff] [blame] | 5324 | * @gfp: the page allocator flags to use if allocating |
| 5325 | * |
| 5326 | * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", |
| 5327 | * with any new page allocations done using the specified allocation flags. |
Matthew Wilcox (Oracle) | 7e0a126 | 2022-04-29 11:53:28 -0400 | [diff] [blame] | 5328 | * But read_cache_page_gfp() uses the ->read_folio() method, which does not |
Hugh Dickins | d9d90e5 | 2011-06-27 16:18:04 -0700 | [diff] [blame] | 5329 | * suit tmpfs, since it may have pages in swapcache, and needs to find those |
| 5330 | * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. |
| 5331 | * |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 5332 | * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in |
| 5333 | * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. |
Hugh Dickins | d9d90e5 | 2011-06-27 16:18:04 -0700 | [diff] [blame] | 5334 | */ |
Matthew Wilcox (Oracle) | f01b2b3 | 2023-02-06 16:25:20 +0000 | [diff] [blame] | 5335 | struct folio *shmem_read_folio_gfp(struct address_space *mapping, |
| 5336 | pgoff_t index, gfp_t gfp) |
Hugh Dickins | d9d90e5 | 2011-06-27 16:18:04 -0700 | [diff] [blame] | 5337 | { |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 5338 | #ifdef CONFIG_SHMEM |
| 5339 | struct inode *inode = mapping->host; |
Matthew Wilcox (Oracle) | a3a9c39 | 2022-09-02 20:46:19 +0100 | [diff] [blame] | 5340 | struct folio *folio; |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 5341 | int error; |
| 5342 | |
Rik van Riel | e1e4cfd | 2024-09-03 11:19:28 -0400 | [diff] [blame] | 5343 | error = shmem_get_folio_gfp(inode, index, 0, &folio, SGP_CACHE, |
Hugh Dickins | e3e1a506 | 2023-09-29 20:26:53 -0700 | [diff] [blame] | 5344 | gfp, NULL, NULL); |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 5345 | if (error) |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 5346 | return ERR_PTR(error); |
| 5347 | |
Matthew Wilcox (Oracle) | a3a9c39 | 2022-09-02 20:46:19 +0100 | [diff] [blame] | 5348 | folio_unlock(folio); |
Matthew Wilcox (Oracle) | f01b2b3 | 2023-02-06 16:25:20 +0000 | [diff] [blame] | 5349 | return folio; |
| 5350 | #else |
| 5351 | /* |
| 5352 | * The tiny !SHMEM case uses ramfs without swap |
| 5353 | */ |
| 5354 | return mapping_read_folio_gfp(mapping, index, gfp); |
| 5355 | #endif |
| 5356 | } |
| 5357 | EXPORT_SYMBOL_GPL(shmem_read_folio_gfp); |
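/*
 * Illustrative sketch (not part of the original file): a driver populating
 * pages from a tmpfs-backed object might read folios with relaxed allocation
 * flags, along the lines described for i915 above.  The helper name is made
 * up for the example.
 *
 *	static struct folio *example_read_backing_folio(struct address_space *mapping,
 *							pgoff_t index)
 *	{
 *		gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *
 *		return shmem_read_folio_gfp(mapping, index, gfp);
 *	}
 *
 * On success the folio is returned unlocked with a reference held, which the
 * caller drops with folio_put() when finished.
 */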
| 5358 | |
| 5359 | struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, |
| 5360 | pgoff_t index, gfp_t gfp) |
| 5361 | { |
| 5362 | struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp); |
| 5363 | struct page *page; |
| 5364 | |
| 5365 | if (IS_ERR(folio)) |
| 5366 | return &folio->page; |
| 5367 | |
Matthew Wilcox (Oracle) | a3a9c39 | 2022-09-02 20:46:19 +0100 | [diff] [blame] | 5368 | page = folio_file_page(folio, index); |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 5369 | if (PageHWPoison(page)) { |
Matthew Wilcox (Oracle) | a3a9c39 | 2022-09-02 20:46:19 +0100 | [diff] [blame] | 5370 | folio_put(folio); |
Yang Shi | a760542 | 2022-01-14 14:05:19 -0800 | [diff] [blame] | 5371 | return ERR_PTR(-EIO); |
| 5372 | } |
| 5373 | |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 5374 | return page; |
Hugh Dickins | d9d90e5 | 2011-06-27 16:18:04 -0700 | [diff] [blame] | 5375 | } |
| 5376 | EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); |