// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <net/sock.h>

#include "internal.h"

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int count)
{
	unsigned int cpy, set;

	cpy = count / BITS_PER_BYTE;
	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)nfdt->open_fds + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)nfdt->close_on_exec + cpy, 0, set);

	cpy = BITBIT_SIZE(count);
	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 *
 * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied
 * by that "1024/sizeof(ptr)" before, we already know there are sufficient
 * clear low bits. Clang seems to realize that, gcc ends up being confused.
 *
 * On a 128-bit machine, the ALIGN() would actually matter. In the meantime,
 * let's consider it documentation (and maybe a test-case for gcc to improve
 * its code generation ;)
 */
static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	nr = ALIGN(nr, BITS_PER_LONG);
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);

	/* make sure all fd_install() calls have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int expanded = 0;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return expanded;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		expanded = 1;
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	expanded = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return expanded;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(unsigned int fd, struct fdtable *fdt)
{
	if (test_bit(fd, fdt->close_on_exec))
		__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	__clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

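/*
 * Find how many fds are in use by scanning open_fds from the top for the
 * last word with any bit set. The result is rounded up to a whole
 * BITS_PER_LONG boundary, so it is an upper bound on the highest open fd
 * rather than an exact count of open files.
 */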
static unsigned int count_open_files(struct fdtable *fdt)
{
	unsigned int size = fdt->max_fds;
	unsigned int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}

/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * 'max_fds' will normally already be properly aligned, but it
 * turns out that in the close_range() -> __close_range() ->
 * unshare_fd() -> dup_fd() -> sane_fdtable_size() path we can end
 * up having a 'max_fds' value that isn't already aligned.
 *
 * Rather than make close_range() have to worry about this,
 * just make that BITS_PER_LONG alignment be part of a sane
 * fdtable size. Because that's really what it is.
 */
static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
{
	unsigned int count;

	count = count_open_files(fdt);
	if (max_fds < NR_OPEN_DEFAULT)
		max_fds = NR_OPEN_DEFAULT;
	return ALIGN(min(count, max_fds), BITS_PER_LONG);
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, max_fds);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * it may have grown a new, bigger fd table in the meantime,
		 * and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, max_fds);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

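/*
 * Two-level search for a free fd: full_fds_bits has one bit per word of
 * open_fds that is completely full, so first skip over entirely-full
 * words, then scan open_fds itself for the first zero bit at or after
 * 'start'.
 */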
static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds;
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit > maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);

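/*
 * Release the fd's bit in the open_fds bitmap and remember it as a
 * candidate for the next allocation. Caller holds ->file_lock.
 */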
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */

void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING)))
		return;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		BUG_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);

/**
 * file_close_fd_locked - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * Doesn't take a separate reference count.
 *
 * Context: files_lock must be held.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
{
	struct fdtable *fdt = files_fdtable(files);
	struct file *file;

	lockdep_assert_held(&files->file_lock);

	if (fd >= fdt->max_fds)
		return NULL;

	fd = array_index_nospec(fd, fdt->max_fds);
	file = fdt->fd[fd];
	if (file) {
		rcu_assign_pointer(fdt->fd[fd], NULL);
		__put_unused_fd(files, fd);
	}
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = file_close_fd_locked(files, fd);
	spin_unlock(&files->file_lock);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd); /* for ksys_close() */

/**
 * last_fd - return last valid index into fd table
 * @fdt: File descriptor table.
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}

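/*
 * Mark every fd in [fd, max_fd] close-on-exec, clamping max_fd to the
 * size of the current fdtable under ->file_lock.
 */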
static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

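/*
 * Close every open fd in [fd, max_fd], dropping ->file_lock around each
 * filp_close() and rescheduling as needed so that closing a huge range
 * does not hold the lock or the CPU for too long.
 */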
static inline void __range_close(struct files_struct *files, unsigned int fd,
				 unsigned int max_fd)
{
	struct file *file;
	unsigned n;

	spin_lock(&files->file_lock);
	n = last_fd(files_fdtable(files));
	max_fd = min(max_fd, n);

	for (; fd <= max_fd; fd++) {
		file = file_close_fd_locked(files, fd);
		if (file) {
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		} else if (need_resched()) {
			spin_unlock(&files->file_lock);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

/**
 * __close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags:  CLOSE_RANGE flags.
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 */
int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	if (flags & CLOSE_RANGE_UNSHARE) {
		int ret;
		unsigned int max_unshare_fds = NR_OPEN_MAX;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (!(flags & CLOSE_RANGE_CLOEXEC)) {
			/*
			 * If the requested range is greater than the current
			 * maximum, we're closing everything so only copy all
			 * file descriptors beneath the lowest file descriptor.
			 */
			rcu_read_lock();
			if (max_fd >= last_fd(files_fdtable(cur_fds)))
				max_unshare_fds = fd;
			rcu_read_unlock();
		}

		ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
		if (ret)
			return ret;

		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		if (fds)
			swap(cur_fds, fds);
	}

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}

/**
 * file_close_fd - return file associated with fd
 * @fd: file descriptor to retrieve file for
 *
 * Doesn't take a separate reference count.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
struct file *file_close_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = file_close_fd_locked(files, fd);
	spin_unlock(&files->file_lock);

	return file;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}

	}
	spin_unlock(&files->file_lock);
}

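/*
 * Try to grab a reference on a file whose memory may be reused at any
 * time (SLAB_TYPESAFE_BY_RCU): take the reference first, then re-read
 * the pointer to confirm we pinned the file we meant to. Returns NULL
 * if no file was installed, ERR_PTR(-EAGAIN) if the caller should retry.
 */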
static struct file *__get_file_rcu(struct file __rcu **f)
{
	struct file __rcu *file;
	struct file __rcu *file_reloaded;
	struct file __rcu *file_reloaded_cmp;

	file = rcu_dereference_raw(*f);
	if (!file)
		return NULL;

	if (unlikely(!atomic_long_inc_not_zero(&file->f_count)))
		return ERR_PTR(-EAGAIN);

	file_reloaded = rcu_dereference_raw(*f);

	/*
	 * Ensure that all accesses have a dependency on the load from
	 * rcu_dereference_raw() above so we get correct ordering
	 * between reuse/allocation and the pointer check below.
	 */
	file_reloaded_cmp = file_reloaded;
	OPTIMIZER_HIDE_VAR(file_reloaded_cmp);

	/*
	 * atomic_long_inc_not_zero() above provided a full memory
	 * barrier when we acquired a reference.
	 *
	 * This is paired with the write barrier from assigning to the
	 * __rcu protected file pointer so that if that pointer still
	 * matches the current file, we know we have successfully
	 * acquired a reference to the right file.
	 *
	 * If the pointers don't match the file has been reallocated by
	 * SLAB_TYPESAFE_BY_RCU.
	 */
	if (file == file_reloaded_cmp)
		return file_reloaded;

	fput(file);
	return ERR_PTR(-EAGAIN);
}

/**
 * get_file_rcu - try to get a reference to a file under rcu
 * @f: the file to get a reference on
 *
 * This function tries to get a reference on @f, carefully verifying that
 * @f hasn't been reused.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_rcu(struct file __rcu **f)
{
	for (;;) {
		struct file __rcu *file;

		file = __get_file_rcu(f);
		if (unlikely(!file))
			return NULL;

		if (unlikely(IS_ERR(file)))
			continue;

		return file;
	}
}
EXPORT_SYMBOL_GPL(get_file_rcu);

/**
 * get_file_active - try to get a reference to a file
 * @f: the file to get a reference on
 *
 * In contrast to get_file_rcu() the pointer itself isn't part of the
 * reference counting.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
| 941 | struct file *get_file_active(struct file **f) |
| 942 | { |
| 943 | struct file __rcu *file; |
| 944 | |
| 945 | rcu_read_lock(); |
| 946 | file = __get_file_rcu(f); |
| 947 | rcu_read_unlock(); |
| 948 | if (IS_ERR(file)) |
| 949 | file = NULL; |
| 950 | return file; |
| 951 | } |
| 952 | EXPORT_SYMBOL_GPL(get_file_active); |
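/*
 * Hypothetical illustration, not part of fs/file.c: taking a reference on a
 * plain (non-__rcu) file pointer field with get_file_active().  The struct
 * below is invented for the example.
 */
struct example_ctx {
	struct file *backing;	/* memory may be recycled via SLAB_TYPESAFE_BY_RCU */
};

static struct file *example_ctx_get(struct example_ctx *ctx)
{
	/* Takes rcu_read_lock() internally; returns a referenced file or NULL. */
	return get_file_active(&ctx->backing);
}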
| 953 | |
Linus Torvalds | e386dfc | 2021-12-10 14:00:15 -0800 | [diff] [blame] | 954 | static inline struct file *__fget_files_rcu(struct files_struct *files, |
Christian Brauner | 0ede61d | 2023-09-29 08:45:59 +0200 | [diff] [blame] | 955 | unsigned int fd, fmode_t mask) |
Linus Torvalds | e386dfc | 2021-12-10 14:00:15 -0800 | [diff] [blame] | 956 | { |
| 957 | for (;;) { |
| 958 | struct file *file; |
| 959 | struct fdtable *fdt = rcu_dereference_raw(files->fdt); |
| 960 | struct file __rcu **fdentry; |
Linus Torvalds | 253ca86 | 2023-11-26 12:24:38 -0800 | [diff] [blame] | 961 | unsigned long nospec_mask; |
Linus Torvalds | e386dfc | 2021-12-10 14:00:15 -0800 | [diff] [blame] | 962 | |
Linus Torvalds | 253ca86 | 2023-11-26 12:24:38 -0800 | [diff] [blame] | 963 | /* Mask is 0 for invalid fds, ~0 for valid ones */ |
| 964 | nospec_mask = array_index_mask_nospec(fd, fdt->max_fds); |
Linus Torvalds | e386dfc | 2021-12-10 14:00:15 -0800 | [diff] [blame] | 965 | |
| 966 | /* |
Linus Torvalds | 253ca86 | 2023-11-26 12:24:38 -0800 | [diff] [blame] | 967 | * fdentry points to the 'fd' offset, or fdt->fd[0]. |
| 968 | * Loading from fdt->fd[0] is always safe, because the |
| 969 | * array always exists. |
| 970 | */ |
| 971 | fdentry = fdt->fd + (fd & nospec_mask); |
| 972 | |
| 973 | /* Do the load, then mask any invalid result */ |
| 974 | file = rcu_dereference_raw(*fdentry); |
| 975 | file = (void *)(nospec_mask & (unsigned long)file); |
| 976 | if (unlikely(!file)) |
| 977 | return NULL; |
| 978 | |
| 979 | /* |
| 980 | * Ok, we have a file pointer that was valid at |
| 981 | * some point, but it might have become stale since. |
Linus Torvalds | e386dfc | 2021-12-10 14:00:15 -0800 | [diff] [blame] | 982 | * |
Linus Torvalds | 253ca86 | 2023-11-26 12:24:38 -0800 | [diff] [blame] | 983 | * We need to confirm it by incrementing the refcount |
| 984 | * and then check the lookup again. |
| 985 | * |
| 986 | * atomic_long_inc_not_zero() gives us a full memory |
| 987 | * barrier. We only really need an 'acquire' one to |
| 988 | * protect the loads below, but we don't have that. |
| 989 | */ |
| 990 | if (unlikely(!atomic_long_inc_not_zero(&file->f_count))) |
| 991 | continue; |
| 992 | |
| 993 | /* |
Linus Torvalds | e386dfc | 2021-12-10 14:00:15 -0800 | [diff] [blame] | 994 | * A race with the file being freed or reused can take two forms: |
| 995 | * |
Christian Brauner | 0ede61d | 2023-09-29 08:45:59 +0200 | [diff] [blame] | 996 | * (a) the file ref already went down to zero and the |
| 997 | * file hasn't been reused yet, or the file count |
| 998 | * isn't zero but the file has already been reused. |
Linus Torvalds | 253ca86 | 2023-11-26 12:24:38 -0800 | [diff] [blame] | 999 | * |
Linus Torvalds | e386dfc | 2021-12-10 14:00:15 -0800 | [diff] [blame] | 1000 | * (b) the file table entry has changed under us. |
| 1001 | * Note that we don't need to re-check the 'fdt->fd' |
| 1002 | * pointer having changed, because it always goes |
| 1003 | * hand-in-hand with 'fdt'. |
| 1004 | * |
Gou Hao | 81132a3 | 2021-11-02 10:46:48 +0800 | [diff] [blame] | 1005 | * If so, we need to put our ref and try again. |
Linus Torvalds | e386dfc | 2021-12-10 14:00:15 -0800 | [diff] [blame] | 1006 | */ |
Linus Torvalds | 253ca86 | 2023-11-26 12:24:38 -0800 | [diff] [blame] | 1007 | if (unlikely(file != rcu_dereference_raw(*fdentry)) || |
| 1008 | unlikely(rcu_dereference_raw(files->fdt) != fdt)) { |
Gou Hao | 81132a3 | 2021-11-02 10:46:48 +0800 | [diff] [blame] | 1009 | fput(file); |
Linus Torvalds | e386dfc | 2021-12-10 14:00:15 -0800 | [diff] [blame] | 1010 | continue; |
| 1011 | } |
| 1012 | |
| 1013 | /* |
Christian Brauner | 0ede61d | 2023-09-29 08:45:59 +0200 | [diff] [blame] | 1014 | * The entry still points at this file, but the caller isn't |
| 1015 | * allowed to get a reference to it (e.g. fget() excludes O_PATH files). |
| 1016 | */ |
| 1017 | if (unlikely(file->f_mode & mask)) { |
| 1018 | fput(file); |
| 1019 | return NULL; |
| 1020 | } |
| 1021 | |
| 1022 | /* |
Linus Torvalds | e386dfc | 2021-12-10 14:00:15 -0800 | [diff] [blame] | 1023 | * Ok, we have a ref to the file, and checked that it |
| 1024 | * still exists. |
| 1025 | */ |
| 1026 | return file; |
| 1027 | } |
| 1028 | } |
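/*
 * Illustrative sketch, not part of fs/file.c: the Spectre-v1 masking pattern
 * used in __fget_files_rcu() above.  array_index_mask_nospec() (declared in
 * <linux/nospec.h>) evaluates to ~0UL when idx < size and to 0 otherwise, so
 * an out-of-bounds index is clamped to slot 0 and the loaded pointer is
 * forced to NULL instead of being used under speculation.  The helper name
 * and the generic array are invented for the example.
 */
static void *example_masked_lookup(void **array, unsigned long idx,
				   unsigned long size)
{
	unsigned long mask = array_index_mask_nospec(idx, size);
	void *p = array[idx & mask];	/* slot 0 always exists */

	return (void *)(mask & (unsigned long)p);
}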
| 1029 | |
Sargun Dhillon | 5e876fb | 2020-01-07 09:59:24 -0800 | [diff] [blame] | 1030 | static struct file *__fget_files(struct files_struct *files, unsigned int fd, |
Gou Hao | 81132a3 | 2021-11-02 10:46:48 +0800 | [diff] [blame] | 1031 | fmode_t mask) |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1032 | { |
Oleg Nesterov | 1deb46e | 2014-01-13 16:48:19 +0100 | [diff] [blame] | 1033 | struct file *file; |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1034 | |
| 1035 | rcu_read_lock(); |
Gou Hao | 81132a3 | 2021-11-02 10:46:48 +0800 | [diff] [blame] | 1036 | file = __fget_files_rcu(files, fd, mask); |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1037 | rcu_read_unlock(); |
| 1038 | |
| 1039 | return file; |
| 1040 | } |
| 1041 | |
Gou Hao | 81132a3 | 2021-11-02 10:46:48 +0800 | [diff] [blame] | 1042 | static inline struct file *__fget(unsigned int fd, fmode_t mask) |
Sargun Dhillon | 5e876fb | 2020-01-07 09:59:24 -0800 | [diff] [blame] | 1043 | { |
Gou Hao | 81132a3 | 2021-11-02 10:46:48 +0800 | [diff] [blame] | 1044 | return __fget_files(current->files, fd, mask); |
Jens Axboe | 091141a | 2018-11-21 10:32:39 -0700 | [diff] [blame] | 1045 | } |
| 1046 | |
Oleg Nesterov | 1deb46e | 2014-01-13 16:48:19 +0100 | [diff] [blame] | 1047 | struct file *fget(unsigned int fd) |
| 1048 | { |
Gou Hao | 81132a3 | 2021-11-02 10:46:48 +0800 | [diff] [blame] | 1049 | return __fget(fd, FMODE_PATH); |
Oleg Nesterov | 1deb46e | 2014-01-13 16:48:19 +0100 | [diff] [blame] | 1050 | } |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1051 | EXPORT_SYMBOL(fget); |
| 1052 | |
| 1053 | struct file *fget_raw(unsigned int fd) |
| 1054 | { |
Gou Hao | 81132a3 | 2021-11-02 10:46:48 +0800 | [diff] [blame] | 1055 | return __fget(fd, 0); |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1056 | } |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1057 | EXPORT_SYMBOL(fget_raw); |
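/*
 * Hypothetical usage sketch, not part of fs/file.c: fget() passes FMODE_PATH
 * as the mask and therefore returns NULL for O_PATH descriptors, while
 * fget_raw() accepts them.  Either way the caller owns a reference and must
 * drop it with fput().  The function below is invented for the example.
 */
static int example_use_fd(unsigned int fd)
{
	struct file *file = fget(fd);

	if (!file)
		return -EBADF;	/* closed fd, or an O_PATH file */
	/* ... use file ... */
	fput(file);
	return 0;
}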
| 1058 | |
Sargun Dhillon | 5e876fb | 2020-01-07 09:59:24 -0800 | [diff] [blame] | 1059 | struct file *fget_task(struct task_struct *task, unsigned int fd) |
| 1060 | { |
| 1061 | struct file *file = NULL; |
| 1062 | |
| 1063 | task_lock(task); |
| 1064 | if (task->files) |
Gou Hao | 81132a3 | 2021-11-02 10:46:48 +0800 | [diff] [blame] | 1065 | file = __fget_files(task->files, fd, 0); |
Sargun Dhillon | 5e876fb | 2020-01-07 09:59:24 -0800 | [diff] [blame] | 1066 | task_unlock(task); |
| 1067 | |
| 1068 | return file; |
| 1069 | } |
| 1070 | |
Christian Brauner | 0ede61d | 2023-09-29 08:45:59 +0200 | [diff] [blame] | 1071 | struct file *lookup_fdget_rcu(unsigned int fd) |
| 1072 | { |
| 1073 | return __fget_files_rcu(current->files, fd, 0); |
| 1074 | |
| 1075 | } |
| 1076 | EXPORT_SYMBOL_GPL(lookup_fdget_rcu); |
| 1077 | |
| 1078 | struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd) |
Eric W. Biederman | 3a879fb | 2020-11-20 17:14:28 -0600 | [diff] [blame] | 1079 | { |
| 1080 | /* Must be called with rcu_read_lock held */ |
| 1081 | struct files_struct *files; |
| 1082 | struct file *file = NULL; |
| 1083 | |
| 1084 | task_lock(task); |
| 1085 | files = task->files; |
| 1086 | if (files) |
Christian Brauner | 0ede61d | 2023-09-29 08:45:59 +0200 | [diff] [blame] | 1087 | file = __fget_files_rcu(files, fd, 0); |
Eric W. Biederman | 3a879fb | 2020-11-20 17:14:28 -0600 | [diff] [blame] | 1088 | task_unlock(task); |
| 1089 | |
| 1090 | return file; |
| 1091 | } |
| 1092 | |
Christian Brauner | 0ede61d | 2023-09-29 08:45:59 +0200 | [diff] [blame] | 1093 | struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *ret_fd) |
Eric W. Biederman | e9a53ae | 2020-11-20 17:14:31 -0600 | [diff] [blame] | 1094 | { |
| 1095 | /* Must be called with rcu_read_lock held */ |
| 1096 | struct files_struct *files; |
| 1097 | unsigned int fd = *ret_fd; |
| 1098 | struct file *file = NULL; |
| 1099 | |
| 1100 | task_lock(task); |
| 1101 | files = task->files; |
| 1102 | if (files) { |
| 1103 | for (; fd < files_fdtable(files)->max_fds; fd++) { |
Christian Brauner | 0ede61d | 2023-09-29 08:45:59 +0200 | [diff] [blame] | 1104 | file = __fget_files_rcu(files, fd, 0); |
Eric W. Biederman | e9a53ae | 2020-11-20 17:14:31 -0600 | [diff] [blame] | 1105 | if (file) |
| 1106 | break; |
| 1107 | } |
| 1108 | } |
| 1109 | task_unlock(task); |
| 1110 | *ret_fd = fd; |
| 1111 | return file; |
| 1112 | } |
Christian Brauner | 0ede61d | 2023-09-29 08:45:59 +0200 | [diff] [blame] | 1113 | EXPORT_SYMBOL(task_lookup_next_fdget_rcu); |
Eric W. Biederman | e9a53ae | 2020-11-20 17:14:31 -0600 | [diff] [blame] | 1114 | |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1115 | /* |
| 1116 | * Lightweight file lookup - no refcnt increment if fd table isn't shared. |
| 1117 | * |
| 1118 | * You can use this instead of fget if you satisfy all of the following |
| 1119 | * conditions: |
| 1120 | * 1) You must call fput_light before exiting the syscall and returning control |
| 1121 | * to userspace (i.e. you cannot remember the returned struct file * after |
| 1122 | * returning to userspace). |
| 1123 | * 2) You must not call filp_close on the returned struct file * in between |
| 1124 | * calls to fget_light and fput_light. |
| 1125 | * 3) You must not clone the current task in between the calls to fget_light |
| 1126 | * and fput_light. |
| 1127 | * |
| 1128 | * The fput_needed flag returned by fget_light should be passed to the |
| 1129 | * corresponding fput_light. |
| 1130 | */ |
Al Viro | bd2a31d | 2014-03-04 14:54:22 -0500 | [diff] [blame] | 1131 | static unsigned long __fget_light(unsigned int fd, fmode_t mask) |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1132 | { |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1133 | struct files_struct *files = current->files; |
Oleg Nesterov | ad46183 | 2014-01-13 16:48:40 +0100 | [diff] [blame] | 1134 | struct file *file; |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1135 | |
Jann Horn | 7ee47dc | 2022-10-31 18:52:56 +0100 | [diff] [blame] | 1136 | /* |
| 1137 | * If another thread is concurrently calling close_fd() followed |
| 1138 | * by put_files_struct(), we must not observe the old table |
| 1139 | * entry combined with the new refcount - otherwise we could |
| 1140 | * return a file that is concurrently being freed. |
| 1141 | * |
| 1142 | * atomic_read_acquire() pairs with atomic_dec_and_test() in |
| 1143 | * put_files_struct(). |
| 1144 | */ |
Linus Torvalds | 253ca86 | 2023-11-26 12:24:38 -0800 | [diff] [blame] | 1145 | if (likely(atomic_read_acquire(&files->count) == 1)) { |
Eric W. Biederman | bebf684 | 2020-11-20 17:14:24 -0600 | [diff] [blame] | 1146 | file = files_lookup_fd_raw(files, fd); |
Al Viro | bd2a31d | 2014-03-04 14:54:22 -0500 | [diff] [blame] | 1147 | if (!file || unlikely(file->f_mode & mask)) |
| 1148 | return 0; |
| 1149 | return (unsigned long)file; |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1150 | } else { |
Linus Torvalds | 253ca86 | 2023-11-26 12:24:38 -0800 | [diff] [blame] | 1151 | file = __fget_files(files, fd, mask); |
Al Viro | bd2a31d | 2014-03-04 14:54:22 -0500 | [diff] [blame] | 1152 | if (!file) |
| 1153 | return 0; |
| 1154 | return FDPUT_FPUT | (unsigned long)file; |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1155 | } |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1156 | } |
Al Viro | bd2a31d | 2014-03-04 14:54:22 -0500 | [diff] [blame] | 1157 | unsigned long __fdget(unsigned int fd) |
Oleg Nesterov | ad46183 | 2014-01-13 16:48:40 +0100 | [diff] [blame] | 1158 | { |
Al Viro | bd2a31d | 2014-03-04 14:54:22 -0500 | [diff] [blame] | 1159 | return __fget_light(fd, FMODE_PATH); |
Oleg Nesterov | ad46183 | 2014-01-13 16:48:40 +0100 | [diff] [blame] | 1160 | } |
Al Viro | bd2a31d | 2014-03-04 14:54:22 -0500 | [diff] [blame] | 1161 | EXPORT_SYMBOL(__fdget); |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1162 | |
Al Viro | bd2a31d | 2014-03-04 14:54:22 -0500 | [diff] [blame] | 1163 | unsigned long __fdget_raw(unsigned int fd) |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1164 | { |
Al Viro | bd2a31d | 2014-03-04 14:54:22 -0500 | [diff] [blame] | 1165 | return __fget_light(fd, 0); |
Al Viro | 0ee8cdf | 2012-08-15 21:12:10 -0400 | [diff] [blame] | 1166 | } |
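/*
 * Hypothetical usage sketch, not part of fs/file.c: the fdget()/fdput()
 * wrappers in <linux/file.h> build a struct fd from __fdget(), so the
 * FDPUT_FPUT bit decides on the way out whether a reference was actually
 * taken and must be dropped.  The function below is invented for the example.
 */
static long example_syscall_body(unsigned int fd)
{
	struct fd f = fdget(fd);
	long ret = -EBADF;

	if (!f.file)
		return ret;
	ret = 0;	/* ... operate on f.file ... */
	fdput(f);	/* calls fput() only if FDPUT_FPUT is set */
	return ret;
}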
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1167 | |
Linus Torvalds | 7979642 | 2023-08-03 11:35:53 -0700 | [diff] [blame] | 1168 | /* |
| 1169 | * Try to avoid f_pos locking. We only need it if the |
| 1170 | * file is marked for FMODE_ATOMIC_POS, and it can be |
| 1171 | * accessed multiple ways. |
| 1172 | * |
| 1173 | * Always do it for directories, because pidfd_getfd() |
| 1174 | * can make a file accessible even if it otherwise would |
| 1175 | * not be, and for directories this is a correctness |
| 1176 | * issue, not a "POSIX requirement". |
| 1177 | */ |
| 1178 | static inline bool file_needs_f_pos_lock(struct file *file) |
| 1179 | { |
| 1180 | return (file->f_mode & FMODE_ATOMIC_POS) && |
Christian Brauner | 7d84d1b | 2023-08-06 14:49:35 +0200 | [diff] [blame] | 1181 | (file_count(file) > 1 || file->f_op->iterate_shared); |
Linus Torvalds | 7979642 | 2023-08-03 11:35:53 -0700 | [diff] [blame] | 1182 | } |
| 1183 | |
Al Viro | bd2a31d | 2014-03-04 14:54:22 -0500 | [diff] [blame] | 1184 | unsigned long __fdget_pos(unsigned int fd) |
| 1185 | { |
Eric Biggers | 99aea68 | 2014-03-16 15:47:48 -0500 | [diff] [blame] | 1186 | unsigned long v = __fdget(fd); |
| 1187 | struct file *file = (struct file *)(v & ~3); |
Al Viro | bd2a31d | 2014-03-04 14:54:22 -0500 | [diff] [blame] | 1188 | |
Linus Torvalds | 7979642 | 2023-08-03 11:35:53 -0700 | [diff] [blame] | 1189 | if (file && file_needs_f_pos_lock(file)) { |
Christian Brauner | 20ea1e7 | 2023-07-24 17:00:49 +0200 | [diff] [blame] | 1190 | v |= FDPUT_POS_UNLOCK; |
| 1191 | mutex_lock(&file->f_pos_lock); |
Al Viro | bd2a31d | 2014-03-04 14:54:22 -0500 | [diff] [blame] | 1192 | } |
Eric Biggers | 99aea68 | 2014-03-16 15:47:48 -0500 | [diff] [blame] | 1193 | return v; |
Al Viro | bd2a31d | 2014-03-04 14:54:22 -0500 | [diff] [blame] | 1194 | } |
| 1195 | |
Al Viro | 63b6df1 | 2016-04-20 17:08:21 -0400 | [diff] [blame] | 1196 | void __f_unlock_pos(struct file *f) |
| 1197 | { |
| 1198 | mutex_unlock(&f->f_pos_lock); |
| 1199 | } |
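/*
 * Hypothetical usage sketch, not part of fs/file.c: positional I/O paths use
 * fdget_pos()/fdput_pos() from <linux/file.h>, which add FDPUT_POS_UNLOCK
 * handling on top of the plain fdget()/fdput() pair.  The function below is
 * invented for the example.
 */
static ssize_t example_positional_op(unsigned int fd)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (!f.file)
		return ret;
	/* f.file->f_pos may be read and updated consistently here */
	ret = 0;
	fdput_pos(f);	/* drops f_pos_lock (if taken) and the reference */
	return ret;
}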
| 1200 | |
Al Viro | bd2a31d | 2014-03-04 14:54:22 -0500 | [diff] [blame] | 1201 | /* |
| 1202 | * We only lock f_pos if we have threads or if the file might be |
| 1203 | * shared with another process. In both cases we'll have an elevated |
| 1204 | * file count (done either by fdget() or by fork()). |
| 1205 | */ |
| 1206 | |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1207 | void set_close_on_exec(unsigned int fd, int flag) |
| 1208 | { |
| 1209 | struct files_struct *files = current->files; |
| 1210 | struct fdtable *fdt; |
| 1211 | spin_lock(&files->file_lock); |
| 1212 | fdt = files_fdtable(files); |
| 1213 | if (flag) |
| 1214 | __set_close_on_exec(fd, fdt); |
| 1215 | else |
| 1216 | __clear_close_on_exec(fd, fdt); |
| 1217 | spin_unlock(&files->file_lock); |
| 1218 | } |
| 1219 | |
| 1220 | bool get_close_on_exec(unsigned int fd) |
| 1221 | { |
| 1222 | struct files_struct *files = current->files; |
| 1223 | struct fdtable *fdt; |
| 1224 | bool res; |
| 1225 | rcu_read_lock(); |
| 1226 | fdt = files_fdtable(files); |
| 1227 | res = close_on_exec(fd, fdt); |
| 1228 | rcu_read_unlock(); |
| 1229 | return res; |
| 1230 | } |
| 1231 | |
Al Viro | 8280d16 | 2012-08-21 12:11:46 -0400 | [diff] [blame] | 1232 | static int do_dup2(struct files_struct *files, |
| 1233 | struct file *file, unsigned fd, unsigned flags) |
Al Viro | e983094 | 2014-08-31 14:12:09 -0400 | [diff] [blame] | 1234 | __releases(&files->file_lock) |
Al Viro | 8280d16 | 2012-08-21 12:11:46 -0400 | [diff] [blame] | 1235 | { |
| 1236 | struct file *tofree; |
| 1237 | struct fdtable *fdt; |
| 1238 | |
| 1239 | /* |
| 1240 | * We need to detect attempts to do dup2() over an allocated but |
| 1241 | * still not finished descriptor. NB: OpenBSD avoids that at the price of |
| 1242 | * extra work in their equivalent of fget() - they insert struct |
| 1243 | * file immediately after grabbing descriptor, mark it larval if |
| 1244 | * more work (e.g. actual opening) is needed and make sure that |
| 1245 | * fget() treats larval files as absent. Potentially interesting, |
| 1246 | * but while extra work in fget() is trivial, locking implications |
| 1247 | * and amount of surgery on open()-related paths in VFS are not. |
| 1248 | * FreeBSD fails with -EBADF in the same situation, NetBSD "solution" |
| 1249 | * deadlocks in rather amusing ways, AFAICS. All of that is out of |
| 1250 | * scope of POSIX or SUS, since neither considers shared descriptor |
| 1251 | * tables and this condition does not arise without those. |
| 1252 | */ |
| 1253 | fdt = files_fdtable(files); |
| 1254 | tofree = fdt->fd[fd]; |
| 1255 | if (!tofree && fd_is_open(fd, fdt)) |
| 1256 | goto Ebusy; |
| 1257 | get_file(file); |
| 1258 | rcu_assign_pointer(fdt->fd[fd], file); |
| 1259 | __set_open_fd(fd, fdt); |
| 1260 | if (flags & O_CLOEXEC) |
| 1261 | __set_close_on_exec(fd, fdt); |
| 1262 | else |
| 1263 | __clear_close_on_exec(fd, fdt); |
| 1264 | spin_unlock(&files->file_lock); |
| 1265 | |
| 1266 | if (tofree) |
| 1267 | filp_close(tofree, files); |
| 1268 | |
| 1269 | return fd; |
| 1270 | |
| 1271 | Ebusy: |
| 1272 | spin_unlock(&files->file_lock); |
| 1273 | return -EBUSY; |
| 1274 | } |
| 1275 | |
| 1276 | int replace_fd(unsigned fd, struct file *file, unsigned flags) |
| 1277 | { |
| 1278 | int err; |
| 1279 | struct files_struct *files = current->files; |
| 1280 | |
| 1281 | if (!file) |
Eric W. Biederman | 8760c90 | 2020-11-20 17:14:38 -0600 | [diff] [blame] | 1282 | return close_fd(fd); |
Al Viro | 8280d16 | 2012-08-21 12:11:46 -0400 | [diff] [blame] | 1283 | |
| 1284 | if (fd >= rlimit(RLIMIT_NOFILE)) |
Al Viro | 08f05c4 | 2012-10-31 03:37:48 +0000 | [diff] [blame] | 1285 | return -EBADF; |
Al Viro | 8280d16 | 2012-08-21 12:11:46 -0400 | [diff] [blame] | 1286 | |
| 1287 | spin_lock(&files->file_lock); |
| 1288 | err = expand_files(files, fd); |
| 1289 | if (unlikely(err < 0)) |
| 1290 | goto out_unlock; |
| 1291 | return do_dup2(files, file, fd, flags); |
| 1292 | |
| 1293 | out_unlock: |
| 1294 | spin_unlock(&files->file_lock); |
| 1295 | return err; |
| 1296 | } |
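/*
 * Hypothetical illustration, not part of fs/file.c: replace_fd() is the
 * in-kernel analogue of dup2() onto a fixed descriptor number, e.g. wiring a
 * pipe to fd 0 of a usermode helper.  On success it returns the descriptor
 * number (0 here), on failure a negative error.  The wrapper name is invented
 * for the example.
 */
static int example_wire_stdin(struct file *pipe_file)
{
	return replace_fd(0, pipe_file, 0);
}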
| 1297 | |
Kees Cook | 6659061 | 2020-06-10 08:20:05 -0700 | [diff] [blame] | 1298 | /** |
Christian Brauner | 4e94ddf | 2023-11-30 13:49:11 +0100 | [diff] [blame] | 1299 | * receive_fd() - Install received file into file descriptor table |
Kees Cook | 6659061 | 2020-06-10 08:20:05 -0700 | [diff] [blame] | 1300 | * @file: struct file that was received from another process |
| 1301 | * @ufd: __user pointer to write new fd number to |
| 1302 | * @o_flags: the O_* flags to apply to the new fd entry |
| 1303 | * |
| 1304 | * Installs a received file into the file descriptor table, with appropriate |
Kees Cook | deefa7f | 2020-06-10 20:47:45 -0700 | [diff] [blame] | 1305 | * checks and count updates. Optionally writes the fd number to userspace, if |
| 1306 | * @ufd is non-NULL. |
Kees Cook | 6659061 | 2020-06-10 08:20:05 -0700 | [diff] [blame] | 1307 | * |
| 1308 | * This helper handles its own reference counting of the incoming |
| 1309 | * struct file. |
| 1310 | * |
Kees Cook | deefa7f | 2020-06-10 20:47:45 -0700 | [diff] [blame] | 1311 | * Returns the newly installed fd or a negative errno on error. |
Kees Cook | 6659061 | 2020-06-10 08:20:05 -0700 | [diff] [blame] | 1312 | */ |
Christian Brauner | 4e94ddf | 2023-11-30 13:49:11 +0100 | [diff] [blame] | 1313 | int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags) |
Kees Cook | 6659061 | 2020-06-10 08:20:05 -0700 | [diff] [blame] | 1314 | { |
| 1315 | int new_fd; |
| 1316 | int error; |
| 1317 | |
| 1318 | error = security_file_receive(file); |
| 1319 | if (error) |
| 1320 | return error; |
| 1321 | |
Christoph Hellwig | 42eb0d5 | 2021-03-25 09:22:09 +0100 | [diff] [blame] | 1322 | new_fd = get_unused_fd_flags(o_flags); |
| 1323 | if (new_fd < 0) |
| 1324 | return new_fd; |
Kees Cook | 6659061 | 2020-06-10 08:20:05 -0700 | [diff] [blame] | 1325 | |
Kees Cook | deefa7f | 2020-06-10 20:47:45 -0700 | [diff] [blame] | 1326 | if (ufd) { |
| 1327 | error = put_user(new_fd, ufd); |
| 1328 | if (error) { |
Christoph Hellwig | 42eb0d5 | 2021-03-25 09:22:09 +0100 | [diff] [blame] | 1329 | put_unused_fd(new_fd); |
Kees Cook | deefa7f | 2020-06-10 20:47:45 -0700 | [diff] [blame] | 1330 | return error; |
| 1331 | } |
Kees Cook | 6659061 | 2020-06-10 08:20:05 -0700 | [diff] [blame] | 1332 | } |
| 1333 | |
Christoph Hellwig | 42eb0d5 | 2021-03-25 09:22:09 +0100 | [diff] [blame] | 1334 | fd_install(new_fd, get_file(file)); |
| 1335 | __receive_sock(file); |
| 1336 | return new_fd; |
| 1337 | } |
Christian Brauner | 4e94ddf | 2023-11-30 13:49:11 +0100 | [diff] [blame] | 1338 | EXPORT_SYMBOL_GPL(receive_fd); |
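/*
 * Hypothetical illustration, not part of fs/file.c: installing a file that
 * was received from another task into the current task's descriptor table.
 * receive_fd() takes its own reference via get_file(), so the caller keeps
 * (and later drops) its original one.  The function below is invented for
 * the example.
 */
static int example_install_received(struct file *file)
{
	int fd = receive_fd(file, NULL, O_CLOEXEC);

	if (fd < 0)
		return fd;	/* nothing was installed */
	return fd;		/* new descriptor in the current task */
}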
Kees Cook | 17381715 | 2020-06-10 08:46:58 -0700 | [diff] [blame] | 1339 | |
Christoph Hellwig | 42eb0d5 | 2021-03-25 09:22:09 +0100 | [diff] [blame] | 1340 | int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags) |
| 1341 | { |
| 1342 | int error; |
| 1343 | |
| 1344 | error = security_file_receive(file); |
| 1345 | if (error) |
| 1346 | return error; |
| 1347 | error = replace_fd(new_fd, file, o_flags); |
| 1348 | if (error) |
| 1349 | return error; |
Kees Cook | 6659061 | 2020-06-10 08:20:05 -0700 | [diff] [blame] | 1350 | __receive_sock(file); |
Kees Cook | deefa7f | 2020-06-10 20:47:45 -0700 | [diff] [blame] | 1351 | return new_fd; |
Kees Cook | 6659061 | 2020-06-10 08:20:05 -0700 | [diff] [blame] | 1352 | } |
| 1353 | |
Dominik Brodowski | c724832 | 2018-03-11 11:34:40 +0100 | [diff] [blame] | 1354 | static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags) |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1355 | { |
| 1356 | int err = -EBADF; |
Al Viro | 8280d16 | 2012-08-21 12:11:46 -0400 | [diff] [blame] | 1357 | struct file *file; |
| 1358 | struct files_struct *files = current->files; |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1359 | |
| 1360 | if ((flags & ~O_CLOEXEC) != 0) |
| 1361 | return -EINVAL; |
| 1362 | |
Richard W.M. Jones | aed9764 | 2012-10-09 15:27:43 +0100 | [diff] [blame] | 1363 | if (unlikely(oldfd == newfd)) |
| 1364 | return -EINVAL; |
| 1365 | |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1366 | if (newfd >= rlimit(RLIMIT_NOFILE)) |
Al Viro | 08f05c4 | 2012-10-31 03:37:48 +0000 | [diff] [blame] | 1367 | return -EBADF; |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1368 | |
| 1369 | spin_lock(&files->file_lock); |
| 1370 | err = expand_files(files, newfd); |
Eric W. Biederman | 120ce2b | 2020-11-20 17:14:25 -0600 | [diff] [blame] | 1371 | file = files_lookup_fd_locked(files, oldfd); |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1372 | if (unlikely(!file)) |
| 1373 | goto Ebadf; |
| 1374 | if (unlikely(err < 0)) { |
| 1375 | if (err == -EMFILE) |
| 1376 | goto Ebadf; |
| 1377 | goto out_unlock; |
| 1378 | } |
Al Viro | 8280d16 | 2012-08-21 12:11:46 -0400 | [diff] [blame] | 1379 | return do_dup2(files, file, newfd, flags); |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1380 | |
| 1381 | Ebadf: |
| 1382 | err = -EBADF; |
| 1383 | out_unlock: |
| 1384 | spin_unlock(&files->file_lock); |
| 1385 | return err; |
| 1386 | } |
| 1387 | |
Dominik Brodowski | c724832 | 2018-03-11 11:34:40 +0100 | [diff] [blame] | 1388 | SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags) |
| 1389 | { |
| 1390 | return ksys_dup3(oldfd, newfd, flags); |
| 1391 | } |
| 1392 | |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1393 | SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) |
| 1394 | { |
| 1395 | if (unlikely(newfd == oldfd)) { /* corner case */ |
| 1396 | struct files_struct *files = current->files; |
Christian Brauner | 0ede61d | 2023-09-29 08:45:59 +0200 | [diff] [blame] | 1397 | struct file *f; |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1398 | int retval = oldfd; |
| 1399 | |
| 1400 | rcu_read_lock(); |
Christian Brauner | 0ede61d | 2023-09-29 08:45:59 +0200 | [diff] [blame] | 1401 | f = __fget_files_rcu(files, oldfd, 0); |
| 1402 | if (!f) |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1403 | retval = -EBADF; |
| 1404 | rcu_read_unlock(); |
Christian Brauner | 0ede61d | 2023-09-29 08:45:59 +0200 | [diff] [blame] | 1405 | if (f) |
| 1406 | fput(f); |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1407 | return retval; |
| 1408 | } |
Dominik Brodowski | c724832 | 2018-03-11 11:34:40 +0100 | [diff] [blame] | 1409 | return ksys_dup3(oldfd, newfd, 0); |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1410 | } |
| 1411 | |
Christoph Hellwig | bc1cd99 | 2020-07-14 08:58:49 +0200 | [diff] [blame] | 1412 | SYSCALL_DEFINE1(dup, unsigned int, fildes) |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1413 | { |
| 1414 | int ret = -EBADF; |
| 1415 | struct file *file = fget_raw(fildes); |
| 1416 | |
| 1417 | if (file) { |
Yann Droneaud | 8d10a03 | 2014-12-10 15:45:44 -0800 | [diff] [blame] | 1418 | ret = get_unused_fd_flags(0); |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1419 | if (ret >= 0) |
| 1420 | fd_install(ret, file); |
| 1421 | else |
| 1422 | fput(file); |
| 1423 | } |
| 1424 | return ret; |
| 1425 | } |
| 1426 | |
| 1427 | int f_dupfd(unsigned int from, struct file *file, unsigned flags) |
| 1428 | { |
Eric W. Biederman | e06b53c | 2020-11-20 17:14:36 -0600 | [diff] [blame] | 1429 | unsigned long nofile = rlimit(RLIMIT_NOFILE); |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1430 | int err; |
Eric W. Biederman | e06b53c | 2020-11-20 17:14:36 -0600 | [diff] [blame] | 1431 | if (from >= nofile) |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1432 | return -EINVAL; |
Eric W. Biederman | e06b53c | 2020-11-20 17:14:36 -0600 | [diff] [blame] | 1433 | err = alloc_fd(from, nofile, flags); |
Al Viro | fe17f22 | 2012-08-21 11:48:11 -0400 | [diff] [blame] | 1434 | if (err >= 0) { |
| 1435 | get_file(file); |
| 1436 | fd_install(err, file); |
| 1437 | } |
| 1438 | return err; |
| 1439 | } |
Al Viro | c3c073f | 2012-08-21 22:32:06 -0400 | [diff] [blame] | 1440 | |
| 1441 | int iterate_fd(struct files_struct *files, unsigned n, |
| 1442 | int (*f)(const void *, struct file *, unsigned), |
| 1443 | const void *p) |
| 1444 | { |
| 1445 | struct fdtable *fdt; |
Al Viro | c3c073f | 2012-08-21 22:32:06 -0400 | [diff] [blame] | 1446 | int res = 0; |
| 1447 | if (!files) |
| 1448 | return 0; |
| 1449 | spin_lock(&files->file_lock); |
Al Viro | a77cfcb | 2012-11-29 22:57:33 -0500 | [diff] [blame] | 1450 | for (fdt = files_fdtable(files); n < fdt->max_fds; n++) { |
| 1451 | struct file *file; |
| 1452 | file = rcu_dereference_check_fdtable(files, fdt->fd[n]); |
| 1453 | if (!file) |
| 1454 | continue; |
| 1455 | res = f(p, file, n); |
| 1456 | if (res) |
| 1457 | break; |
Al Viro | c3c073f | 2012-08-21 22:32:06 -0400 | [diff] [blame] | 1458 | } |
| 1459 | spin_unlock(&files->file_lock); |
| 1460 | return res; |
| 1461 | } |
| 1462 | EXPORT_SYMBOL(iterate_fd); |
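/*
 * Hypothetical illustration, not part of fs/file.c: an iterate_fd() callback.
 * Iteration stops at the first non-zero return value, which iterate_fd()
 * then passes back to the caller.  The callback name is invented for the
 * example.
 */
static int example_find_file(const void *p, struct file *file, unsigned fd)
{
	/* report fd + 1 so that fd 0 is distinguishable from "not found" */
	return file == p ? (int)(fd + 1) : 0;
}

/*
 * Usage (sketch): res = iterate_fd(files, 0, example_find_file, file);
 * res is 0 if no descriptor refers to 'file', otherwise fd + 1.
 */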