| // SPDX-License-Identifier: GPL-2.0-only |
| /* |
| * linux/fs/namespace.c |
| * |
| * (C) Copyright Al Viro 2000, 2001 |
| * |
| * Based on code from fs/super.c, copyright Linus Torvalds and others. |
| * Heavily rewritten. |
| */ |
| |
| #include <linux/syscalls.h> |
| #include <linux/export.h> |
| #include <linux/capability.h> |
| #include <linux/mnt_namespace.h> |
| #include <linux/user_namespace.h> |
| #include <linux/namei.h> |
| #include <linux/security.h> |
| #include <linux/cred.h> |
| #include <linux/idr.h> |
| #include <linux/init.h> /* init_rootfs */ |
| #include <linux/fs_struct.h> /* get_fs_root et al. */ |
| #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */ |
| #include <linux/file.h> |
| #include <linux/uaccess.h> |
| #include <linux/proc_ns.h> |
| #include <linux/magic.h> |
| #include <linux/memblock.h> |
| #include <linux/proc_fs.h> |
| #include <linux/task_work.h> |
| #include <linux/sched/task.h> |
| #include <uapi/linux/mount.h> |
| #include <linux/fs_context.h> |
| #include <linux/shmem_fs.h> |
| |
| #include "pnode.h" |
| #include "internal.h" |
| |
| /* Maximum number of mounts in a mount namespace */ |
| unsigned int sysctl_mount_max __read_mostly = 100000; |
| |
| static unsigned int m_hash_mask __read_mostly; |
| static unsigned int m_hash_shift __read_mostly; |
| static unsigned int mp_hash_mask __read_mostly; |
| static unsigned int mp_hash_shift __read_mostly; |
| |
| static __initdata unsigned long mhash_entries; |
| static int __init set_mhash_entries(char *str) |
| { |
| if (!str) |
| return 0; |
| mhash_entries = simple_strtoul(str, &str, 0); |
| return 1; |
| } |
| __setup("mhash_entries=", set_mhash_entries); |
| |
| static __initdata unsigned long mphash_entries; |
| static int __init set_mphash_entries(char *str) |
| { |
| if (!str) |
| return 0; |
| mphash_entries = simple_strtoul(str, &str, 0); |
| return 1; |
| } |
| __setup("mphash_entries=", set_mphash_entries); |
| |
| static u64 event; |
| static DEFINE_IDA(mnt_id_ida); |
| static DEFINE_IDA(mnt_group_ida); |
| |
| static struct hlist_head *mount_hashtable __read_mostly; |
| static struct hlist_head *mountpoint_hashtable __read_mostly; |
| static struct kmem_cache *mnt_cache __read_mostly; |
| static DECLARE_RWSEM(namespace_sem); |
| static HLIST_HEAD(unmounted); /* protected by namespace_sem */ |
| static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */ |
| |
| struct mount_kattr { |
| unsigned int attr_set; |
| unsigned int attr_clr; |
| unsigned int propagation; |
| unsigned int lookup_flags; |
| bool recurse; |
| struct user_namespace *mnt_userns; |
| }; |
| |
| /* /sys/fs */ |
| struct kobject *fs_kobj; |
| EXPORT_SYMBOL_GPL(fs_kobj); |
| |
| /* |
| * vfsmount lock may be taken for read to prevent changes to the |
| * vfsmount hash, i.e. during mountpoint lookups or walking back |
| * up the tree. |
| * |
| * It should be taken for write in all cases where the vfsmount |
| * tree or hash is modified or when a vfsmount structure is modified. |
| */ |
| __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock); |
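| |
| /* |
| * Read-side sketch (mirrors lookup_mnt() and path_is_mountpoint() below): |
| * lockless readers sample the seqcount and retry if a writer intervened: |
| * |
| *     unsigned seq; |
| *     do { |
| *             seq = read_seqbegin(&mount_lock); |
| *             ... walk the mount hash/tree without locks ... |
| *     } while (read_seqretry(&mount_lock, seq)); |
| */ |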
| |
| static inline void lock_mount_hash(void) |
| { |
| write_seqlock(&mount_lock); |
| } |
| |
| static inline void unlock_mount_hash(void) |
| { |
| write_sequnlock(&mount_lock); |
| } |
| |
| static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry) |
| { |
| unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); |
| tmp += ((unsigned long)dentry / L1_CACHE_BYTES); |
| tmp = tmp + (tmp >> m_hash_shift); |
| return &mount_hashtable[tmp & m_hash_mask]; |
| } |
| |
| static inline struct hlist_head *mp_hash(struct dentry *dentry) |
| { |
| unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES); |
| tmp = tmp + (tmp >> mp_hash_shift); |
| return &mountpoint_hashtable[tmp & mp_hash_mask]; |
| } |
| |
| static int mnt_alloc_id(struct mount *mnt) |
| { |
| int res = ida_alloc(&mnt_id_ida, GFP_KERNEL); |
| |
| if (res < 0) |
| return res; |
| mnt->mnt_id = res; |
| return 0; |
| } |
| |
| static void mnt_free_id(struct mount *mnt) |
| { |
| ida_free(&mnt_id_ida, mnt->mnt_id); |
| } |
| |
| /* |
| * Allocate a new peer group ID |
| */ |
| static int mnt_alloc_group_id(struct mount *mnt) |
| { |
| int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL); |
| |
| if (res < 0) |
| return res; |
| mnt->mnt_group_id = res; |
| return 0; |
| } |
| |
| /* |
| * Release a peer group ID |
| */ |
| void mnt_release_group_id(struct mount *mnt) |
| { |
| ida_free(&mnt_group_ida, mnt->mnt_group_id); |
| mnt->mnt_group_id = 0; |
| } |
| |
| /* |
| * vfsmount lock must be held for read |
| */ |
| static inline void mnt_add_count(struct mount *mnt, int n) |
| { |
| #ifdef CONFIG_SMP |
| this_cpu_add(mnt->mnt_pcp->mnt_count, n); |
| #else |
| preempt_disable(); |
| mnt->mnt_count += n; |
| preempt_enable(); |
| #endif |
| } |
| |
| /* |
| * vfsmount lock must be held for write |
| */ |
| int mnt_get_count(struct mount *mnt) |
| { |
| #ifdef CONFIG_SMP |
| int count = 0; |
| int cpu; |
| |
| for_each_possible_cpu(cpu) { |
| count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count; |
| } |
| |
| return count; |
| #else |
| return mnt->mnt_count; |
| #endif |
| } |
| |
| static struct mount *alloc_vfsmnt(const char *name) |
| { |
| struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); |
| if (mnt) { |
| int err; |
| |
| err = mnt_alloc_id(mnt); |
| if (err) |
| goto out_free_cache; |
| |
| if (name) { |
| mnt->mnt_devname = kstrdup_const(name, |
| GFP_KERNEL_ACCOUNT); |
| if (!mnt->mnt_devname) |
| goto out_free_id; |
| } |
| |
| #ifdef CONFIG_SMP |
| mnt->mnt_pcp = alloc_percpu(struct mnt_pcp); |
| if (!mnt->mnt_pcp) |
| goto out_free_devname; |
| |
| this_cpu_add(mnt->mnt_pcp->mnt_count, 1); |
| #else |
| mnt->mnt_count = 1; |
| mnt->mnt_writers = 0; |
| #endif |
| |
| INIT_HLIST_NODE(&mnt->mnt_hash); |
| INIT_LIST_HEAD(&mnt->mnt_child); |
| INIT_LIST_HEAD(&mnt->mnt_mounts); |
| INIT_LIST_HEAD(&mnt->mnt_list); |
| INIT_LIST_HEAD(&mnt->mnt_expire); |
| INIT_LIST_HEAD(&mnt->mnt_share); |
| INIT_LIST_HEAD(&mnt->mnt_slave_list); |
| INIT_LIST_HEAD(&mnt->mnt_slave); |
| INIT_HLIST_NODE(&mnt->mnt_mp_list); |
| INIT_LIST_HEAD(&mnt->mnt_umounting); |
| INIT_HLIST_HEAD(&mnt->mnt_stuck_children); |
| mnt->mnt.mnt_userns = &init_user_ns; |
| } |
| return mnt; |
| |
| #ifdef CONFIG_SMP |
| out_free_devname: |
| kfree_const(mnt->mnt_devname); |
| #endif |
| out_free_id: |
| mnt_free_id(mnt); |
| out_free_cache: |
| kmem_cache_free(mnt_cache, mnt); |
| return NULL; |
| } |
| |
| /* |
| * Most r/o checks on a fs are for operations that take |
| * discrete amounts of time, like a write() or unlink(). |
| * We must keep track of when those operations start |
| * (for permission checks) and when they end, so that |
| * we can determine when writes are able to occur to |
| * a filesystem. |
| */ |
| /* |
| * __mnt_is_readonly: check whether a mount is read-only |
| * @mnt: the mount to check for its write status |
| * |
| * This shouldn't be used directly outside of the VFS. |
| * It does not guarantee that the filesystem will stay |
| * r/w, just that it is right *now*. This can not and |
| * should not be used in place of IS_RDONLY(inode). |
| * mnt_want/drop_write() will _keep_ the filesystem |
| * r/w. |
| */ |
| bool __mnt_is_readonly(struct vfsmount *mnt) |
| { |
| return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb); |
| } |
| EXPORT_SYMBOL_GPL(__mnt_is_readonly); |
| |
| static inline void mnt_inc_writers(struct mount *mnt) |
| { |
| #ifdef CONFIG_SMP |
| this_cpu_inc(mnt->mnt_pcp->mnt_writers); |
| #else |
| mnt->mnt_writers++; |
| #endif |
| } |
| |
| static inline void mnt_dec_writers(struct mount *mnt) |
| { |
| #ifdef CONFIG_SMP |
| this_cpu_dec(mnt->mnt_pcp->mnt_writers); |
| #else |
| mnt->mnt_writers--; |
| #endif |
| } |
| |
| static unsigned int mnt_get_writers(struct mount *mnt) |
| { |
| #ifdef CONFIG_SMP |
| unsigned int count = 0; |
| int cpu; |
| |
| for_each_possible_cpu(cpu) { |
| count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers; |
| } |
| |
| return count; |
| #else |
| return mnt->mnt_writers; |
| #endif |
| } |
| |
| static int mnt_is_readonly(struct vfsmount *mnt) |
| { |
| if (mnt->mnt_sb->s_readonly_remount) |
| return 1; |
| /* Order wrt setting s_flags/s_readonly_remount in do_remount() */ |
| smp_rmb(); |
| return __mnt_is_readonly(mnt); |
| } |
| |
| /* |
| * Most r/o & frozen checks on a fs are for operations that take discrete |
| * amounts of time, like a write() or unlink(). We must keep track of when |
| * those operations start (for permission checks) and when they end, so that we |
| * can determine when writes are able to occur to a filesystem. |
| */ |
| /** |
| * __mnt_want_write - get write access to a mount without freeze protection |
| * @m: the mount on which to take a write |
| * |
| * This tells the low-level filesystem that a write is about to be performed to |
| * it, and makes sure that writes are allowed (mount is read-write) before |
| * returning success. This operation does not protect against filesystem being |
| * frozen. When the write operation is finished, __mnt_drop_write() must be |
| * called. This is effectively a refcount. |
| */ |
| int __mnt_want_write(struct vfsmount *m) |
| { |
| struct mount *mnt = real_mount(m); |
| int ret = 0; |
| |
| preempt_disable(); |
| mnt_inc_writers(mnt); |
| /* |
| * The store done by mnt_inc_writers() must be visible before we enter |
| * the MNT_WRITE_HOLD loop below, so that the slowpath can see our |
| * incremented count after it has set MNT_WRITE_HOLD. |
| */ |
| smp_mb(); |
| while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) |
| cpu_relax(); |
| /* |
| * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will |
| * be set to match its requirements. So we must not load that until |
| * MNT_WRITE_HOLD is cleared. |
| */ |
| smp_rmb(); |
| if (mnt_is_readonly(m)) { |
| mnt_dec_writers(mnt); |
| ret = -EROFS; |
| } |
| preempt_enable(); |
| |
| return ret; |
| } |
| |
| /** |
| * mnt_want_write - get write access to a mount |
| * @m: the mount on which to take a write |
| * |
| * This tells the low-level filesystem that a write is about to be performed to |
| * it, and makes sure that writes are allowed (mount is read-write, filesystem |
| * is not frozen) before returning success. When the write operation is |
| * finished, mnt_drop_write() must be called. This is effectively a refcount. |
| */ |
| int mnt_want_write(struct vfsmount *m) |
| { |
| int ret; |
| |
| sb_start_write(m->mnt_sb); |
| ret = __mnt_want_write(m); |
| if (ret) |
| sb_end_write(m->mnt_sb); |
| return ret; |
| } |
| EXPORT_SYMBOL_GPL(mnt_want_write); |
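| |
| /* |
| * Typical caller pattern, sketched for illustration: the reference taken |
| * by mnt_want_write() must always be paired with mnt_drop_write(): |
| * |
| *     err = mnt_want_write(path->mnt); |
| *     if (err) |
| *             return err; |
| *     ... perform the write-side operation ... |
| *     mnt_drop_write(path->mnt); |
| */ |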
| |
| /** |
| * __mnt_want_write_file - get write access to a file's mount |
| * @file: the file whose mount on which to take a write |
| * |
| * This is like __mnt_want_write, but if the file is already open for writing it |
| * skips incrementing mnt_writers (since the open file already has a reference) |
| * and instead only does the check for emergency r/o remounts. This must be |
| * paired with __mnt_drop_write_file. |
| */ |
| int __mnt_want_write_file(struct file *file) |
| { |
| if (file->f_mode & FMODE_WRITER) { |
| /* |
| * Superblock may have become readonly while there are still |
| * writable fd's, e.g. due to a fs error with errors=remount-ro |
| */ |
| if (__mnt_is_readonly(file->f_path.mnt)) |
| return -EROFS; |
| return 0; |
| } |
| return __mnt_want_write(file->f_path.mnt); |
| } |
| |
| /** |
| * mnt_want_write_file - get write access to a file's mount |
| * @file: the file whose mount on which to take a write |
| * |
| * This is like mnt_want_write, but if the file is already open for writing it |
| * skips incrementing mnt_writers (since the open file already has a reference) |
| * and instead only does the freeze protection and the check for emergency r/o |
| * remounts. This must be paired with mnt_drop_write_file. |
| */ |
| int mnt_want_write_file(struct file *file) |
| { |
| int ret; |
| |
| sb_start_write(file_inode(file)->i_sb); |
| ret = __mnt_want_write_file(file); |
| if (ret) |
| sb_end_write(file_inode(file)->i_sb); |
| return ret; |
| } |
| EXPORT_SYMBOL_GPL(mnt_want_write_file); |
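| |
| /* |
| * As above, a successful mnt_want_write_file() must be paired with |
| * mnt_drop_write_file() once the write is done (sketch): |
| * |
| *     err = mnt_want_write_file(file); |
| *     if (err) |
| *             return err; |
| *     ... write to the file ... |
| *     mnt_drop_write_file(file); |
| */ |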
| |
| /** |
| * __mnt_drop_write - give up write access to a mount |
| * @mnt: the mount on which to give up write access |
| * |
| * Tells the low-level filesystem that we are done |
| * performing writes to it. Must be matched with |
| * __mnt_want_write() call above. |
| */ |
| void __mnt_drop_write(struct vfsmount *mnt) |
| { |
| preempt_disable(); |
| mnt_dec_writers(real_mount(mnt)); |
| preempt_enable(); |
| } |
| |
| /** |
| * mnt_drop_write - give up write access to a mount |
| * @mnt: the mount on which to give up write access |
| * |
| * Tells the low-level filesystem that we are done performing writes to it and |
| * also allows filesystem to be frozen again. Must be matched with |
| * mnt_want_write() call above. |
| */ |
| void mnt_drop_write(struct vfsmount *mnt) |
| { |
| __mnt_drop_write(mnt); |
| sb_end_write(mnt->mnt_sb); |
| } |
| EXPORT_SYMBOL_GPL(mnt_drop_write); |
| |
| void __mnt_drop_write_file(struct file *file) |
| { |
| if (!(file->f_mode & FMODE_WRITER)) |
| __mnt_drop_write(file->f_path.mnt); |
| } |
| |
| void mnt_drop_write_file(struct file *file) |
| { |
| __mnt_drop_write_file(file); |
| sb_end_write(file_inode(file)->i_sb); |
| } |
| EXPORT_SYMBOL(mnt_drop_write_file); |
| |
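| /* |
| * Block new writers on @mnt by setting MNT_WRITE_HOLD; the hold is |
| * normally taken with lock_mount_hash() held. On success (no active |
| * writers) the caller applies its change and then re-enables writers |
| * with mnt_unhold_writers(), as mnt_make_readonly() below does. |
| */ |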
| static inline int mnt_hold_writers(struct mount *mnt) |
| { |
| mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; |
| /* |
| * After storing MNT_WRITE_HOLD, we'll read the counters. This store |
| * should be visible before we do. |
| */ |
| smp_mb(); |
| |
| /* |
| * With writers on hold, if this value is zero, then there are |
| * definitely no active writers (although held writers may subsequently |
| * increment the count, they'll have to wait, and decrement it after |
| * seeing MNT_READONLY). |
| * |
| * It is OK to have counter incremented on one CPU and decremented on |
| * another: the sum will add up correctly. The danger would be when we |
| * sum up each counter, if we read a counter before it is incremented, |
| * but then read another CPU's count which it has been subsequently |
| * decremented from -- we would see more decrements than we should. |
| * MNT_WRITE_HOLD protects against this scenario, because |
| * mnt_want_write first increments count, then smp_mb, then spins on |
| * MNT_WRITE_HOLD, so it can't be decremented by another CPU while |
| * we're counting up here. |
| */ |
| if (mnt_get_writers(mnt) > 0) |
| return -EBUSY; |
| |
| return 0; |
| } |
| |
| static inline void mnt_unhold_writers(struct mount *mnt) |
| { |
| /* |
| * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers |
| * that become unheld will see MNT_READONLY. |
| */ |
| smp_wmb(); |
| mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; |
| } |
| |
| static int mnt_make_readonly(struct mount *mnt) |
| { |
| int ret; |
| |
| ret = mnt_hold_writers(mnt); |
| if (!ret) |
| mnt->mnt.mnt_flags |= MNT_READONLY; |
| mnt_unhold_writers(mnt); |
| return ret; |
| } |
| |
| int sb_prepare_remount_readonly(struct super_block *sb) |
| { |
| struct mount *mnt; |
| int err = 0; |
| |
| /* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */ |
| if (atomic_long_read(&sb->s_remove_count)) |
| return -EBUSY; |
| |
| lock_mount_hash(); |
| list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { |
| if (!(mnt->mnt.mnt_flags & MNT_READONLY)) { |
| mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; |
| smp_mb(); |
| if (mnt_get_writers(mnt) > 0) { |
| err = -EBUSY; |
| break; |
| } |
| } |
| } |
| if (!err && atomic_long_read(&sb->s_remove_count)) |
| err = -EBUSY; |
| |
| if (!err) { |
| sb->s_readonly_remount = 1; |
| smp_wmb(); |
| } |
| list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { |
| if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) |
| mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; |
| } |
| unlock_mount_hash(); |
| |
| return err; |
| } |
| |
| static void free_vfsmnt(struct mount *mnt) |
| { |
| struct user_namespace *mnt_userns; |
| |
| mnt_userns = mnt_user_ns(&mnt->mnt); |
| if (mnt_userns != &init_user_ns) |
| put_user_ns(mnt_userns); |
| kfree_const(mnt->mnt_devname); |
| #ifdef CONFIG_SMP |
| free_percpu(mnt->mnt_pcp); |
| #endif |
| kmem_cache_free(mnt_cache, mnt); |
| } |
| |
| static void delayed_free_vfsmnt(struct rcu_head *head) |
| { |
| free_vfsmnt(container_of(head, struct mount, mnt_rcu)); |
| } |
| |
| /* call under rcu_read_lock */ |
| int __legitimize_mnt(struct vfsmount *bastard, unsigned seq) |
| { |
| struct mount *mnt; |
| if (read_seqretry(&mount_lock, seq)) |
| return 1; |
| if (bastard == NULL) |
| return 0; |
| mnt = real_mount(bastard); |
| mnt_add_count(mnt, 1); |
| smp_mb(); // see mntput_no_expire() |
| if (likely(!read_seqretry(&mount_lock, seq))) |
| return 0; |
| if (bastard->mnt_flags & MNT_SYNC_UMOUNT) { |
| mnt_add_count(mnt, -1); |
| return 1; |
| } |
| lock_mount_hash(); |
| if (unlikely(bastard->mnt_flags & MNT_DOOMED)) { |
| mnt_add_count(mnt, -1); |
| unlock_mount_hash(); |
| return 1; |
| } |
| unlock_mount_hash(); |
| /* caller will mntput() */ |
| return -1; |
| } |
| |
| /* call under rcu_read_lock */ |
| bool legitimize_mnt(struct vfsmount *bastard, unsigned seq) |
| { |
| int res = __legitimize_mnt(bastard, seq); |
| if (likely(!res)) |
| return true; |
| if (unlikely(res < 0)) { |
| rcu_read_unlock(); |
| mntput(bastard); |
| rcu_read_lock(); |
| } |
| return false; |
| } |
| |
| /* |
| * find the first mount at @dentry on vfsmount @mnt. |
| * call under rcu_read_lock() |
| */ |
| struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) |
| { |
| struct hlist_head *head = m_hash(mnt, dentry); |
| struct mount *p; |
| |
| hlist_for_each_entry_rcu(p, head, mnt_hash) |
| if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) |
| return p; |
| return NULL; |
| } |
| |
| /* |
| * lookup_mnt - Return the first child mount mounted at path |
| * |
| * "First" means first mounted chronologically. If you create the |
| * following mounts: |
| * |
| * mount /dev/sda1 /mnt |
| * mount /dev/sda2 /mnt |
| * mount /dev/sda3 /mnt |
| * |
| * Then lookup_mnt() on the base /mnt dentry in the root mount will |
| * return successively the root dentry and vfsmount of /dev/sda1, then |
| * /dev/sda2, then /dev/sda3, then NULL. |
| * |
| * lookup_mnt takes a reference to the found vfsmount. |
| */ |
| struct vfsmount *lookup_mnt(const struct path *path) |
| { |
| struct mount *child_mnt; |
| struct vfsmount *m; |
| unsigned seq; |
| |
| rcu_read_lock(); |
| do { |
| seq = read_seqbegin(&mount_lock); |
| child_mnt = __lookup_mnt(path->mnt, path->dentry); |
| m = child_mnt ? &child_mnt->mnt : NULL; |
| } while (!legitimize_mnt(m, seq)); |
| rcu_read_unlock(); |
| return m; |
| } |
| |
| static inline void lock_ns_list(struct mnt_namespace *ns) |
| { |
| spin_lock(&ns->ns_lock); |
| } |
| |
| static inline void unlock_ns_list(struct mnt_namespace *ns) |
| { |
| spin_unlock(&ns->ns_lock); |
| } |
| |
| static inline bool mnt_is_cursor(struct mount *mnt) |
| { |
| return mnt->mnt.mnt_flags & MNT_CURSOR; |
| } |
| |
| /* |
| * __is_local_mountpoint - Test to see if dentry is a mountpoint in the |
| * current mount namespace. |
| * |
| * The common case is dentries are not mountpoints at all and that |
| * test is handled inline. For the slow case when we are actually |
| * dealing with a mountpoint of some kind, walk through all of the |
| * mounts in the current mount namespace and test to see if the dentry |
| * is a mountpoint. |
| * |
| * The mount_hashtable is not usable in this context because we |
| * need to identify all mounts that may be in the current mount |
| * namespace, not just a mount that happens to have some specified |
| * parent mount. |
| */ |
| bool __is_local_mountpoint(struct dentry *dentry) |
| { |
| struct mnt_namespace *ns = current->nsproxy->mnt_ns; |
| struct mount *mnt; |
| bool is_covered = false; |
| |
| down_read(&namespace_sem); |
| lock_ns_list(ns); |
| list_for_each_entry(mnt, &ns->list, mnt_list) { |
| if (mnt_is_cursor(mnt)) |
| continue; |
| is_covered = (mnt->mnt_mountpoint == dentry); |
| if (is_covered) |
| break; |
| } |
| unlock_ns_list(ns); |
| up_read(&namespace_sem); |
| |
| return is_covered; |
| } |
| |
| static struct mountpoint *lookup_mountpoint(struct dentry *dentry) |
| { |
| struct hlist_head *chain = mp_hash(dentry); |
| struct mountpoint *mp; |
| |
| hlist_for_each_entry(mp, chain, m_hash) { |
| if (mp->m_dentry == dentry) { |
| mp->m_count++; |
| return mp; |
| } |
| } |
| return NULL; |
| } |
| |
| static struct mountpoint *get_mountpoint(struct dentry *dentry) |
| { |
| struct mountpoint *mp, *new = NULL; |
| int ret; |
| |
| if (d_mountpoint(dentry)) { |
| /* might be worth a WARN_ON() */ |
| if (d_unlinked(dentry)) |
| return ERR_PTR(-ENOENT); |
| mountpoint: |
| read_seqlock_excl(&mount_lock); |
| mp = lookup_mountpoint(dentry); |
| read_sequnlock_excl(&mount_lock); |
| if (mp) |
| goto done; |
| } |
| |
| if (!new) |
| new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL); |
| if (!new) |
| return ERR_PTR(-ENOMEM); |
| |
| /* Exactly one process may set d_mounted */ |
| ret = d_set_mounted(dentry); |
| |
| /* Someone else set d_mounted? */ |
| if (ret == -EBUSY) |
| goto mountpoint; |
| |
| /* The dentry is not available as a mountpoint? */ |
| mp = ERR_PTR(ret); |
| if (ret) |
| goto done; |
| |
| /* Add the new mountpoint to the hash table */ |
| read_seqlock_excl(&mount_lock); |
| new->m_dentry = dget(dentry); |
| new->m_count = 1; |
| hlist_add_head(&new->m_hash, mp_hash(dentry)); |
| INIT_HLIST_HEAD(&new->m_list); |
| read_sequnlock_excl(&mount_lock); |
| |
| mp = new; |
| new = NULL; |
| done: |
| kfree(new); |
| return mp; |
| } |
| |
| /* |
| * vfsmount lock must be held. Additionally, the caller is responsible |
| * for serializing calls for given disposal list. |
| */ |
| static void __put_mountpoint(struct mountpoint *mp, struct list_head *list) |
| { |
| if (!--mp->m_count) { |
| struct dentry *dentry = mp->m_dentry; |
| BUG_ON(!hlist_empty(&mp->m_list)); |
| spin_lock(&dentry->d_lock); |
| dentry->d_flags &= ~DCACHE_MOUNTED; |
| spin_unlock(&dentry->d_lock); |
| dput_to_list(dentry, list); |
| hlist_del(&mp->m_hash); |
| kfree(mp); |
| } |
| } |
| |
| /* called with namespace_lock and vfsmount lock */ |
| static void put_mountpoint(struct mountpoint *mp) |
| { |
| __put_mountpoint(mp, &ex_mountpoints); |
| } |
| |
| static inline int check_mnt(struct mount *mnt) |
| { |
| return mnt->mnt_ns == current->nsproxy->mnt_ns; |
| } |
| |
| /* |
| * vfsmount lock must be held for write |
| */ |
| static void touch_mnt_namespace(struct mnt_namespace *ns) |
| { |
| if (ns) { |
| ns->event = ++event; |
| wake_up_interruptible(&ns->poll); |
| } |
| } |
| |
| /* |
| * vfsmount lock must be held for write |
| */ |
| static void __touch_mnt_namespace(struct mnt_namespace *ns) |
| { |
| if (ns && ns->event != event) { |
| ns->event = event; |
| wake_up_interruptible(&ns->poll); |
| } |
| } |
| |
| /* |
| * vfsmount lock must be held for write |
| */ |
| static struct mountpoint *unhash_mnt(struct mount *mnt) |
| { |
| struct mountpoint *mp; |
| mnt->mnt_parent = mnt; |
| mnt->mnt_mountpoint = mnt->mnt.mnt_root; |
| list_del_init(&mnt->mnt_child); |
| hlist_del_init_rcu(&mnt->mnt_hash); |
| hlist_del_init(&mnt->mnt_mp_list); |
| mp = mnt->mnt_mp; |
| mnt->mnt_mp = NULL; |
| return mp; |
| } |
| |
| /* |
| * vfsmount lock must be held for write |
| */ |
| static void umount_mnt(struct mount *mnt) |
| { |
| put_mountpoint(unhash_mnt(mnt)); |
| } |
| |
| /* |
| * vfsmount lock must be held for write |
| */ |
| void mnt_set_mountpoint(struct mount *mnt, |
| struct mountpoint *mp, |
| struct mount *child_mnt) |
| { |
| mp->m_count++; |
| mnt_add_count(mnt, 1); /* essentially, that's mntget */ |
| child_mnt->mnt_mountpoint = mp->m_dentry; |
| child_mnt->mnt_parent = mnt; |
| child_mnt->mnt_mp = mp; |
| hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list); |
| } |
| |
| static void __attach_mnt(struct mount *mnt, struct mount *parent) |
| { |
| hlist_add_head_rcu(&mnt->mnt_hash, |
| m_hash(&parent->mnt, mnt->mnt_mountpoint)); |
| list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); |
| } |
| |
| /* |
| * vfsmount lock must be held for write |
| */ |
| static void attach_mnt(struct mount *mnt, |
| struct mount *parent, |
| struct mountpoint *mp) |
| { |
| mnt_set_mountpoint(parent, mp, mnt); |
| __attach_mnt(mnt, parent); |
| } |
| |
| void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt) |
| { |
| struct mountpoint *old_mp = mnt->mnt_mp; |
| struct mount *old_parent = mnt->mnt_parent; |
| |
| list_del_init(&mnt->mnt_child); |
| hlist_del_init(&mnt->mnt_mp_list); |
| hlist_del_init_rcu(&mnt->mnt_hash); |
| |
| attach_mnt(mnt, parent, mp); |
| |
| put_mountpoint(old_mp); |
| mnt_add_count(old_parent, -1); |
| } |
| |
| /* |
| * vfsmount lock must be held for write |
| */ |
| static void commit_tree(struct mount *mnt) |
| { |
| struct mount *parent = mnt->mnt_parent; |
| struct mount *m; |
| LIST_HEAD(head); |
| struct mnt_namespace *n = parent->mnt_ns; |
| |
| BUG_ON(parent == mnt); |
| |
| list_add_tail(&head, &mnt->mnt_list); |
| list_for_each_entry(m, &head, mnt_list) |
| m->mnt_ns = n; |
| |
| list_splice(&head, n->list.prev); |
| |
| n->mounts += n->pending_mounts; |
| n->pending_mounts = 0; |
| |
| __attach_mnt(mnt, parent); |
| touch_mnt_namespace(n); |
| } |
| |
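| /* |
| * Walk a mount subtree rooted at @root in pre-order (parent before |
| * children). The idiom used throughout this file is: |
| * |
| *     for (p = mnt; p; p = next_mnt(p, mnt)) |
| *             ... visit p ... |
| */ |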
| static struct mount *next_mnt(struct mount *p, struct mount *root) |
| { |
| struct list_head *next = p->mnt_mounts.next; |
| if (next == &p->mnt_mounts) { |
| while (1) { |
| if (p == root) |
| return NULL; |
| next = p->mnt_child.next; |
| if (next != &p->mnt_parent->mnt_mounts) |
| break; |
| p = p->mnt_parent; |
| } |
| } |
| return list_entry(next, struct mount, mnt_child); |
| } |
| |
| static struct mount *skip_mnt_tree(struct mount *p) |
| { |
| struct list_head *prev = p->mnt_mounts.prev; |
| while (prev != &p->mnt_mounts) { |
| p = list_entry(prev, struct mount, mnt_child); |
| prev = p->mnt_mounts.prev; |
| } |
| return p; |
| } |
| |
| /** |
| * vfs_create_mount - Create a mount for a configured superblock |
| * @fc: The configuration context with the superblock attached |
| * |
| * Create a mount to an already configured superblock. If necessary, the |
| * caller should invoke vfs_get_tree() before calling this. |
| * |
| * Note that this does not attach the mount to anything. |
| */ |
| struct vfsmount *vfs_create_mount(struct fs_context *fc) |
| { |
| struct mount *mnt; |
| |
| if (!fc->root) |
| return ERR_PTR(-EINVAL); |
| |
| mnt = alloc_vfsmnt(fc->source ?: "none"); |
| if (!mnt) |
| return ERR_PTR(-ENOMEM); |
| |
| if (fc->sb_flags & SB_KERNMOUNT) |
| mnt->mnt.mnt_flags = MNT_INTERNAL; |
| |
| atomic_inc(&fc->root->d_sb->s_active); |
| mnt->mnt.mnt_sb = fc->root->d_sb; |
| mnt->mnt.mnt_root = dget(fc->root); |
| mnt->mnt_mountpoint = mnt->mnt.mnt_root; |
| mnt->mnt_parent = mnt; |
| |
| lock_mount_hash(); |
| list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts); |
| unlock_mount_hash(); |
| return &mnt->mnt; |
| } |
| EXPORT_SYMBOL(vfs_create_mount); |
| |
| struct vfsmount *fc_mount(struct fs_context *fc) |
| { |
| int err = vfs_get_tree(fc); |
| if (!err) { |
| up_write(&fc->root->d_sb->s_umount); |
| return vfs_create_mount(fc); |
| } |
| return ERR_PTR(err); |
| } |
| EXPORT_SYMBOL(fc_mount); |
| |
| struct vfsmount *vfs_kern_mount(struct file_system_type *type, |
| int flags, const char *name, |
| void *data) |
| { |
| struct fs_context *fc; |
| struct vfsmount *mnt; |
| int ret = 0; |
| |
| if (!type) |
| return ERR_PTR(-EINVAL); |
| |
| fc = fs_context_for_mount(type, flags); |
| if (IS_ERR(fc)) |
| return ERR_CAST(fc); |
| |
| if (name) |
| ret = vfs_parse_fs_string(fc, "source", |
| name, strlen(name)); |
| if (!ret) |
| ret = parse_monolithic_mount_data(fc, data); |
| if (!ret) |
| mnt = fc_mount(fc); |
| else |
| mnt = ERR_PTR(ret); |
| |
| put_fs_context(fc); |
| return mnt; |
| } |
| EXPORT_SYMBOL_GPL(vfs_kern_mount); |
| |
| struct vfsmount * |
| vfs_submount(const struct dentry *mountpoint, struct file_system_type *type, |
| const char *name, void *data) |
| { |
| /* Until it is worked out how to pass the user namespace |
| * through from the parent mount to the submount, don't support |
| * unprivileged mounts with submounts. |
| */ |
| if (mountpoint->d_sb->s_user_ns != &init_user_ns) |
| return ERR_PTR(-EPERM); |
| |
| return vfs_kern_mount(type, SB_SUBMOUNT, name, data); |
| } |
| EXPORT_SYMBOL_GPL(vfs_submount); |
| |
| static struct mount *clone_mnt(struct mount *old, struct dentry *root, |
| int flag) |
| { |
| struct super_block *sb = old->mnt.mnt_sb; |
| struct mount *mnt; |
| int err; |
| |
| mnt = alloc_vfsmnt(old->mnt_devname); |
| if (!mnt) |
| return ERR_PTR(-ENOMEM); |
| |
| if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE)) |
| mnt->mnt_group_id = 0; /* not a peer of original */ |
| else |
| mnt->mnt_group_id = old->mnt_group_id; |
| |
| if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) { |
| err = mnt_alloc_group_id(mnt); |
| if (err) |
| goto out_free; |
| } |
| |
| mnt->mnt.mnt_flags = old->mnt.mnt_flags; |
| mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL); |
| |
| atomic_inc(&sb->s_active); |
| mnt->mnt.mnt_userns = mnt_user_ns(&old->mnt); |
| if (mnt->mnt.mnt_userns != &init_user_ns) |
| mnt->mnt.mnt_userns = get_user_ns(mnt->mnt.mnt_userns); |
| mnt->mnt.mnt_sb = sb; |
| mnt->mnt.mnt_root = dget(root); |
| mnt->mnt_mountpoint = mnt->mnt.mnt_root; |
| mnt->mnt_parent = mnt; |
| lock_mount_hash(); |
| list_add_tail(&mnt->mnt_instance, &sb->s_mounts); |
| unlock_mount_hash(); |
| |
| if ((flag & CL_SLAVE) || |
| ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) { |
| list_add(&mnt->mnt_slave, &old->mnt_slave_list); |
| mnt->mnt_master = old; |
| CLEAR_MNT_SHARED(mnt); |
| } else if (!(flag & CL_PRIVATE)) { |
| if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old)) |
| list_add(&mnt->mnt_share, &old->mnt_share); |
| if (IS_MNT_SLAVE(old)) |
| list_add(&mnt->mnt_slave, &old->mnt_slave); |
| mnt->mnt_master = old->mnt_master; |
| } else { |
| CLEAR_MNT_SHARED(mnt); |
| } |
| if (flag & CL_MAKE_SHARED) |
| set_mnt_shared(mnt); |
| |
| /* stick the duplicate mount on the same expiry list |
| * as the original if that was on one */ |
| if (flag & CL_EXPIRE) { |
| if (!list_empty(&old->mnt_expire)) |
| list_add(&mnt->mnt_expire, &old->mnt_expire); |
| } |
| |
| return mnt; |
| |
| out_free: |
| mnt_free_id(mnt); |
| free_vfsmnt(mnt); |
| return ERR_PTR(err); |
| } |
| |
| static void cleanup_mnt(struct mount *mnt) |
| { |
| struct hlist_node *p; |
| struct mount *m; |
| /* |
| * The warning here probably indicates that somebody messed |
| * up a mnt_want/drop_write() pair. If this happens, the |
| * filesystem was probably unable to make r/w->r/o transitions. |
| * The locking used to deal with mnt_count decrement provides barriers, |
| * so mnt_get_writers() below is safe. |
| */ |
| WARN_ON(mnt_get_writers(mnt)); |
| if (unlikely(mnt->mnt_pins.first)) |
| mnt_pin_kill(mnt); |
| hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) { |
| hlist_del(&m->mnt_umount); |
| mntput(&m->mnt); |
| } |
| fsnotify_vfsmount_delete(&mnt->mnt); |
| dput(mnt->mnt.mnt_root); |
| deactivate_super(mnt->mnt.mnt_sb); |
| mnt_free_id(mnt); |
| call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt); |
| } |
| |
| static void __cleanup_mnt(struct rcu_head *head) |
| { |
| cleanup_mnt(container_of(head, struct mount, mnt_rcu)); |
| } |
| |
| static LLIST_HEAD(delayed_mntput_list); |
| static void delayed_mntput(struct work_struct *unused) |
| { |
| struct llist_node *node = llist_del_all(&delayed_mntput_list); |
| struct mount *m, *t; |
| |
| llist_for_each_entry_safe(m, t, node, mnt_llist) |
| cleanup_mnt(m); |
| } |
| static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput); |
| |
| static void mntput_no_expire(struct mount *mnt) |
| { |
| LIST_HEAD(list); |
| int count; |
| |
| rcu_read_lock(); |
| if (likely(READ_ONCE(mnt->mnt_ns))) { |
| /* |
| * Since we don't do lock_mount_hash() here, |
| * ->mnt_ns can change under us. However, if it's |
| * non-NULL, then there's a reference that won't |
| * be dropped until after an RCU delay done after |
| * turning ->mnt_ns NULL. So if we observe it |
| * non-NULL under rcu_read_lock(), the reference |
| * we are dropping is not the final one. |
| */ |
| mnt_add_count(mnt, -1); |
| rcu_read_unlock(); |
| return; |
| } |
| lock_mount_hash(); |
| /* |
| * make sure that if __legitimize_mnt() has not seen us grab |
| * mount_lock, we'll see their refcount increment here. |
| */ |
| smp_mb(); |
| mnt_add_count(mnt, -1); |
| count = mnt_get_count(mnt); |
| if (count != 0) { |
| WARN_ON(count < 0); |
| rcu_read_unlock(); |
| unlock_mount_hash(); |
| return; |
| } |
| if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) { |
| rcu_read_unlock(); |
| unlock_mount_hash(); |
| return; |
| } |
| mnt->mnt.mnt_flags |= MNT_DOOMED; |
| rcu_read_unlock(); |
| |
| list_del(&mnt->mnt_instance); |
| |
| if (unlikely(!list_empty(&mnt->mnt_mounts))) { |
| struct mount *p, *tmp; |
| list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { |
| __put_mountpoint(unhash_mnt(p), &list); |
| hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children); |
| } |
| } |
| unlock_mount_hash(); |
| shrink_dentry_list(&list); |
| |
| if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) { |
| struct task_struct *task = current; |
| if (likely(!(task->flags & PF_KTHREAD))) { |
| init_task_work(&mnt->mnt_rcu, __cleanup_mnt); |
| if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME)) |
| return; |
| } |
| if (llist_add(&mnt->mnt_llist, &delayed_mntput_list)) |
| schedule_delayed_work(&delayed_mntput_work, 1); |
| return; |
| } |
| cleanup_mnt(mnt); |
| } |
| |
| void mntput(struct vfsmount *mnt) |
| { |
| if (mnt) { |
| struct mount *m = real_mount(mnt); |
| /* avoid cacheline pingpong, hope gcc doesn't get "smart" */ |
| if (unlikely(m->mnt_expiry_mark)) |
| m->mnt_expiry_mark = 0; |
| mntput_no_expire(m); |
| } |
| } |
| EXPORT_SYMBOL(mntput); |
| |
| struct vfsmount *mntget(struct vfsmount *mnt) |
| { |
| if (mnt) |
| mnt_add_count(real_mount(mnt), 1); |
| return mnt; |
| } |
| EXPORT_SYMBOL(mntget); |
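| |
| /* |
| * Reference sketch: a vfsmount reference obtained with mntget() or |
| * returned by lookup_mnt() must eventually be balanced by mntput(): |
| * |
| *     struct vfsmount *m = lookup_mnt(path); |
| *     if (m) { |
| *             ... use m ... |
| *             mntput(m); |
| *     } |
| */ |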
| |
| /** |
| * path_is_mountpoint() - Check if path is a mount in the current namespace. |
| * @path: path to check |
| * |
| * d_mountpoint() can only be used reliably to establish if a dentry is |
| * not mounted in any namespace and that common case is handled inline. |
| * d_mountpoint() isn't aware of the possibility there may be multiple |
| * mounts using a given dentry in a different namespace. This function |
| * checks if the passed in path is a mountpoint rather than the dentry |
| * alone. |
| */ |
| bool path_is_mountpoint(const struct path *path) |
| { |
| unsigned seq; |
| bool res; |
| |
| if (!d_mountpoint(path->dentry)) |
| return false; |
| |
| rcu_read_lock(); |
| do { |
| seq = read_seqbegin(&mount_lock); |
| res = __path_is_mountpoint(path); |
| } while (read_seqretry(&mount_lock, seq)); |
| rcu_read_unlock(); |
| |
| return res; |
| } |
| EXPORT_SYMBOL(path_is_mountpoint); |
| |
| struct vfsmount *mnt_clone_internal(const struct path *path) |
| { |
| struct mount *p; |
| p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE); |
| if (IS_ERR(p)) |
| return ERR_CAST(p); |
| p->mnt.mnt_flags |= MNT_INTERNAL; |
| return &p->mnt; |
| } |
| |
| #ifdef CONFIG_PROC_FS |
| static struct mount *mnt_list_next(struct mnt_namespace *ns, |
| struct list_head *p) |
| { |
| struct mount *mnt, *ret = NULL; |
| |
| lock_ns_list(ns); |
| list_for_each_continue(p, &ns->list) { |
| mnt = list_entry(p, typeof(*mnt), mnt_list); |
| if (!mnt_is_cursor(mnt)) { |
| ret = mnt; |
| break; |
| } |
| } |
| unlock_ns_list(ns); |
| |
| return ret; |
| } |
| |
| /* iterator; we want it to have access to namespace_sem, thus here... */ |
| static void *m_start(struct seq_file *m, loff_t *pos) |
| { |
| struct proc_mounts *p = m->private; |
| struct list_head *prev; |
| |
| down_read(&namespace_sem); |
| if (!*pos) { |
| prev = &p->ns->list; |
| } else { |
| prev = &p->cursor.mnt_list; |
| |
| /* Read after we'd reached the end? */ |
| if (list_empty(prev)) |
| return NULL; |
| } |
| |
| return mnt_list_next(p->ns, prev); |
| } |
| |
| static void *m_next(struct seq_file *m, void *v, loff_t *pos) |
| { |
| struct proc_mounts *p = m->private; |
| struct mount *mnt = v; |
| |
| ++*pos; |
| return mnt_list_next(p->ns, &mnt->mnt_list); |
| } |
| |
| static void m_stop(struct seq_file *m, void *v) |
| { |
| struct proc_mounts *p = m->private; |
| struct mount *mnt = v; |
| |
| lock_ns_list(p->ns); |
| if (mnt) |
| list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list); |
| else |
| list_del_init(&p->cursor.mnt_list); |
| unlock_ns_list(p->ns); |
| up_read(&namespace_sem); |
| } |
| |
| static int m_show(struct seq_file *m, void *v) |
| { |
| struct proc_mounts *p = m->private; |
| struct mount *r = v; |
| return p->show(m, &r->mnt); |
| } |
| |
| const struct seq_operations mounts_op = { |
| .start = m_start, |
| .next = m_next, |
| .stop = m_stop, |
| .show = m_show, |
| }; |
| |
| void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor) |
| { |
| down_read(&namespace_sem); |
| lock_ns_list(ns); |
| list_del(&cursor->mnt_list); |
| unlock_ns_list(ns); |
| up_read(&namespace_sem); |
| } |
| #endif /* CONFIG_PROC_FS */ |
| |
| /** |
| * may_umount_tree - check if a mount tree is busy |
| * @m: root of mount tree |
| * |
| * This is called to check if a tree of mounts has any |
| * open files, pwds, chroots or sub mounts that are |
| * busy. |
| */ |
| int may_umount_tree(struct vfsmount *m) |
| { |
| struct mount *mnt = real_mount(m); |
| int actual_refs = 0; |
| int minimum_refs = 0; |
| struct mount *p; |
| BUG_ON(!m); |
| |
| /* write lock needed for mnt_get_count */ |
| lock_mount_hash(); |
| for (p = mnt; p; p = next_mnt(p, mnt)) { |
| actual_refs += mnt_get_count(p); |
| minimum_refs += 2; |
| } |
| unlock_mount_hash(); |
| |
| if (actual_refs > minimum_refs) |
| return 0; |
| |
| return 1; |
| } |
| |
| EXPORT_SYMBOL(may_umount_tree); |
| |
| /** |
| * may_umount - check if a mount point is busy |
| * @mnt: root of mount |
| * |
| * This is called to check if a mount point has any |
| * open files, pwds, chroots or sub mounts. If the |
| * mount has sub mounts this will return busy |
| * regardless of whether the sub mounts are busy. |
| * |
| * Doesn't take quota and stuff into account. IOW, in some cases it will |
| * give false negatives. The main reason why it's here is that we need |
| * a non-destructive way to look for easily umountable filesystems. |
| */ |
| int may_umount(struct vfsmount *mnt) |
| { |
| int ret = 1; |
| down_read(&namespace_sem); |
| lock_mount_hash(); |
| if (propagate_mount_busy(real_mount(mnt), 2)) |
| ret = 0; |
| unlock_mount_hash(); |
| up_read(&namespace_sem); |
| return ret; |
| } |
| |
| EXPORT_SYMBOL(may_umount); |
| |
| static void namespace_unlock(void) |
| { |
| struct hlist_head head; |
| struct hlist_node *p; |
| struct mount *m; |
| LIST_HEAD(list); |
| |
| hlist_move_list(&unmounted, &head); |
| list_splice_init(&ex_mountpoints, &list); |
| |
| up_write(&namespace_sem); |
| |
| shrink_dentry_list(&list); |
| |
| if (likely(hlist_empty(&head))) |
| return; |
| |
| synchronize_rcu_expedited(); |
| |
| hlist_for_each_entry_safe(m, p, &head, mnt_umount) { |
| hlist_del(&m->mnt_umount); |
| mntput(&m->mnt); |
| } |
| } |
| |
| static inline void namespace_lock(void) |
| { |
| down_write(&namespace_sem); |
| } |
| |
| enum umount_tree_flags { |
| UMOUNT_SYNC = 1, |
| UMOUNT_PROPAGATE = 2, |
| UMOUNT_CONNECTED = 4, |
| }; |
| |
| static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how) |
| { |
| /* Leaving mounts connected is only valid for lazy umounts */ |
| if (how & UMOUNT_SYNC) |
| return true; |
| |
| /* A mount without a parent has nothing to be connected to */ |
| if (!mnt_has_parent(mnt)) |
| return true; |
| |
| /* Because the reference counting rules change when mounts are |
| * unmounted and connected, umounted mounts may not be |
| * connected to mounted mounts. |
| */ |
| if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) |
| return true; |
| |
| /* Has it been requested that the mount remain connected? */ |
| if (how & UMOUNT_CONNECTED) |
| return false; |
| |
| /* Is the mount locked such that it needs to remain connected? */ |
| if (IS_MNT_LOCKED(mnt)) |
| return false; |
| |
| /* By default disconnect the mount */ |
| return true; |
| } |
| |
| /* |
| * mount_lock must be held |
| * namespace_sem must be held for write |
| */ |
| static void umount_tree(struct mount *mnt, enum umount_tree_flags how) |
| { |
| LIST_HEAD(tmp_list); |
| struct mount *p; |
| |
| if (how & UMOUNT_PROPAGATE) |
| propagate_mount_unlock(mnt); |
| |
| /* Gather the mounts to umount */ |
| for (p = mnt; p; p = next_mnt(p, mnt)) { |
| p->mnt.mnt_flags |= MNT_UMOUNT; |
| list_move(&p->mnt_list, &tmp_list); |
| } |
| |
| /* Hide the mounts from mnt_mounts */ |
| list_for_each_entry(p, &tmp_list, mnt_list) { |
| list_del_init(&p->mnt_child); |
| } |
| |
| /* Add propagated mounts to the tmp_list */ |
| if (how & UMOUNT_PROPAGATE) |
| propagate_umount(&tmp_list); |
| |
| while (!list_empty(&tmp_list)) { |
| struct mnt_namespace *ns; |
| bool disconnect; |
| p = list_first_entry(&tmp_list, struct mount, mnt_list); |
| list_del_init(&p->mnt_expire); |
| list_del_init(&p->mnt_list); |
| ns = p->mnt_ns; |
| if (ns) { |
| ns->mounts--; |
| __touch_mnt_namespace(ns); |
| } |
| p->mnt_ns = NULL; |
| if (how & UMOUNT_SYNC) |
| p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; |
| |
| disconnect = disconnect_mount(p, how); |
| if (mnt_has_parent(p)) { |
| mnt_add_count(p->mnt_parent, -1); |
| if (!disconnect) { |
| /* Don't forget about p */ |
| list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts); |
| } else { |
| umount_mnt(p); |
| } |
| } |
| change_mnt_propagation(p, MS_PRIVATE); |
| if (disconnect) |
| hlist_add_head(&p->mnt_umount, &unmounted); |
| } |
| } |
| |
| static void shrink_submounts(struct mount *mnt); |
| |
| static int do_umount_root(struct super_block *sb) |
| { |
| int ret = 0; |
| |
| down_write(&sb->s_umount); |
| if (!sb_rdonly(sb)) { |
| struct fs_context *fc; |
| |
| fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY, |
| SB_RDONLY); |
| if (IS_ERR(fc)) { |
| ret = PTR_ERR(fc); |
| } else { |
| ret = parse_monolithic_mount_data(fc, NULL); |
| if (!ret) |
| ret = reconfigure_super(fc); |
| put_fs_context(fc); |
| } |
| } |
| up_write(&sb->s_umount); |
| return ret; |
| } |
| |
| static int do_umount(struct mount *mnt, int flags) |
| { |
| struct super_block *sb = mnt->mnt.mnt_sb; |
| int retval; |
| |
| retval = security_sb_umount(&mnt->mnt, flags); |
| if (retval) |
| return retval; |
| |
| /* |
| * Allow userspace to request a mountpoint be expired rather than |
| * unmounting unconditionally. Unmount only happens if: |
| * (1) the mark is already set (the mark is cleared by mntput()) |
| * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount] |
| */ |
| if (flags & MNT_EXPIRE) { |
| if (&mnt->mnt == current->fs->root.mnt || |
| flags & (MNT_FORCE | MNT_DETACH)) |
| return -EINVAL; |
| |
| /* |
| * probably don't strictly need the lock here if we examined |
| * all race cases, but it's a slowpath. |
| */ |
| lock_mount_hash(); |
| if (mnt_get_count(mnt) != 2) { |
| unlock_mount_hash(); |
| return -EBUSY; |
| } |
| unlock_mount_hash(); |
| |
| if (!xchg(&mnt->mnt_expiry_mark, 1)) |
| return -EAGAIN; |
| } |
| |
| /* |
| * If we may have to abort operations to get out of this |
| * mount, and they will themselves hold resources we must |
| * allow the fs to do things. In the Unix tradition of |
| * 'Gee that's tricky, let's do it in userspace' the umount_begin |
| * might fail to complete on the first run through as other tasks |
| * must return, and the like. That's for the mount program to worry |
| * about for the moment. |
| */ |
| |
| if (flags & MNT_FORCE && sb->s_op->umount_begin) { |
| sb->s_op->umount_begin(sb); |
| } |
| |
| /* |
| * No sense to grab the lock for this test, but the test itself looks |
| * somewhat bogus. Suggestions for better replacement? |
| * Ho-hum... In principle, we might treat that as umount + switch |
| * to rootfs. GC would eventually take care of the old vfsmount. |
| * Actually it makes sense, especially if rootfs would contain a |
| * /reboot - static binary that would close all descriptors and |
| * call reboot(2). Then init(8) could umount root and exec /reboot. |
| */ |
| if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { |
| /* |
| * Special case for "unmounting" root ... |
| * we just try to remount it readonly. |
| */ |
| if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) |
| return -EPERM; |
| return do_umount_root(sb); |
| } |
| |
| namespace_lock(); |
| lock_mount_hash(); |
| |
| /* Recheck MNT_LOCKED with the locks held */ |
| retval = -EINVAL; |
| if (mnt->mnt.mnt_flags & MNT_LOCKED) |
| goto out; |
| |
| event++; |
| if (flags & MNT_DETACH) { |
| if (!list_empty(&mnt->mnt_list)) |
| umount_tree(mnt, UMOUNT_PROPAGATE); |
| retval = 0; |
| } else { |
| shrink_submounts(mnt); |
| retval = -EBUSY; |
| if (!propagate_mount_busy(mnt, 2)) { |
| if (!list_empty(&mnt->mnt_list)) |
| umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); |
| retval = 0; |
| } |
| } |
| out: |
| unlock_mount_hash(); |
| namespace_unlock(); |
| return retval; |
| } |
| |
| /* |
| * __detach_mounts - lazily unmount all mounts on the specified dentry |
| * |
| * During unlink, rmdir, and d_drop it is possible to lose the path |
| * to an existing mountpoint, and wind up leaking the mount. |
| * detach_mounts allows lazily unmounting those mounts instead of |
| * leaking them. |
| * |
| * The caller may hold dentry->d_inode->i_mutex. |
| */ |
| void __detach_mounts(struct dentry *dentry) |
| { |
| struct mountpoint *mp; |
| struct mount *mnt; |
| |
| namespace_lock(); |
| lock_mount_hash(); |
| mp = lookup_mountpoint(dentry); |
| if (!mp) |
| goto out_unlock; |
| |
| event++; |
| while (!hlist_empty(&mp->m_list)) { |
| mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); |
| if (mnt->mnt.mnt_flags & MNT_UMOUNT) { |
| umount_mnt(mnt); |
| hlist_add_head(&mnt->mnt_umount, &unmounted); |
| } |
| else umount_tree(mnt, UMOUNT_CONNECTED); |
| } |
| put_mountpoint(mp); |
| out_unlock: |
| unlock_mount_hash(); |
| namespace_unlock(); |
| } |
| |
| /* |
| * Is the caller allowed to modify his namespace? |
| */ |
| static inline bool may_mount(void) |
| { |
| return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); |
| } |
| |
| static void warn_mandlock(void) |
| { |
| pr_warn_once("=======================================================\n" |
| "WARNING: The mand mount option has been deprecated and\n" |
| " and is ignored by this kernel. Remove the mand\n" |
| " option from the mount to silence this warning.\n" |
| "=======================================================\n"); |
| } |
| |
| static int can_umount(const struct path *path, int flags) |
| { |
| struct mount *mnt = real_mount(path->mnt); |
| |
| if (!may_mount()) |
| return -EPERM; |
| if (path->dentry != path->mnt->mnt_root) |
| return -EINVAL; |
| if (!check_mnt(mnt)) |
| return -EINVAL; |
| if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */ |
| return -EINVAL; |
| if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN)) |
| return -EPERM; |
| return 0; |
| } |
| |
| // caller is responsible for flags being sane |
| int path_umount(struct path *path, int flags) |
| { |
| struct mount *mnt = real_mount(path->mnt); |
| int ret; |
| |
| ret = can_umount(path, flags); |
| if (!ret) |
| ret = do_umount(mnt, flags); |
| |
| /* we mustn't call path_put() as that would clear mnt_expiry_mark */ |
| dput(path->dentry); |
| mntput_no_expire(mnt); |
| return ret; |
| } |
| |
| static int ksys_umount(char __user *name, int flags) |
| { |
| int lookup_flags = LOOKUP_MOUNTPOINT; |
| struct path path; |
| int ret; |
| |
| // basic validity checks done first |
| if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) |
| return -EINVAL; |
| |
| if (!(flags & UMOUNT_NOFOLLOW)) |
| lookup_flags |= LOOKUP_FOLLOW; |
| ret = user_path_at(AT_FDCWD, name, lookup_flags, &path); |
| if (ret) |
| return ret; |
| return path_umount(&path, flags); |
| } |
| |
| SYSCALL_DEFINE2(umount, char __user *, name, int, flags) |
| { |
| return ksys_umount(name, flags); |
| } |
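| |
| /* |
| * From userspace this entry point is reached via umount2(2), e.g. |
| * (illustrative only): |
| * |
| *     umount2("/mnt", MNT_DETACH);        // lazy unmount |
| *     umount2("/mnt", UMOUNT_NOFOLLOW);   // don't follow a symlink |
| * |
| * Any flag outside MNT_FORCE, MNT_DETACH, MNT_EXPIRE and UMOUNT_NOFOLLOW |
| * is rejected with -EINVAL by ksys_umount() above. |
| */ |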
| |
| #ifdef __ARCH_WANT_SYS_OLDUMOUNT |
| |
| /* |
| * The 2.0 compatible umount. No flags. |
| */ |
| SYSCALL_DEFINE1(oldumount, char __user *, name) |
| { |
| return ksys_umount(name, 0); |
| } |
| |
| #endif |
| |
| static bool is_mnt_ns_file(struct dentry *dentry) |
| { |
| /* Is this a proxy for a mount namespace? */ |
| return dentry->d_op == &ns_dentry_operations && |
| dentry->d_fsdata == &mntns_operations; |
| } |
| |
| static struct mnt_namespace *to_mnt_ns(struct ns_common *ns) |
| { |
| return container_of(ns, struct mnt_namespace, ns); |
| } |
| |
| struct ns_common *from_mnt_ns(struct mnt_namespace *mnt) |
| { |
| return &mnt->ns; |
| } |
| |
| static bool mnt_ns_loop(struct dentry *dentry) |
| { |
| /* Could bind mounting the mount namespace inode cause a |
| * mount namespace loop? |
| */ |
| struct mnt_namespace *mnt_ns; |
| if (!is_mnt_ns_file(dentry)) |
| return false; |
| |
| mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode)); |
| return current->nsproxy->mnt_ns->seq >= mnt_ns->seq; |
| } |
| |
| struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, |
| int flag) |
| { |
| struct mount *res, *p, *q, *r, *parent; |
| |
| if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt)) |
| return ERR_PTR(-EINVAL); |
| |
| if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry)) |
| return ERR_PTR(-EINVAL); |
| |
| res = q = clone_mnt(mnt, dentry, flag); |
| if (IS_ERR(q)) |
| return q; |
| |
| q->mnt_mountpoint = mnt->mnt_mountpoint; |
| |
| p = mnt; |
| list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { |
| struct mount *s; |
| if (!is_subdir(r->mnt_mountpoint, dentry)) |
| continue; |
| |
| for (s = r; s; s = next_mnt(s, r)) { |
| if (!(flag & CL_COPY_UNBINDABLE) && |
| IS_MNT_UNBINDABLE(s)) { |
| if (s->mnt.mnt_flags & MNT_LOCKED) { |
| /* Both unbindable and locked. */ |
| q = ERR_PTR(-EPERM); |
| goto out; |
| } else { |
| s = skip_mnt_tree(s); |
| continue; |
| } |
| } |
| if (!(flag & CL_COPY_MNT_NS_FILE) && |
| is_mnt_ns_file(s->mnt.mnt_root)) { |
| s = skip_mnt_tree(s); |
| continue; |
| } |
| while (p != s->mnt_parent) { |
| p = p->mnt_parent; |
| q = q->mnt_parent; |
| } |
| p = s; |
| parent = q; |
| q = clone_mnt(p, p->mnt.mnt_root, flag); |
| if (IS_ERR(q)) |
| goto out; |
| lock_mount_hash(); |
| list_add_tail(&q->mnt_list, &res->mnt_list); |
| attach_mnt(q, parent, p->mnt_mp); |
| unlock_mount_hash(); |
| } |
| } |
| return res; |
| out: |
| if (res) { |
| lock_mount_hash(); |
| umount_tree(res, UMOUNT_SYNC); |
| unlock_mount_hash(); |
| } |
| return q; |
| } |
| |
| /* Caller should check returned pointer for errors */ |
| |
| struct vfsmount *collect_mounts(const struct path *path) |
| { |
| struct mount *tree; |
| namespace_lock(); |
| if (!check_mnt(real_mount(path->mnt))) |
| tree = ERR_PTR(-EINVAL); |
| else |
| tree = copy_tree(real_mount(path->mnt), path->dentry, |
| CL_COPY_ALL | CL_PRIVATE); |
| namespace_unlock(); |
| if (IS_ERR(tree)) |
| return ERR_CAST(tree); |
| return &tree->mnt; |
| } |
| |
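| /* |
| * Sketch of the intended pairing (used, for example, by the audit code): |
| * a snapshot from collect_mounts() is walked with iterate_mounts() and |
| * released with drop_collected_mounts(), both defined below: |
| * |
| *     struct vfsmount *tree = collect_mounts(path); |
| *     if (!IS_ERR(tree)) { |
| *             iterate_mounts(f, arg, tree); |
| *             drop_collected_mounts(tree); |
| *     } |
| */ |
| |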
| static void free_mnt_ns(struct mnt_namespace *); |
| static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool); |
| |
| void dissolve_on_fput(struct vfsmount *mnt) |
| { |
| struct mnt_namespace *ns; |
| namespace_lock(); |
| lock_mount_hash(); |
| ns = real_mount(mnt)->mnt_ns; |
| if (ns) { |
| if (is_anon_ns(ns)) |
| umount_tree(real_mount(mnt), UMOUNT_CONNECTED); |
| else |
| ns = NULL; |
| } |
| unlock_mount_hash(); |
| namespace_unlock(); |
| if (ns) |
| free_mnt_ns(ns); |
| } |
| |
| void drop_collected_mounts(struct vfsmount *mnt) |
| { |
| namespace_lock(); |
| lock_mount_hash(); |
| umount_tree(real_mount(mnt), 0); |
| unlock_mount_hash(); |
| namespace_unlock(); |
| } |
| |
| static bool has_locked_children(struct mount *mnt, struct dentry *dentry) |
| { |
| struct mount *child; |
| |
| list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { |
| if (!is_subdir(child->mnt_mountpoint, dentry)) |
| continue; |
| |
| if (child->mnt.mnt_flags & MNT_LOCKED) |
| return true; |
| } |
| return false; |
| } |
| |
| /** |
| * clone_private_mount - create a private clone of a path |
| * @path: path to clone |
| * |
| * This creates a new vfsmount, which will be the clone of @path. The new mount |
| * will not be attached anywhere in the namespace and will be private (i.e. |
| * changes to the originating mount won't be propagated into this). |
| * |
| * Release with mntput(). |
| */ |
| struct vfsmount *clone_private_mount(const struct path *path) |
| { |
| struct mount *old_mnt = real_mount(path->mnt); |
| struct mount *new_mnt; |
| |
| down_read(&namespace_sem); |
| if (IS_MNT_UNBINDABLE(old_mnt)) |
| goto invalid; |
| |
| if (!check_mnt(old_mnt)) |
| goto invalid; |
| |
| if (has_locked_children(old_mnt, path->dentry)) |
| goto invalid; |
| |
| new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); |
| up_read(&namespace_sem); |
| |
| if (IS_ERR(new_mnt)) |
| return ERR_CAST(new_mnt); |
| |
| /* Longterm mount to be removed by kern_unmount*() */ |
| new_mnt->mnt_ns = MNT_NS_INTERNAL; |
| |
| return &new_mnt->mnt; |
| |
| invalid: |
| up_read(&namespace_sem); |
| return ERR_PTR(-EINVAL); |
| } |
| EXPORT_SYMBOL_GPL(clone_private_mount); |
| |
| int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, |
| struct vfsmount *root) |
| { |
| struct mount *mnt; |
| int res = f(root, arg); |
| if (res) |
| return res; |
| list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) { |
| res = f(&mnt->mnt, arg); |
| if (res) |
| return res; |
| } |
| return 0; |
| } |
| |
| static void lock_mnt_tree(struct mount *mnt) |
| { |
| struct mount *p; |
| |
| for (p = mnt; p; p = next_mnt(p, mnt)) { |
| int flags = p->mnt.mnt_flags; |
| /* Don't allow unprivileged users to change mount flags */ |
| flags |= MNT_LOCK_ATIME; |
| |
| if (flags & MNT_READONLY) |
| flags |= MNT_LOCK_READONLY; |
| |
| if (flags & MNT_NODEV) |
| flags |= MNT_LOCK_NODEV; |
| |
| if (flags & MNT_NOSUID) |
| flags |= MNT_LOCK_NOSUID; |
| |
| if (flags & MNT_NOEXEC) |
| flags |= MNT_LOCK_NOEXEC; |
| /* Don't allow unprivileged users to reveal what is under a mount */ |
| if (list_empty(&p->mnt_expire)) |
| flags |= MNT_LOCKED; |
| p->mnt.mnt_flags = flags; |
| } |
| } |
| |
| static void cleanup_group_ids(struct mount *mnt, struct mount *end) |
| { |
| struct mount *p; |
| |
| for (p = mnt; p != end; p = next_mnt(p, mnt)) { |
| if (p->mnt_group_id && !IS_MNT_SHARED(p)) |
| mnt_release_group_id(p); |
| } |
| } |
| |
| static int invent_group_ids(struct mount *mnt, bool recurse) |
| { |
| struct mount *p; |
| |
| for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) { |
| if (!p->mnt_group_id && !IS_MNT_SHARED(p)) { |
| int err = mnt_alloc_group_id(p); |
| if (err) { |
| cleanup_group_ids(mnt, p); |
| return err; |
| } |
| } |
| } |
| |
| return 0; |
| } |
| |
| int count_mounts(struct mnt_namespace *ns, struct mount *mnt) |
| { |
| unsigned int max = READ_ONCE(sysctl_mount_max); |
| unsigned int mounts = 0, old, pending, sum; |
| struct mount *p; |
| |
| for (p = mnt; p; p = next_mnt(p, mnt)) |
| mounts++; |
| |
| old = ns->mounts; |
| pending = ns->pending_mounts; |
| sum = old + pending; |
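| /* Reject on unsigned overflow or when sysctl_mount_max would be exceeded */ |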
| if ((old > sum) || |
| (pending > sum) || |
| (max < sum) || |
| (mounts > (max - sum))) |
| return -ENOSPC; |
| |
| ns->pending_mounts = pending + mounts; |
| return 0; |
| } |
| |
| /* |
| * @source_mnt : mount tree to be attached |
| * @dest_mnt : mount under which the tree @source_mnt is attached |
| * @dest_mp : mountpoint dentry on @dest_mnt at which @source_mnt |
| * is attached |
| * @moving : true when @source_mnt is being moved from its current |
| * location rather than newly added to the mount namespace |
| * |
| * NOTE: the table below explains the semantics when a source mount |
| * of a given type is attached to a destination mount of a given type. |
| * --------------------------------------------------------------------------- |
| * | BIND MOUNT OPERATION | |
| * |************************************************************************** |
| * | source-->| shared | private | slave | unbindable | |
| * | dest | | | | | |
| * | | | | | | | |
| * | v | | | | | |
| * |************************************************************************** |
| * | shared | shared (++) | shared (+) | shared(+++)| invalid | |
| * | | | | | | |
| * |non-shared| shared (+) | private | slave (*) | invalid | |
| * *************************************************************************** |
| * A bind operation clones the source mount and mounts the clone on the |
| * destination mount. |
| * |
| * (++) the cloned mount is propagated to all the mounts in the propagation |
| * tree of the destination mount and the cloned mount is added to |
| * the peer group of the source mount. |
| * (+) the cloned mount is created under the destination mount and is marked |
| * as shared. The cloned mount is added to the peer group of the source |
| * mount. |
| * (+++) the mount is propagated to all the mounts in the propagation tree |
| * of the destination mount and the cloned mount is made slave |
| * of the same master as that of the source mount. The cloned mount |
| * is marked as 'shared and slave'. |
| * (*) the cloned mount is made a slave of the same master as that of the |
| * source mount. |
| * |
| * --------------------------------------------------------------------------- |
| * | MOVE MOUNT OPERATION | |
| * |************************************************************************** |
| * | source-->| shared | private | slave | unbindable | |
| * | dest | | | | | |
| * | | | | | | | |
| * | v | | | | | |
| * |************************************************************************** |
| * | shared | shared (+) | shared (+) | shared(+++) | invalid | |
| * | | | | | | |
| * |non-shared| shared (+*) | private | slave (*) | unbindable | |
| * *************************************************************************** |
| * |
 * (+)  the mount is moved to the destination and is then propagated to
 * all the mounts in the propagation tree of the destination mount.
| * (+*) the mount is moved to the destination. |
| * (+++) the mount is moved to the destination and is then propagated to |
| * all the mounts belonging to the destination mount's propagation tree. |
| * the mount is marked as 'shared and slave'. |
| * (*) the mount continues to be a slave at the new location. |
| * |
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
| * Must be called without spinlocks held, since this function can sleep |
| * in allocations. |
| */ |
| static int attach_recursive_mnt(struct mount *source_mnt, |
| struct mount *dest_mnt, |
| struct mountpoint *dest_mp, |
| bool moving) |
| { |
| struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; |
| HLIST_HEAD(tree_list); |
| struct mnt_namespace *ns = dest_mnt->mnt_ns; |
| struct mountpoint *smp; |
| struct mount *child, *p; |
| struct hlist_node *n; |
| int err; |
| |
| /* Preallocate a mountpoint in case the new mounts need |
| * to be tucked under other mounts. |
| */ |
| smp = get_mountpoint(source_mnt->mnt.mnt_root); |
| if (IS_ERR(smp)) |
| return PTR_ERR(smp); |
| |
| /* Is there space to add these mounts to the mount namespace? */ |
| if (!moving) { |
| err = count_mounts(ns, source_mnt); |
| if (err) |
| goto out; |
| } |
| |
| if (IS_MNT_SHARED(dest_mnt)) { |
| err = invent_group_ids(source_mnt, true); |
| if (err) |
| goto out; |
| err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); |
| lock_mount_hash(); |
| if (err) |
| goto out_cleanup_ids; |
| for (p = source_mnt; p; p = next_mnt(p, source_mnt)) |
| set_mnt_shared(p); |
| } else { |
| lock_mount_hash(); |
| } |
| if (moving) { |
| unhash_mnt(source_mnt); |
| attach_mnt(source_mnt, dest_mnt, dest_mp); |
| touch_mnt_namespace(source_mnt->mnt_ns); |
| } else { |
| if (source_mnt->mnt_ns) { |
| /* move from anon - the caller will destroy */ |
| list_del_init(&source_mnt->mnt_ns->list); |
| } |
| mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); |
| commit_tree(source_mnt); |
| } |
| |
| hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { |
| struct mount *q; |
| hlist_del_init(&child->mnt_hash); |
| q = __lookup_mnt(&child->mnt_parent->mnt, |
| child->mnt_mountpoint); |
| if (q) |
| mnt_change_mountpoint(child, smp, q); |
| /* Notice when we are propagating across user namespaces */ |
| if (child->mnt_parent->mnt_ns->user_ns != user_ns) |
| lock_mnt_tree(child); |
| child->mnt.mnt_flags &= ~MNT_LOCKED; |
| commit_tree(child); |
| } |
| put_mountpoint(smp); |
| unlock_mount_hash(); |
| |
| return 0; |
| |
| out_cleanup_ids: |
| while (!hlist_empty(&tree_list)) { |
| child = hlist_entry(tree_list.first, struct mount, mnt_hash); |
| child->mnt_parent->mnt_ns->pending_mounts = 0; |
| umount_tree(child, UMOUNT_SYNC); |
| } |
| unlock_mount_hash(); |
| cleanup_group_ids(source_mnt, NULL); |
| out: |
| ns->pending_mounts = 0; |
| |
| read_seqlock_excl(&mount_lock); |
| put_mountpoint(smp); |
| read_sequnlock_excl(&mount_lock); |
| |
| return err; |
| } |
| |
| static struct mountpoint *lock_mount(struct path *path) |
| { |
| struct vfsmount *mnt; |
| struct dentry *dentry = path->dentry; |
| retry: |
| inode_lock(dentry->d_inode); |
| if (unlikely(cant_mount(dentry))) { |
| inode_unlock(dentry->d_inode); |
| return ERR_PTR(-ENOENT); |
| } |
| namespace_lock(); |
| mnt = lookup_mnt(path); |
| if (likely(!mnt)) { |
| struct mountpoint *mp = get_mountpoint(dentry); |
| if (IS_ERR(mp)) { |
| namespace_unlock(); |
| inode_unlock(dentry->d_inode); |
| return mp; |
| } |
| return mp; |
| } |
| namespace_unlock(); |
| inode_unlock(path->dentry->d_inode); |
| path_put(path); |
| path->mnt = mnt; |
| dentry = path->dentry = dget(mnt->mnt_root); |
| goto retry; |
| } |
| |
| static void unlock_mount(struct mountpoint *where) |
| { |
| struct dentry *dentry = where->m_dentry; |
| |
| read_seqlock_excl(&mount_lock); |
| put_mountpoint(where); |
| read_sequnlock_excl(&mount_lock); |
| |
| namespace_unlock(); |
| inode_unlock(dentry->d_inode); |
| } |
| |
| static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) |
| { |
| if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER) |
| return -EINVAL; |
| |
| if (d_is_dir(mp->m_dentry) != |
| d_is_dir(mnt->mnt.mnt_root)) |
| return -ENOTDIR; |
| |
| return attach_recursive_mnt(mnt, p, mp, false); |
| } |
| |
| /* |
| * Sanity check the flags to change_mnt_propagation. |
| */ |
| |
| static int flags_to_propagation_type(int ms_flags) |
| { |
| int type = ms_flags & ~(MS_REC | MS_SILENT); |
| |
| /* Fail if any non-propagation flags are set */ |
| if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) |
| return 0; |
| /* Only one propagation flag should be set */ |
| if (!is_power_of_2(type)) |
| return 0; |
| return type; |
| } |
| |
| /* |
| * recursively change the type of the mountpoint. |
| */ |
| static int do_change_type(struct path *path, int ms_flags) |
| { |
| struct mount *m; |
| struct mount *mnt = real_mount(path->mnt); |
| int recurse = ms_flags & MS_REC; |
| int type; |
| int err = 0; |
| |
| if (path->dentry != path->mnt->mnt_root) |
| return -EINVAL; |
| |
| type = flags_to_propagation_type(ms_flags); |
| if (!type) |
| return -EINVAL; |
| |
| namespace_lock(); |
| if (type == MS_SHARED) { |
| err = invent_group_ids(mnt, recurse); |
| if (err) |
| goto out_unlock; |
| } |
| |
| lock_mount_hash(); |
| for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) |
| change_mnt_propagation(m, type); |
| unlock_mount_hash(); |
| |
| out_unlock: |
| namespace_unlock(); |
| return err; |
| } |
| |
| static struct mount *__do_loopback(struct path *old_path, int recurse) |
| { |
| struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt); |
| |
| if (IS_MNT_UNBINDABLE(old)) |
| return mnt; |
| |
| if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations) |
| return mnt; |
| |
| if (!recurse && has_locked_children(old, old_path->dentry)) |
| return mnt; |
| |
| if (recurse) |
| mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE); |
| else |
| mnt = clone_mnt(old, old_path->dentry, 0); |
| |
| if (!IS_ERR(mnt)) |
| mnt->mnt.mnt_flags &= ~MNT_LOCKED; |
| |
| return mnt; |
| } |
| |
| /* |
| * do loopback mount. |
| */ |
| static int do_loopback(struct path *path, const char *old_name, |
| int recurse) |
| { |
| struct path old_path; |
| struct mount *mnt = NULL, *parent; |
| struct mountpoint *mp; |
| int err; |
| if (!old_name || !*old_name) |
| return -EINVAL; |
| err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); |
| if (err) |
| return err; |
| |
| err = -EINVAL; |
| if (mnt_ns_loop(old_path.dentry)) |
| goto out; |
| |
| mp = lock_mount(path); |
| if (IS_ERR(mp)) { |
| err = PTR_ERR(mp); |
| goto out; |
| } |
| |
| parent = real_mount(path->mnt); |
| if (!check_mnt(parent)) |
| goto out2; |
| |
| mnt = __do_loopback(&old_path, recurse); |
| if (IS_ERR(mnt)) { |
| err = PTR_ERR(mnt); |
| goto out2; |
| } |
| |
| err = graft_tree(mnt, parent, mp); |
| if (err) { |
| lock_mount_hash(); |
| umount_tree(mnt, UMOUNT_SYNC); |
| unlock_mount_hash(); |
| } |
| out2: |
| unlock_mount(mp); |
| out: |
| path_put(&old_path); |
| return err; |
| } |
| |
| static struct file *open_detached_copy(struct path *path, bool recursive) |
| { |
| struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; |
| struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true); |
| struct mount *mnt, *p; |
| struct file *file; |
| |
| if (IS_ERR(ns)) |
| return ERR_CAST(ns); |
| |
| namespace_lock(); |
| mnt = __do_loopback(path, recursive); |
| if (IS_ERR(mnt)) { |
| namespace_unlock(); |
| free_mnt_ns(ns); |
| return ERR_CAST(mnt); |
| } |
| |
| lock_mount_hash(); |
| for (p = mnt; p; p = next_mnt(p, mnt)) { |
| p->mnt_ns = ns; |
| ns->mounts++; |
| } |
| ns->root = mnt; |
| list_add_tail(&ns->list, &mnt->mnt_list); |
| mntget(&mnt->mnt); |
| unlock_mount_hash(); |
| namespace_unlock(); |
| |
| mntput(path->mnt); |
| path->mnt = &mnt->mnt; |
| file = dentry_open(path, O_PATH, current_cred()); |
| if (IS_ERR(file)) |
| dissolve_on_fput(path->mnt); |
| else |
| file->f_mode |= FMODE_NEED_UNMOUNT; |
| return file; |
| } |
| |
| SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags) |
| { |
| struct file *file; |
| struct path path; |
| int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW; |
| bool detached = flags & OPEN_TREE_CLONE; |
| int error; |
| int fd; |
| |
| BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC); |
| |
| if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE | |
| AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE | |
| OPEN_TREE_CLOEXEC)) |
| return -EINVAL; |
| |
| if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE) |
| return -EINVAL; |
| |
| if (flags & AT_NO_AUTOMOUNT) |
| lookup_flags &= ~LOOKUP_AUTOMOUNT; |
| if (flags & AT_SYMLINK_NOFOLLOW) |
| lookup_flags &= ~LOOKUP_FOLLOW; |
| if (flags & AT_EMPTY_PATH) |
| lookup_flags |= LOOKUP_EMPTY; |
| |
| if (detached && !may_mount()) |
| return -EPERM; |
| |
| fd = get_unused_fd_flags(flags & O_CLOEXEC); |
| if (fd < 0) |
| return fd; |
| |
| error = user_path_at(dfd, filename, lookup_flags, &path); |
| if (unlikely(error)) { |
| file = ERR_PTR(error); |
| } else { |
| if (detached) |
| file = open_detached_copy(&path, flags & AT_RECURSIVE); |
| else |
| file = dentry_open(&path, O_PATH, current_cred()); |
| path_put(&path); |
| } |
| if (IS_ERR(file)) { |
| put_unused_fd(fd); |
| return PTR_ERR(file); |
| } |
| fd_install(fd, file); |
| return fd; |
| } |
| |
| /* |
| * Don't allow locked mount flags to be cleared. |
| * |
| * No locks need to be held here while testing the various MNT_LOCK |
| * flags because those flags can never be cleared once they are set. |
| */ |
| static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags) |
| { |
| unsigned int fl = mnt->mnt.mnt_flags; |
| |
| if ((fl & MNT_LOCK_READONLY) && |
| !(mnt_flags & MNT_READONLY)) |
| return false; |
| |
| if ((fl & MNT_LOCK_NODEV) && |
| !(mnt_flags & MNT_NODEV)) |
| return false; |
| |
| if ((fl & MNT_LOCK_NOSUID) && |
| !(mnt_flags & MNT_NOSUID)) |
| return false; |
| |
| if ((fl & MNT_LOCK_NOEXEC) && |
| !(mnt_flags & MNT_NOEXEC)) |
| return false; |
| |
| if ((fl & MNT_LOCK_ATIME) && |
| ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) |
| return false; |
| |
| return true; |
| } |
| |
| static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags) |
| { |
| bool readonly_request = (mnt_flags & MNT_READONLY); |
| |
| if (readonly_request == __mnt_is_readonly(&mnt->mnt)) |
| return 0; |
| |
| if (readonly_request) |
| return mnt_make_readonly(mnt); |
| |
| mnt->mnt.mnt_flags &= ~MNT_READONLY; |
| return 0; |
| } |
| |
| static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags) |
| { |
| mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; |
| mnt->mnt.mnt_flags = mnt_flags; |
| touch_mnt_namespace(mnt->mnt_ns); |
| } |
| |
| static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt) |
| { |
| struct super_block *sb = mnt->mnt_sb; |
| |
| if (!__mnt_is_readonly(mnt) && |
| (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) { |
| char *buf = (char *)__get_free_page(GFP_KERNEL); |
| char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM); |
| struct tm tm; |
| |
| time64_to_tm(sb->s_time_max, 0, &tm); |
| |
| pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n", |
| sb->s_type->name, |
| is_mounted(mnt) ? "remounted" : "mounted", |
| mntpath, |
| tm.tm_year+1900, (unsigned long long)sb->s_time_max); |
| |
| free_page((unsigned long)buf); |
| } |
| } |
| |
| /* |
| * Handle reconfiguration of the mountpoint only without alteration of the |
| * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND |
| * to mount(2). |
| */ |
| static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags) |
| { |
| struct super_block *sb = path->mnt->mnt_sb; |
| struct mount *mnt = real_mount(path->mnt); |
| int ret; |
| |
| if (!check_mnt(mnt)) |
| return -EINVAL; |
| |
| if (path->dentry != mnt->mnt.mnt_root) |
| return -EINVAL; |
| |
| if (!can_change_locked_flags(mnt, mnt_flags)) |
| return -EPERM; |
| |
| /* |
	 * We're only checking whether the superblock is read-only, not
| * changing it, so only take down_read(&sb->s_umount). |
| */ |
| down_read(&sb->s_umount); |
| lock_mount_hash(); |
| ret = change_mount_ro_state(mnt, mnt_flags); |
| if (ret == 0) |
| set_mount_attributes(mnt, mnt_flags); |
| unlock_mount_hash(); |
| up_read(&sb->s_umount); |
| |
| mnt_warn_timestamp_expiry(path, &mnt->mnt); |
| |
| return ret; |
| } |
| |
| /* |
 * change filesystem flags. dir should be the physical root of the filesystem.
| * If you've mounted a non-root directory somewhere and want to do remount |
| * on it - tough luck. |
| */ |
| static int do_remount(struct path *path, int ms_flags, int sb_flags, |
| int mnt_flags, void *data) |
| { |
| int err; |
| struct super_block *sb = path->mnt->mnt_sb; |
| struct mount *mnt = real_mount(path->mnt); |
| struct fs_context *fc; |
| |
| if (!check_mnt(mnt)) |
| return -EINVAL; |
| |
| if (path->dentry != path->mnt->mnt_root) |
| return -EINVAL; |
| |
| if (!can_change_locked_flags(mnt, mnt_flags)) |
| return -EPERM; |
| |
| fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK); |
| if (IS_ERR(fc)) |
| return PTR_ERR(fc); |
| |
| fc->oldapi = true; |
| err = parse_monolithic_mount_data(fc, data); |
| if (!err) { |
| down_write(&sb->s_umount); |
| err = -EPERM; |
| if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) { |
| err = reconfigure_super(fc); |
| if (!err) { |
| lock_mount_hash(); |
| set_mount_attributes(mnt, mnt_flags); |
| unlock_mount_hash(); |
| } |
| } |
| up_write(&sb->s_umount); |
| } |
| |
| mnt_warn_timestamp_expiry(path, &mnt->mnt); |
| |
| put_fs_context(fc); |
| return err; |
| } |
| |
| static inline int tree_contains_unbindable(struct mount *mnt) |
| { |
| struct mount *p; |
| for (p = mnt; p; p = next_mnt(p, mnt)) { |
| if (IS_MNT_UNBINDABLE(p)) |
| return 1; |
| } |
| return 0; |
| } |
| |
| /* |
| * Check that there aren't references to earlier/same mount namespaces in the |
| * specified subtree. Such references can act as pins for mount namespaces |
| * that aren't checked by the mount-cycle checking code, thereby allowing |
| * cycles to be made. |
| */ |
| static bool check_for_nsfs_mounts(struct mount *subtree) |
| { |
| struct mount *p; |
| bool ret = false; |
| |
| lock_mount_hash(); |
| for (p = subtree; p; p = next_mnt(p, subtree)) |
| if (mnt_ns_loop(p->mnt.mnt_root)) |
| goto out; |
| |
| ret = true; |
| out: |
| unlock_mount_hash(); |
| return ret; |
| } |
| |
| static int do_set_group(struct path *from_path, struct path *to_path) |
| { |
| struct mount *from, *to; |
| int err; |
| |
| from = real_mount(from_path->mnt); |
| to = real_mount(to_path->mnt); |
| |
| namespace_lock(); |
| |
| err = -EINVAL; |
| /* To and From must be mounted */ |
| if (!is_mounted(&from->mnt)) |
| goto out; |
| if (!is_mounted(&to->mnt)) |
| goto out; |
| |
| err = -EPERM; |
| /* We should be allowed to modify mount namespaces of both mounts */ |
| if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN)) |
| goto out; |
| if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN)) |
| goto out; |
| |
| err = -EINVAL; |
| /* To and From paths should be mount roots */ |
| if (from_path->dentry != from_path->mnt->mnt_root) |
| goto out; |
| if (to_path->dentry != to_path->mnt->mnt_root) |
| goto out; |
| |
| /* Setting sharing groups is only allowed across same superblock */ |
| if (from->mnt.mnt_sb != to->mnt.mnt_sb) |
| goto out; |
| |
| /* From mount root should be wider than To mount root */ |
| if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root)) |
| goto out; |
| |
| /* From mount should not have locked children in place of To's root */ |
| if (has_locked_children(from, to->mnt.mnt_root)) |
| goto out; |
| |
| /* Setting sharing groups is only allowed on private mounts */ |
| if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to)) |
| goto out; |
| |
| /* From should not be private */ |
| if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from)) |
| goto out; |
| |
| if (IS_MNT_SLAVE(from)) { |
| struct mount *m = from->mnt_master; |
| |
| list_add(&to->mnt_slave, &m->mnt_slave_list); |
| to->mnt_master = m; |
| } |
| |
| if (IS_MNT_SHARED(from)) { |
| to->mnt_group_id = from->mnt_group_id; |
| list_add(&to->mnt_share, &from->mnt_share); |
| lock_mount_hash(); |
| set_mnt_shared(to); |
| unlock_mount_hash(); |
| } |
| |
| err = 0; |
| out: |
| namespace_unlock(); |
| return err; |
| } |
| |
| static int do_move_mount(struct path *old_path, struct path *new_path) |
| { |
| struct mnt_namespace *ns; |
| struct mount *p; |
| struct mount *old; |
| struct mount *parent; |
| struct mountpoint *mp, *old_mp; |
| int err; |
| bool attached; |
| |
| mp = lock_mount(new_path); |
| if (IS_ERR(mp)) |
| return PTR_ERR(mp); |
| |
| old = real_mount(old_path->mnt); |
| p = real_mount(new_path->mnt); |
| parent = old->mnt_parent; |
| attached = mnt_has_parent(old); |
| old_mp = old->mnt_mp; |
| ns = old->mnt_ns; |
| |
| err = -EINVAL; |
| /* The mountpoint must be in our namespace. */ |
| if (!check_mnt(p)) |
| goto out; |
| |
| /* The thing moved must be mounted... */ |
| if (!is_mounted(&old->mnt)) |
| goto out; |
| |
| /* ... and either ours or the root of anon namespace */ |
| if (!(attached ? check_mnt(old) : is_anon_ns(ns))) |
| goto out; |
| |
| if (old->mnt.mnt_flags & MNT_LOCKED) |
| goto out; |
| |
| if (old_path->dentry != old_path->mnt->mnt_root) |
| goto out; |
| |
| if (d_is_dir(new_path->dentry) != |
| d_is_dir(old_path->dentry)) |
| goto out; |
| /* |
| * Don't move a mount residing in a shared parent. |
| */ |
| if (attached && IS_MNT_SHARED(parent)) |
| goto out; |
| /* |
| * Don't move a mount tree containing unbindable mounts to a destination |
| * mount which is shared. |
| */ |
| if (IS_MNT_SHARED(p) && tree_contains_unbindable(old)) |
| goto out; |
| err = -ELOOP; |
| if (!check_for_nsfs_mounts(old)) |
| goto out; |
| for (; mnt_has_parent(p); p = p->mnt_parent) |
| if (p == old) |
| goto out; |
| |
| err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp, |
| attached); |
| if (err) |
| goto out; |
| |
	/* if the mount is moved, it should no longer expire
	 * automatically */
| list_del_init(&old->mnt_expire); |
| if (attached) |
| put_mountpoint(old_mp); |
| out: |
| unlock_mount(mp); |
| if (!err) { |
| if (attached) |
| mntput_no_expire(parent); |
| else |
| free_mnt_ns(ns); |
| } |
| return err; |
| } |
| |
| static int do_move_mount_old(struct path *path, const char *old_name) |
| { |
| struct path old_path; |
| int err; |
| |
| if (!old_name || !*old_name) |
| return -EINVAL; |
| |
| err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); |
| if (err) |
| return err; |
| |
| err = do_move_mount(&old_path, path); |
| path_put(&old_path); |
| return err; |
| } |
| |
| /* |
| * add a mount into a namespace's mount tree |
| */ |
| static int do_add_mount(struct mount *newmnt, struct mountpoint *mp, |
| struct path *path, int mnt_flags) |
| { |
| struct mount *parent = real_mount(path->mnt); |
| |
| mnt_flags &= ~MNT_INTERNAL_FLAGS; |
| |
| if (unlikely(!check_mnt(parent))) { |
| /* that's acceptable only for automounts done in private ns */ |
| if (!(mnt_flags & MNT_SHRINKABLE)) |
| return -EINVAL; |
| /* ... and for those we'd better have mountpoint still alive */ |
| if (!parent->mnt_ns) |
| return -EINVAL; |
| } |
| |
| /* Refuse the same filesystem on the same mount point */ |
| if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb && |
| path->mnt->mnt_root == path->dentry) |
| return -EBUSY; |
| |
| if (d_is_symlink(newmnt->mnt.mnt_root)) |
| return -EINVAL; |
| |
| newmnt->mnt.mnt_flags = mnt_flags; |
| return graft_tree(newmnt, parent, mp); |
| } |
| |
| static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags); |
| |
| /* |
| * Create a new mount using a superblock configuration and request it |
| * be added to the namespace tree. |
| */ |
| static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint, |
| unsigned int mnt_flags) |
| { |
| struct vfsmount *mnt; |
| struct mountpoint *mp; |
| struct super_block *sb = fc->root->d_sb; |
| int error; |
| |
| error = security_sb_kern_mount(sb); |
| if (!error && mount_too_revealing(sb, &mnt_flags)) |
| error = -EPERM; |
| |
| if (unlikely(error)) { |
| fc_drop_locked(fc); |
| return error; |
| } |
| |
| up_write(&sb->s_umount); |
| |
| mnt = vfs_create_mount(fc); |
| if (IS_ERR(mnt)) |
| return PTR_ERR(mnt); |
| |
| mnt_warn_timestamp_expiry(mountpoint, mnt); |
| |
| mp = lock_mount(mountpoint); |
| if (IS_ERR(mp)) { |
| mntput(mnt); |
| return PTR_ERR(mp); |
| } |
| error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags); |
| unlock_mount(mp); |
| if (error < 0) |
| mntput(mnt); |
| return error; |
| } |
| |
| /* |
| * create a new mount for userspace and request it to be added into the |
| * namespace's tree |
| */ |
| static int do_new_mount(struct path *path, const char *fstype, int sb_flags, |
| int mnt_flags, const char *name, void *data) |
| { |
| struct file_system_type *type; |
| struct fs_context *fc; |
| const char *subtype = NULL; |
| int err = 0; |
| |
| if (!fstype) |
| return -EINVAL; |
| |
| type = get_fs_type(fstype); |
| if (!type) |
| return -ENODEV; |
| |
| if (type->fs_flags & FS_HAS_SUBTYPE) { |
| subtype = strchr(fstype, '.'); |
| if (subtype) { |
| subtype++; |
| if (!*subtype) { |
| put_filesystem(type); |
| return -EINVAL; |
| } |
| } |
| } |
| |
| fc = fs_context_for_mount(type, sb_flags); |
| put_filesystem(type); |
| if (IS_ERR(fc)) |
| return PTR_ERR(fc); |
| |
| if (subtype) |
| err = vfs_parse_fs_string(fc, "subtype", |
| subtype, strlen(subtype)); |
| if (!err && name) |
| err = vfs_parse_fs_string(fc, "source", name, strlen(name)); |
| if (!err) |
| err = parse_monolithic_mount_data(fc, data); |
| if (!err && !mount_capable(fc)) |
| err = -EPERM; |
| if (!err) |
| err = vfs_get_tree(fc); |
| if (!err) |
| err = do_new_mount_fc(fc, path, mnt_flags); |
| |
| put_fs_context(fc); |
| return err; |
| } |
| |
| int finish_automount(struct vfsmount *m, struct path *path) |
| { |
| struct dentry *dentry = path->dentry; |
| struct mountpoint *mp; |
| struct mount *mnt; |
| int err; |
| |
| if (!m) |
| return 0; |
| if (IS_ERR(m)) |
| return PTR_ERR(m); |
| |
| mnt = real_mount(m); |
	/* The new mount record should have at least 2 refs to prevent it from
	 * being expired before we get a chance to add it
| */ |
| BUG_ON(mnt_get_count(mnt) < 2); |
| |
| if (m->mnt_sb == path->mnt->mnt_sb && |
| m->mnt_root == dentry) { |
| err = -ELOOP; |
| goto discard; |
| } |
| |
| /* |
	 * we don't want to use lock_mount() - in this case finding something
	 * that overmounts our mountpoint means "quietly drop what we've
	 * got", not "try to mount it on top".
| */ |
| inode_lock(dentry->d_inode); |
| namespace_lock(); |
| if (unlikely(cant_mount(dentry))) { |
| err = -ENOENT; |
| goto discard_locked; |
| } |
| rcu_read_lock(); |
| if (unlikely(__lookup_mnt(path->mnt, dentry))) { |
| rcu_read_unlock(); |
| err = 0; |
| goto discard_locked; |
| } |
| rcu_read_unlock(); |
| mp = get_mountpoint(dentry); |
| if (IS_ERR(mp)) { |
| err = PTR_ERR(mp); |
| goto discard_locked; |
| } |
| |
| err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE); |
| unlock_mount(mp); |
| if (unlikely(err)) |
| goto discard; |
| mntput(m); |
| return 0; |
| |
| discard_locked: |
| namespace_unlock(); |
| inode_unlock(dentry->d_inode); |
| discard: |
| /* remove m from any expiration list it may be on */ |
| if (!list_empty(&mnt->mnt_expire)) { |
| namespace_lock(); |
| list_del_init(&mnt->mnt_expire); |
| namespace_unlock(); |
| } |
| mntput(m); |
| mntput(m); |
| return err; |
| } |
| |
| /** |
| * mnt_set_expiry - Put a mount on an expiration list |
| * @mnt: The mount to list. |
| * @expiry_list: The list to add the mount to. |
| */ |
| void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) |
| { |
| namespace_lock(); |
| |
| list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); |
| |
| namespace_unlock(); |
| } |
| EXPORT_SYMBOL(mnt_set_expiry); |
| |
| /* |
| * process a list of expirable mountpoints with the intent of discarding any |
| * mountpoints that aren't in use and haven't been touched since last we came |
| * here |
| */ |
| void mark_mounts_for_expiry(struct list_head *mounts) |
| { |
| struct mount *mnt, *next; |
| LIST_HEAD(graveyard); |
| |
| if (list_empty(mounts)) |
| return; |
| |
| namespace_lock(); |
| lock_mount_hash(); |
| |
| /* extract from the expiration list every vfsmount that matches the |
| * following criteria: |
| * - only referenced by its parent vfsmount |
| * - still marked for expiry (marked on the last call here; marks are |
| * cleared by mntput()) |
| */ |
| list_for_each_entry_safe(mnt, next, mounts, mnt_expire) { |
| if (!xchg(&mnt->mnt_expiry_mark, 1) || |
| propagate_mount_busy(mnt, 1)) |
| continue; |
| list_move(&mnt->mnt_expire, &graveyard); |
| } |
| while (!list_empty(&graveyard)) { |
| mnt = list_first_entry(&graveyard, struct mount, mnt_expire); |
| touch_mnt_namespace(mnt->mnt_ns); |
| umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); |
| } |
| unlock_mount_hash(); |
| namespace_unlock(); |
| } |
| |
| EXPORT_SYMBOL_GPL(mark_mounts_for_expiry); |
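
/*
 * Illustrative sketch (an assumption about callers, not part of this file):
 * a filesystem that creates automounted submounts typically adds each one to
 * its own list with mnt_set_expiry() and calls mark_mounts_for_expiry() from
 * a periodic worker, so submounts that stayed unused since the previous run
 * get unmounted; NFS and AFS use this scheme for their automount points.
 */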
| |
| /* |
| * Ripoff of 'select_parent()' |
| * |
| * search the list of submounts for a given mountpoint, and move any |
| * shrinkable submounts to the 'graveyard' list. |
| */ |
| static int select_submounts(struct mount *parent, struct list_head *graveyard) |
| { |
| struct mount *this_parent = parent; |
| struct list_head *next; |
| int found = 0; |
| |
| repeat: |
| next = this_parent->mnt_mounts.next; |
| resume: |
| while (next != &this_parent->mnt_mounts) { |
| struct list_head *tmp = next; |
| struct mount *mnt = list_entry(tmp, struct mount, mnt_child); |
| |
| next = tmp->next; |
| if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE)) |
| continue; |
| /* |
		 * Descend a level if the mnt_mounts list is non-empty.
| */ |
| if (!list_empty(&mnt->mnt_mounts)) { |
| this_parent = mnt; |
| goto repeat; |
| } |
| |
| if (!propagate_mount_busy(mnt, 1)) { |
| list_move_tail(&mnt->mnt_expire, graveyard); |
| found++; |
| } |
| } |
| /* |
| * All done at this level ... ascend and resume the search |
| */ |
| if (this_parent != parent) { |
| next = this_parent->mnt_child.next; |
| this_parent = this_parent->mnt_parent; |
| goto resume; |
| } |
| return found; |
| } |
| |
| /* |
| * process a list of expirable mountpoints with the intent of discarding any |
| * submounts of a specific parent mountpoint |
| * |
| * mount_lock must be held for write |
| */ |
| static void shrink_submounts(struct mount *mnt) |
| { |
| LIST_HEAD(graveyard); |
| struct mount *m; |
| |
| /* extract submounts of 'mountpoint' from the expiration list */ |
| while (select_submounts(mnt, &graveyard)) { |
| while (!list_empty(&graveyard)) { |
| m = list_first_entry(&graveyard, struct mount, |
| mnt_expire); |
| touch_mnt_namespace(m->mnt_ns); |
| umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC); |
| } |
| } |
| } |
| |
| static void *copy_mount_options(const void __user * data) |
| { |
| char *copy; |
| unsigned left, offset; |
| |
| if (!data) |
| return NULL; |
| |
| copy = kmalloc(PAGE_SIZE, GFP_KERNEL); |
| if (!copy) |
| return ERR_PTR(-ENOMEM); |
| |
| left = copy_from_user(copy, data, PAGE_SIZE); |
| |
| /* |
| * Not all architectures have an exact copy_from_user(). Resort to |
| * byte at a time. |
| */ |
| offset = PAGE_SIZE - left; |
| while (left) { |
| char c; |
| if (get_user(c, (const char __user *)data + offset)) |
| break; |
| copy[offset] = c; |
| left--; |
| offset++; |
| } |
| |
|
|