// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2021
 *
 * Author: Mike Rapoport <rppt@linux.ibm.com>
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/swap.h>
#include <linux/mount.h>
#include <linux/memfd.h>
#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/pseudo_fs.h>
#include <linux/secretmem.h>
#include <linux/set_memory.h>
#include <linux/sched/signal.h>

#include <uapi/linux/magic.h>

#include <asm/tlbflush.h>

#include "internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "secretmem: " fmt

/*
 * Define mode and flag masks to allow validation of the system call
 * parameters.
 */
#define SECRETMEM_MODE_MASK	(0x0)
#define SECRETMEM_FLAGS_MASK	SECRETMEM_MODE_MASK

static bool secretmem_enable __ro_after_init;
module_param_named(enable, secretmem_enable, bool, 0400);
MODULE_PARM_DESC(secretmem_enable,
		 "Enable secretmem and memfd_secret(2) system call");

static atomic_t secretmem_users;

bool secretmem_active(void)
{
	return !!atomic_read(&secretmem_users);
}

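/*
 * Pages are allocated on first fault and immediately removed from the
 * kernel direct map, so secretmem contents are never reachable through
 * the kernel's linear mapping while mapped by userspace.
 */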
static vm_fault_t secretmem_fault(struct vm_fault *vmf)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	pgoff_t offset = vmf->pgoff;
	gfp_t gfp = vmf->gfp_mask;
	unsigned long addr;
	struct page *page;
	int err;

	if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
		return vmf_error(-EINVAL);

retry:
	page = find_lock_page(mapping, offset);
	if (!page) {
		page = alloc_page(gfp | __GFP_ZERO);
		if (!page)
			return VM_FAULT_OOM;

		err = set_direct_map_invalid_noflush(page);
		if (err) {
			put_page(page);
			return vmf_error(err);
		}

		__SetPageUptodate(page);
		err = add_to_page_cache_lru(page, mapping, offset, gfp);
		if (unlikely(err)) {
			put_page(page);
			/*
			 * If a split of a large page was required, it
			 * already happened when we marked the page invalid,
			 * which guarantees that this call won't fail.
			 */
			set_direct_map_default_noflush(page);
			if (err == -EEXIST)
				goto retry;

			return vmf_error(err);
		}

		addr = (unsigned long)page_address(page);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct secretmem_vm_ops = {
	.fault = secretmem_fault,
};

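/*
 * Balance the atomic_inc() done when the file was created, so that
 * secretmem_active() reports whether any secretmem files remain open.
 */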
static int secretmem_release(struct inode *inode, struct file *file)
{
	atomic_dec(&secretmem_users);
	return 0;
}

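/*
 * Secretmem mappings must be shared and are implicitly mlock()ed: the
 * pages never reach swap and are excluded from core dumps.
 */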
static int secretmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
		return -EAGAIN;

	vma->vm_flags |= VM_LOCKED | VM_DONTDUMP;
	vma->vm_ops = &secretmem_vm_ops;

	return 0;
}

bool vma_is_secretmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &secretmem_vm_ops;
}

static const struct file_operations secretmem_fops = {
	.release	= secretmem_release,
	.mmap		= secretmem_mmap,
};

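/*
 * Secretmem pages are dropped from the direct map and must stay where
 * they are: refuse both isolation and migration.
 */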
static bool secretmem_isolate_page(struct page *page, isolate_mode_t mode)
{
	return false;
}

static int secretmem_migratepage(struct address_space *mapping,
				 struct page *newpage, struct page *page,
				 enum migrate_mode mode)
{
	return -EBUSY;
}

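/*
 * When a folio is freed, restore its direct map entry and zero it so
 * that no secret data leaks back into the page allocator.
 */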
static void secretmem_free_folio(struct folio *folio)
{
	set_direct_map_default_noflush(&folio->page);
	folio_zero_segment(folio, 0, folio_size(folio));
}

const struct address_space_operations secretmem_aops = {
	.dirty_folio	= noop_dirty_folio,
	.free_folio	= secretmem_free_folio,
	.migratepage	= secretmem_migratepage,
	.isolate_page	= secretmem_isolate_page,
};

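/*
 * The size of a secretmem file can be set only while it is still zero;
 * resizing a populated file is not supported.
 */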
static int secretmem_setattr(struct user_namespace *mnt_userns,
			     struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = iattr->ia_valid;

	if ((ia_valid & ATTR_SIZE) && inode->i_size)
		return -EINVAL;

	return simple_setattr(mnt_userns, dentry, iattr);
}

static const struct inode_operations secretmem_iops = {
	.setattr = secretmem_setattr,
};

static struct vfsmount *secretmem_mnt;

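/*
 * Create an anonymous file on the secretmem pseudo filesystem.  The
 * mapping is marked unevictable so reclaim never touches its pages.
 */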
static struct file *secretmem_file_create(unsigned long flags)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;

	inode = alloc_anon_inode(secretmem_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem",
				 O_RDWR, &secretmem_fops);
	if (IS_ERR(file))
		goto err_free_inode;

	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
	mapping_set_unevictable(inode->i_mapping);

	inode->i_op = &secretmem_iops;
	inode->i_mapping->a_ops = &secretmem_aops;

	/* pretend we are a normal file with zero size */
	inode->i_mode |= S_IFREG;
	inode->i_size = 0;

	return file;

err_free_inode:
	iput(inode);
	return file;
}

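/*
 * memfd_secret() returns a file descriptor backed by secretmem; since
 * SECRETMEM_FLAGS_MASK is empty, O_CLOEXEC is the only flag accepted
 * for now.
 */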
SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
{
	struct file *file;
	int fd, err;

	/* make sure local flags do not conflict with global fcntl.h */
	BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC);

	if (!secretmem_enable)
		return -ENOSYS;

	if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC))
		return -EINVAL;
	if (atomic_read(&secretmem_users) < 0)
		return -ENFILE;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = secretmem_file_create(flags);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto err_put_fd;
	}

	file->f_flags |= O_LARGEFILE;

	atomic_inc(&secretmem_users);
	fd_install(fd, file);
	return fd;

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int secretmem_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, SECRETMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type secretmem_fs = {
	.name		= "secretmem",
	.init_fs_context = secretmem_init_fs_context,
	.kill_sb	= kill_anon_super,
};

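/*
 * Mount the secretmem pseudo filesystem at boot when enabled with the
 * secretmem.enable=1 kernel parameter.
 */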
static int secretmem_init(void)
{
	if (!secretmem_enable)
		return 0;

	secretmem_mnt = kern_mount(&secretmem_fs);
	/* bail out early: dereferencing an ERR_PTR below would crash */
	if (IS_ERR(secretmem_mnt))
		return PTR_ERR(secretmem_mnt);

	/* prevent secretmem mappings from ever getting PROT_EXEC */
	secretmem_mnt->mnt_flags |= MNT_NOEXEC;

	return 0;
}
fs_initcall(secretmem_init);