blob: b9575957a7c2946018fd00ad8bdbb9f041894dcd [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Andrew Morton9d0243b2006-01-08 01:00:39 -08002/*
3 * Implement the manual drop-all-pagecache function
4 */
5
Johannes Weiner16e2df22021-09-02 14:53:21 -07006#include <linux/pagemap.h>
Andrew Morton9d0243b2006-01-08 01:00:39 -08007#include <linux/kernel.h>
8#include <linux/mm.h>
9#include <linux/fs.h>
10#include <linux/writeback.h>
11#include <linux/sysctl.h>
12#include <linux/gfp.h>
Andrew Yang8a144612023-06-30 17:22:02 +080013#include <linux/swap.h>
Dave Chinner55fa6092011-03-22 22:23:40 +110014#include "internal.h"
Andrew Morton9d0243b2006-01-08 01:00:39 -080015
/*
 * A global variable is a bit ugly, but it keeps the code simple.
 * Bitmask written via /proc/sys/vm/drop_caches:
 *   1 = drop pagecache, 2 = drop slab objects, 4 = suppress the info log.
 */
int sysctl_drop_caches;
18
/*
 * Drop clean, unmapped pagecache pages of every inode on @sb.
 * Called via iterate_supers() for each superblock; @unused is the
 * opaque iterate_supers() argument and is ignored here.
 *
 * Locking: s_inode_list_lock protects the walk of sb->s_inodes;
 * i_lock is taken per-inode (inner lock) to inspect i_state safely.
 */
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/*
		 * We must skip inodes in unusual state. We may also skip
		 * inodes without pages but we deliberately won't in case
		 * we need to reschedule to avoid softlockups.
		 */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping_empty(inode->i_mapping) && !need_resched())) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		/*
		 * Pin the inode before dropping the locks so it cannot be
		 * freed (and our list cursor invalidated) while we work on
		 * it without s_inode_list_lock held.
		 */
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		/*
		 * Release the previously pinned inode only now, outside the
		 * list lock (iput() may sleep), and keep the current one
		 * pinned until the next iteration so list_for_each_entry()
		 * can safely advance after we retake the lock.
		 */
		iput(toput_inode);
		toput_inode = inode;

		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	/* Drop the reference still held on the last processed inode. */
	iput(toput_inode);
}
50
Joe Perches1f7e0612014-06-06 14:38:05 -070051int drop_caches_sysctl_handler(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +020052 void *buffer, size_t *length, loff_t *ppos)
Andrew Morton9d0243b2006-01-08 01:00:39 -080053{
Petr Holasekcb16e952011-03-23 16:43:09 -070054 int ret;
55
56 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
57 if (ret)
58 return ret;
Andrew Morton9d0243b2006-01-08 01:00:39 -080059 if (write) {
Dave Hansen5509a5d2014-04-03 14:48:19 -070060 static int stfu;
61
62 if (sysctl_drop_caches & 1) {
Andrew Yang8a144612023-06-30 17:22:02 +080063 lru_add_drain_all();
Al Viro01a05b32010-03-23 06:06:58 -040064 iterate_supers(drop_pagecache_sb, NULL);
Dave Hansen5509a5d2014-04-03 14:48:19 -070065 count_vm_event(DROP_PAGECACHE);
66 }
67 if (sysctl_drop_caches & 2) {
Andrew Morton9d0243b2006-01-08 01:00:39 -080068 drop_slab();
Dave Hansen5509a5d2014-04-03 14:48:19 -070069 count_vm_event(DROP_SLAB);
70 }
71 if (!stfu) {
72 pr_info("%s (%d): drop_caches: %d\n",
73 current->comm, task_pid_nr(current),
74 sysctl_drop_caches);
75 }
76 stfu |= sysctl_drop_caches & 4;
Andrew Morton9d0243b2006-01-08 01:00:39 -080077 }
78 return 0;
79}