1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
29d0243bcSAndrew Morton /*
39d0243bcSAndrew Morton * Implement the manual drop-all-pagecache function
49d0243bcSAndrew Morton */
59d0243bcSAndrew Morton
616e2df2aSJohannes Weiner #include <linux/pagemap.h>
79d0243bcSAndrew Morton #include <linux/kernel.h>
89d0243bcSAndrew Morton #include <linux/mm.h>
99d0243bcSAndrew Morton #include <linux/fs.h>
109d0243bcSAndrew Morton #include <linux/writeback.h>
119d0243bcSAndrew Morton #include <linux/sysctl.h>
129d0243bcSAndrew Morton #include <linux/gfp.h>
13*8a144612SAndrew Yang #include <linux/swap.h>
1455fa6091SDave Chinner #include "internal.h"
159d0243bcSAndrew Morton
/* A global variable is a bit ugly, but it keeps the code simple */
/*
 * Bitmask last written via the drop_caches sysctl; consumed by
 * drop_caches_sysctl_handler() below (bit 0: pagecache, bit 1: slab,
 * bit 2: suppress further log messages).
 */
int sysctl_drop_caches;
189d0243bcSAndrew Morton
/*
 * Invalidate the resident pagecache of every inode on @sb.
 *
 * Called via iterate_supers() from drop_caches_sysctl_handler();
 * @unused is the opaque iterate_supers() argument and is ignored.
 *
 * Locking: sb->s_inode_list_lock protects the walk of sb->s_inodes;
 * inode->i_lock is taken to inspect i_state and to pin the inode with
 * __iget() before both locks are dropped around the (possibly
 * sleeping) invalidation.  The previously visited inode is kept
 * pinned in @toput_inode until the list lock has been retaken, so the
 * current list position cannot be freed under us.
 */
static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/*
		 * We must skip inodes in unusual state. We may also skip
		 * inodes without pages but we deliberately won't in case
		 * we need to reschedule to avoid softlockups.
		 */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping_empty(inode->i_mapping) && !need_resched())) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* Pin the inode so it survives dropping both locks. */
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

		/* Drop the whole file's clean pagecache (range 0..EOF). */
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		/* Release the previous inode only now; see locking note. */
		iput(toput_inode);
		toput_inode = inode;

		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(toput_inode);
}
509d0243bcSAndrew Morton
/*
 * sysctl handler for the drop_caches knob.
 *
 * Reads simply report the current value.  On a successful write, bit 0
 * of the written value drops clean pagecache from all superblocks,
 * bit 1 drops slab caches, and bit 2 permanently silences the
 * informational log line for subsequent writes.
 *
 * Returns 0 on success or the error from proc_dointvec_minmax().
 */
int drop_caches_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	static int silenced;	/* latched once a writer sets bit 2 */
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret)
		return ret;
	if (!write)
		return 0;

	if (sysctl_drop_caches & 1) {
		/* Flush per-CPU LRU batches so their pages are droppable. */
		lru_add_drain_all();
		iterate_supers(drop_pagecache_sb, NULL);
		count_vm_event(DROP_PAGECACHE);
	}
	if (sysctl_drop_caches & 2) {
		drop_slab();
		count_vm_event(DROP_SLAB);
	}
	if (!silenced)
		pr_info("%s (%d): drop_caches: %d\n",
			current->comm, task_pid_nr(current),
			sysctl_drop_caches);
	silenced |= sysctl_drop_caches & 4;

	return 0;
}
80