// SPDX-License-Identifier: GPL-2.0-only
#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/rbtree.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
#include <linux/blk-cgroup.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>
#include "internal.h"
/*
 * Fallback bdi for address spaces with no real backing device —
 * presumably used so callers always have a valid bdi to point at;
 * TODO(review): confirm exact usage against callers outside this file.
 */
struct backing_dev_info noop_backing_dev_info;
EXPORT_SYMBOL_GPL(noop_backing_dev_info);
/* Name reported for a bdi whose device name is not (or no longer) known. */
static const char *bdi_unknown_name = "(unknown)";
/*
 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
 * reader side locking.
 */
DEFINE_SPINLOCK(bdi_lock);
/* Monotonic cursor for handing out unique bdi ids (under bdi_lock). */
static u64 bdi_id_cursor;
/* All registered bdis, keyed by id, for lookup (under bdi_lock). */
static struct rb_root bdi_tree = RB_ROOT;
LIST_HEAD(bdi_list);
/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
/*
 * Accumulator for per-writeback statistics shown in debugfs.  Filled by
 * collect_wb_stats(); all counter fields are summed across the wbs of a
 * bdi, so callers must zero the struct (and set dirty_thresh) first.
 */
struct wb_stats {
unsigned long nr_dirty;		/* inodes on b_dirty */
unsigned long nr_io;		/* inodes on b_io */
unsigned long nr_more_io;	/* inodes on b_more_io */
unsigned long nr_dirty_time;	/* b_dirty_time inodes with I_DIRTY_TIME set */
unsigned long nr_writeback;	/* WB_WRITEBACK counter total */
unsigned long nr_reclaimable;	/* WB_RECLAIMABLE counter total */
unsigned long nr_dirtied;	/* WB_DIRTIED counter total */
unsigned long nr_written;	/* WB_WRITTEN counter total */
unsigned long dirty_thresh;	/* input: global dirty threshold */
unsigned long wb_thresh;	/* derived per-wb threshold total */
};
/* Parent debugfs directory ("bdi") under which each bdi gets a subdir. */
static struct dentry *bdi_debug_root;
/*
 * Create the top-level "bdi" debugfs directory.  The return value is not
 * error-checked: debugfs failures are ignored by convention, and the
 * dentry is only ever used as a parent for later debugfs calls.
 */
static void bdi_debug_init(void)
{
bdi_debug_root = debugfs_create_dir("bdi", NULL);
}
/*
 * collect_wb_stats - fold one bdi_writeback's statistics into @stats
 * @stats: accumulator; @stats->dirty_thresh must be set by the caller
 *         beforehand (it feeds wb_calc_thresh() below)
 * @wb:    the writeback instance to sample
 *
 * Counts the inodes on each of @wb's IO lists under wb->list_lock and
 * adds the wb counters on top.  All fields are incremented rather than
 * assigned so the caller can accumulate over several wbs of one bdi.
 * The result is a point-in-time snapshot for debugfs reporting only.
 */
static void collect_wb_stats(struct wb_stats *stats,
struct bdi_writeback *wb)
{
struct inode *inode;
/* List walks require list_lock; counts are stale the moment we drop it. */
spin_lock(&wb->list_lock);
list_for_each_entry(inode, &wb->b_dirty, i_io_list)
stats->nr_dirty++;
list_for_each_entry(inode, &wb->b_io, i_io_list)
stats->nr_io++;
list_for_each_entry(inode, &wb->b_more_io, i_io_list)
stats->nr_more_io++;
list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
/* Only count inodes that are dirty purely due to timestamps. */
if (inode->i_state & I_DIRTY_TIME)
stats->nr_dirty_time++;
spin_unlock(&wb->list_lock);
/* wb_stat() counters are read outside list_lock; may be slightly stale. */
stats->nr_writeback += wb_stat(wb, WB_WRITEBACK);
stats->nr_reclaimable += wb_stat(wb, WB_RECLAIMABLE);
stats->nr_dirtied += wb_stat(wb, WB_DIRTIED);
stats->nr_written += wb_stat(wb, WB_WRITTEN);
stats->wb_thresh += wb_calc_thresh(wb, stats->dirty_thresh);
}
#ifdef CONFIG_CGROUP_WRITEBACK
/*
 * Accumulate stats across every wb attached to @bdi (one per memcg when
 * cgroup writeback is enabled).  The wb_list is walked under RCU;
 * wb_tryget() skips wbs whose reference can no longer be taken (i.e.
 * ones already going away), and the held ref keeps the wb alive across
 * collect_wb_stats(), which sleeps^W spins on wb->list_lock.
 */
static void bdi_collect_stats(struct backing_dev_info *bdi,
struct wb_stats *stats)
{
struct bdi_writeback *wb;
rcu_read_lock();
list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) {
if (!wb_tryget(wb))
continue;
collect_wb_stats(stats, wb);
wb_put(wb);
}
rcu_read_unlock();
}
#else
/*
 * !CONFIG_CGROUP_WRITEBACK: each bdi has only its single embedded wb,
 * so sample that one directly — no RCU walk or refcounting needed.
 */
static void bdi_collect_stats(struct backing_dev_info *bdi,
struct wb_stats *stats)
{
collect_wb_stats(stats, &bdi->wb);
}
#endif
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
struct backing_dev_info *bdi = m->private;
unsigned long background_thresh;
unsigned long dirty_thresh;
struct wb_stats stats;
unsigned long tot_bw;
global_dirty_limits(&background_thresh, &dirty_thresh);
memset(&stats, 0, sizeof(stats));
stats.dirty_thresh = dirty_thresh;
bdi_collect_stats(bdi, &stats);
tot_bw = atomic_long_read(&bdi->tot_write_bandwidth