sched: Make cpu_shares_read_u64() use tg_weight()

Move tg_weight() upward and make cpu_shares_read_u64() use it too. This
makes weight retrieval shared between the cgroup v1 and v2 read paths; the
helper will later be used to implement cgroup support for sched_ext.

No functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
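
For reference, a standalone userspace sketch (not part of the patch) of what
the shared helper does, assuming the 64-bit scale_load()/scale_load_down()
definitions and ignoring the MIN_SHARES clamp the kernel applies to nonzero
weights in scale_load_down(); it models how the v1 cpu.shares read path can
hand back the down-scaled group weight via tg_weight():

#include <stdio.h>

#define SCHED_FIXEDPOINT_SHIFT	10

struct task_group {
	unsigned long shares;	/* stored scaled up, as in the kernel */
};

static unsigned long scale_load(unsigned long w)
{
	return w << SCHED_FIXEDPOINT_SHIFT;
}

static unsigned long scale_load_down(unsigned long w)
{
	return w >> SCHED_FIXEDPOINT_SHIFT;	/* clamp omitted in this sketch */
}

/* the helper this patch moves upward and shares between v1 and v2 readers */
static unsigned long tg_weight(struct task_group *tg)
{
	return scale_load_down(tg->shares);
}

/* models cpu_shares_read_u64() after the patch */
static unsigned long long cpu_shares_read(struct task_group *tg)
{
	return tg_weight(tg);
}

int main(void)
{
	struct task_group tg = { .shares = scale_load(2048) };

	printf("cpu.shares reads back as %llu\n", cpu_shares_read(&tg));
	return 0;
}

The v2 cpu.weight reader can start from the same tg_weight() value before
converting it to the cgroup weight range, which is what lets a future
sched_ext cgroup implementation reuse the helper as well.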
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b05705d..362918d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9194,6 +9194,11 @@ static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
 #endif /* CONFIG_UCLAMP_TASK_GROUP */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+static unsigned long tg_weight(struct task_group *tg)
+{
+	return scale_load_down(tg->shares);
+}
+
 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
 				struct cftype *cftype, u64 shareval)
 {
@@ -9205,9 +9210,7 @@ static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
 			       struct cftype *cft)
 {
-	struct task_group *tg = css_tg(css);
-
-	return (u64) scale_load_down(tg->shares);
+	return tg_weight(css_tg(css));
 }
 
 #ifdef CONFIG_CFS_BANDWIDTH
@@ -9709,11 +9712,6 @@ static int cpu_local_stat_show(struct seq_file *sf,
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static unsigned long tg_weight(struct task_group *tg)
-{
-	return scale_load_down(tg->shares);
-}
-
 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
 			       struct cftype *cft)
 {