| /* SPDX-License-Identifier: GPL-2.0 */ |
| /* |
| * Scheduler internal types and methods: |
| */ |
| #ifndef _KERNEL_SCHED_SCHED_H |
| #define _KERNEL_SCHED_SCHED_H |
| |
| #include <linux/sched/affinity.h> |
| #include <linux/sched/autogroup.h> |
| #include <linux/sched/cpufreq.h> |
| #include <linux/sched/deadline.h> |
| #include <linux/sched.h> |
| #include <linux/sched/loadavg.h> |
| #include <linux/sched/mm.h> |
| #include <linux/sched/rseq_api.h> |
| #include <linux/sched/signal.h> |
| #include <linux/sched/smt.h> |
| #include <linux/sched/stat.h> |
| #include <linux/sched/sysctl.h> |
| #include <linux/sched/task_flags.h> |
| #include <linux/sched/task.h> |
| #include <linux/sched/topology.h> |
| |
| #include <linux/atomic.h> |
| #include <linux/bitmap.h> |
| #include <linux/bug.h> |
| #include <linux/capability.h> |
| #include <linux/cgroup_api.h> |
| #include <linux/cgroup.h> |
| #include <linux/context_tracking.h> |
| #include <linux/cpufreq.h> |
| #include <linux/cpumask_api.h> |
| #include <linux/ctype.h> |
| #include <linux/file.h> |
| #include <linux/fs_api.h> |
| #include <linux/hrtimer_api.h> |
| #include <linux/interrupt.h> |
| #include <linux/irq_work.h> |
| #include <linux/jiffies.h> |
| #include <linux/kref_api.h> |
| #include <linux/kthread.h> |
| #include <linux/ktime_api.h> |
| #include <linux/lockdep_api.h> |
| #include <linux/lockdep.h> |
| #include <linux/minmax.h> |
| #include <linux/mm.h> |
| #include <linux/module.h> |
| #include <linux/mutex_api.h> |
| #include <linux/plist.h> |
| #include <linux/poll.h> |
| #include <linux/proc_fs.h> |
| #include <linux/profile.h> |
| #include <linux/psi.h> |
| #include <linux/rcupdate.h> |
| #include <linux/seq_file.h> |
| #include <linux/seqlock.h> |
| #include <linux/softirq.h> |
| #include <linux/spinlock_api.h> |
| #include <linux/static_key.h> |
| #include <linux/stop_machine.h> |
| #include <linux/syscalls_api.h> |
| #include <linux/syscalls.h> |
| #include <linux/tick.h> |
| #include <linux/topology.h> |
| #include <linux/types.h> |
| #include <linux/u64_stats_sync_api.h> |
| #include <linux/uaccess.h> |
| #include <linux/wait_api.h> |
| #include <linux/wait_bit.h> |
| #include <linux/workqueue_api.h> |
| #include <linux/delayacct.h> |
| |
| #include <trace/events/power.h> |
| #include <trace/events/sched.h> |
| |
| #include "../workqueue_internal.h" |
| |
| struct rq; |
| struct cfs_rq; |
| struct rt_rq; |
| struct sched_group; |
| struct cpuidle_state; |
| |
| #ifdef CONFIG_PARAVIRT |
| # include <asm/paravirt.h> |
| # include <asm/paravirt_api_clock.h> |
| #endif |
| |
| #include <asm/barrier.h> |
| |
| #include "cpupri.h" |
| #include "cpudeadline.h" |
| |
| #ifdef CONFIG_SCHED_DEBUG |
| # define SCHED_WARN_ON(x) WARN_ONCE(x, #x) |
| #else |
| # define SCHED_WARN_ON(x) ({ (void)(x), 0; }) |
| #endif |
| |
| /* task_struct::on_rq states: */ |
| #define TASK_ON_RQ_QUEUED 1 |
| #define TASK_ON_RQ_MIGRATING 2 |
| |
| extern __read_mostly int scheduler_running; |
| |
| extern unsigned long calc_load_update; |
| extern atomic_long_t calc_load_tasks; |
| |
| extern void calc_global_load_tick(struct rq *this_rq); |
| extern long calc_load_fold_active(struct rq *this_rq, long adjust); |
| |
| extern void call_trace_sched_update_nr_running(struct rq *rq, int count); |
| |
| extern int sysctl_sched_rt_period; |
| extern int sysctl_sched_rt_runtime; |
| extern int sched_rr_timeslice; |
| |
| /* |
| * Asymmetric CPU capacity bits |
| */ |
| struct asym_cap_data { |
| struct list_head link; |
| struct rcu_head rcu; |
| unsigned long capacity; |
| unsigned long cpus[]; |
| }; |
| |
| extern struct list_head asym_cap_list; |
| |
| #define cpu_capacity_span(asym_data) to_cpumask((asym_data)->cpus) |
| |
| /* |
| * Helpers for converting nanosecond timing to jiffy resolution |
| */ |
| #define NS_TO_JIFFIES(time) ((unsigned long)(time) / (NSEC_PER_SEC/HZ)) |
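| /* |
| * Illustrative only: with HZ == 1000, NSEC_PER_SEC/HZ == 1000000, so |
| * NS_TO_JIFFIES(5 * NSEC_PER_MSEC) == 5. The division truncates, dropping |
| * any sub-jiffy remainder. |
| */ |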
| |
| /* |
| * Increase resolution of nice-level calculations for 64-bit architectures. |
| * The extra resolution improves shares distribution and load balancing of |
| * low-weight task groups (e.g. nice +19 on an autogroup), deeper task-group |
| * hierarchies, especially on larger systems. This is not a user-visible change |
| * and does not change the user-interface for setting shares/weights. |
| * |
| * We increase resolution only if we have enough bits to allow this increased |
| * resolution (i.e. 64-bit). The cost of increasing resolution on 32-bit |
| * is pretty high and the returns do not justify the increased cost. |
| * |
| * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to |
| * increase coverage and consistency always enable it on 64-bit platforms. |
| */ |
| #ifdef CONFIG_64BIT |
| # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) |
| # define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) |
| # define scale_load_down(w) \ |
| ({ \ |
| unsigned long __w = (w); \ |
| \ |
| if (__w) \ |
| __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \ |
| __w; \ |
| }) |
| #else |
| # define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) |
| # define scale_load(w) (w) |
| # define scale_load_down(w) (w) |
| #endif |
| |
| /* |
| * Task weight (visible to users) and its load (invisible to users) have |
| * independent resolution, but they should be well calibrated. We use |
| * scale_load() and scale_load_down(w) to convert between them. The |
| * following must be true: |
| * |
| * scale_load(sched_prio_to_weight[NICE_TO_PRIO(0)-MAX_RT_PRIO]) == NICE_0_LOAD |
| * |
| */ |
| #define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT) |
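| /* |
| * Illustrative only, assuming SCHED_FIXEDPOINT_SHIFT == 10: on 64-bit, |
| * NICE_0_LOAD_SHIFT == 20, so NICE_0_LOAD == (1L << 20), and |
| * scale_load(1024) == NICE_0_LOAD while scale_load_down(NICE_0_LOAD) == 1024. |
| * On 32-bit both conversions are no-ops and NICE_0_LOAD == 1024. |
| */ |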
| |
| /* |
| * Single value that decides SCHED_DEADLINE internal math precision. |
| * 10 -> just above 1us |
| * 9 -> just above 0.5us |
| */ |
| #define DL_SCALE 10 |
| |
| /* |
| * Single value that denotes runtime == period, i.e. unlimited time. |
| */ |
| #define RUNTIME_INF ((u64)~0ULL) |
| |
| static inline int idle_policy(int policy) |
| { |
| return policy == SCHED_IDLE; |
| } |
| |
| static inline int normal_policy(int policy) |
| { |
| #ifdef CONFIG_SCHED_CLASS_EXT |
| if (policy == SCHED_EXT) |
| return true; |
| #endif |
| return policy == SCHED_NORMAL; |
| } |
| |
| static inline int fair_policy(int policy) |
| { |
| return normal_policy(policy) || policy == SCHED_BATCH; |
| } |
| |
| static inline int rt_policy(int policy) |
| { |
| return policy == SCHED_FIFO || policy == SCHED_RR; |
| } |
| |
| static inline int dl_policy(int policy) |
| { |
| return policy == SCHED_DEADLINE; |
| } |
| |
| static inline bool valid_policy(int policy) |
| { |
| return idle_policy(policy) || fair_policy(policy) || |
| rt_policy(policy) || dl_policy(policy); |
| } |
| |
| static inline int task_has_idle_policy(struct task_struct *p) |
| { |
| return idle_policy(p->policy); |
| } |
| |
| static inline int task_has_rt_policy(struct task_struct *p) |
| { |
| return rt_policy(p->policy); |
| } |
| |
| static inline int task_has_dl_policy(struct task_struct *p) |
| { |
| return dl_policy(p->policy); |
| } |
| |
| #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) |
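| /* |
| * Illustrative only: with SCHED_CAPACITY_SHIFT == 10, cap_scale(768, 512) |
| * is (768 * 512) >> 10 == 384, i.e. @v scaled by @s/SCHED_CAPACITY_SCALE. |
| */ |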
| |
| static inline void update_avg(u64 *avg, u64 sample) |
| { |
| s64 diff = sample - *avg; |
| |
| *avg += diff / 8; |
| } |
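| /* |
| * Illustrative only: update_avg() is a 1/8 exponentially weighted moving |
| * average. Starting from *avg == 0, three samples of 800 move the average |
| * to 100, 187 and then 263 (the division truncates). |
| */ |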
| |
| /* |
| * Shifting a value by an exponent greater than *or equal to* the size of |
| * said value is UB; cap the shift at size-1. |
| */ |
| #define shr_bound(val, shift) \ |
| (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1)) |
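| /* |
| * Illustrative only: for a u32 @val, shr_bound(val, 40) shifts by |
| * min(40, 31) == 31 instead of hitting undefined behaviour. |
| */ |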
| |
| /* |
| * cgroup weight knobs should use the common MIN, DFL and MAX values which are |
| * 1, 100 and 10000 respectively. While it loses a bit of range on both ends, it |
| * maps pretty well onto the shares value used by the scheduler and the round-trip |
| * conversions preserve the original value over the entire range. |
| */ |
| static inline unsigned long sched_weight_from_cgroup(unsigned long cgrp_weight) |
| { |
| return DIV_ROUND_CLOSEST_ULL(cgrp_weight * 1024, CGROUP_WEIGHT_DFL); |
| } |
| |
| static inline unsigned long sched_weight_to_cgroup(unsigned long weight) |
| { |
| return clamp_t(unsigned long, |
| DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024), |
| CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX); |
| } |
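| /* |
| * Illustrative only: the defaults round-trip cleanly, e.g. |
| * sched_weight_from_cgroup(CGROUP_WEIGHT_DFL) == 1024 and |
| * sched_weight_to_cgroup(1024) == CGROUP_WEIGHT_DFL (100). |
| */ |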
| |
| /* |
| * !! For sched_setattr_nocheck() (kernel) only !! |
| * |
| * This is actually gross. :( |
| * |
| * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE |
| * tasks, while still being able to sleep. We need this on platforms that cannot |
| * atomically change clock frequency. Remove once fast switching becomes |
| * available on such platforms. |
| * |
| * SUGOV stands for SchedUtil GOVernor. |
| */ |
| #define SCHED_FLAG_SUGOV 0x10000000 |
| |
| #define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV) |
| |
| static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se) |
| { |
| #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL |
| return unlikely(dl_se->flags & SCHED_FLAG_SUGOV); |
| #else |
| return false; |
| #endif |
| } |
| |
| /* |
| * Tells if entity @a should preempt entity @b. |
| */ |
| static inline bool dl_entity_preempt(const struct sched_dl_entity *a, |
| const struct sched_dl_entity *b) |
| { |
| return dl_entity_is_special(a) || |
| dl_time_before(a->deadline, b->deadline); |
| } |
| |
| /* |
| * This is the priority-queue data structure of the RT scheduling class: |
| */ |
| struct rt_prio_array { |
| DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */ |
| struct list_head queue[MAX_RT_PRIO]; |
| }; |
| |
| struct rt_bandwidth { |
| /* nests inside the rq lock: */ |
| raw_spinlock_t rt_runtime_lock; |
| ktime_t rt_period; |
| u64 rt_runtime; |
| struct hrtimer rt_period_timer; |
| unsigned int rt_period_active; |
| }; |
| |
| static inline int dl_bandwidth_enabled(void) |
| { |
| return sysctl_sched_rt_runtime >= 0; |
| } |
| |
| /* |
| * To keep the bandwidth of -deadline tasks under control |
| * we need some place where we can: |
| * - store the maximum -deadline bandwidth of each CPU; |
| * - cache the fraction of bandwidth that is currently allocated in |
| * each root domain; |
| * |
| * This is all done in the data structure below. It is similar to the |
| * one used for RT-throttling (rt_bandwidth), with the main difference |
| * that, since here we are only interested in admission control, we |
| * do not decrease any runtime while the group "executes", nor do we |
| * need a timer to replenish it. |
| * |
| * With respect to SMP, bandwidth is given on a per root domain basis, |
| * meaning that: |
| * - bw (< 100%) is the deadline bandwidth of each CPU; |
| * - total_bw is the currently allocated bandwidth in each root domain; |
| */ |
| struct dl_bw { |
| raw_spinlock_t lock; |
| u64 bw; |
| u64 total_bw; |
| }; |
| |
| extern void init_dl_bw(struct dl_bw *dl_b); |
| extern int sched_dl_global_validate(void); |
| extern void sched_dl_do_global(void); |
| extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr); |
| extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr); |
| extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); |
| extern bool __checkparam_dl(const struct sched_attr *attr); |
| extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); |
| extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); |
| extern int dl_bw_check_overflow(int cpu); |
| extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec); |
| /* |
| * SCHED_DEADLINE supports servers (nested scheduling) with the following |
| * interface: |
| * |
| * dl_se::rq -- runqueue we belong to. |
| * |
| * dl_se::server_has_tasks() -- used on bandwidth enforcement; we 'stop' the |
| * server when it runs out of tasks to run. |
| * |
| * dl_se::server_pick() -- nested pick_next_task(); we yield the period if this |
| * returns NULL. |
| * |
| * dl_server_update() -- called from update_curr_common(), propagates runtime |
| * to the server. |
| * |
| * dl_server_start() |
| * dl_server_stop() -- start/stop the server when it has (no) tasks. |
| * |
| * dl_server_init() -- initializes the server. |
| */ |
| extern void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec); |
| extern void dl_server_start(struct sched_dl_entity *dl_se); |
| extern void dl_server_stop(struct sched_dl_entity *dl_se); |
| extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, |
| dl_server_has_tasks_f has_tasks, |
| dl_server_pick_f pick_task); |
| |
| extern void dl_server_update_idle_time(struct rq *rq, |
| struct task_struct *p); |
| extern void fair_server_init(struct rq *rq); |
| extern void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq); |
| extern int dl_server_apply_params(struct sched_dl_entity *dl_se, |
| u64 runtime, u64 period, bool init); |
| |
| #ifdef CONFIG_CGROUP_SCHED |
| |
| extern struct list_head task_groups; |
| |
| struct cfs_bandwidth { |
| #ifdef CONFIG_CFS_BANDWIDTH |
| raw_spinlock_t lock; |
| ktime_t period; |
| u64 quota; |
| u64 runtime; |
| u64 burst; |
| u64 runtime_snap; |
| s64 hierarchical_quota; |
| |
| u8 idle; |
| u8 period_active; |
| u8 slack_started; |
| struct hrtimer period_timer; |
| struct hrtimer slack_timer; |
| struct list_head throttled_cfs_rq; |
| |
| /* Statistics: */ |
| int nr_periods; |
| int nr_throttled; |
| int nr_burst; |
| u64 throttled_time; |
| u64 burst_time; |
| #endif |
| }; |
| |
| /* Task group related information */ |
| struct task_group { |
| struct cgroup_subsys_state css; |
| |
| #ifdef CONFIG_GROUP_SCHED_WEIGHT |
| /* A positive value indicates that this is a SCHED_IDLE group. */ |
| int idle; |
| #endif |
| |
| #ifdef CONFIG_FAIR_GROUP_SCHED |
| /* schedulable entities of this group on each CPU */ |
| struct sched_entity **se; |
| /* runqueue "owned" by this group on each CPU */ |
| struct cfs_rq **cfs_rq; |
| unsigned long shares; |
| #ifdef CONFIG_SMP |
| /* |
| * load_avg can be heavily contended at clock tick time, so put |
| * it in its own cache-line separated from the fields above which |
| * will also be accessed at each tick. |
| */ |
| atomic_long_t load_avg ____cacheline_aligned; |
| #endif |
| #endif |
| |
| #ifdef CONFIG_RT_GROUP_SCHED |
| struct sched_rt_entity **rt_se; |
| struct rt_rq **rt_rq; |
| |
| struct rt_bandwidth rt_bandwidth; |
| #endif |
| |
| #ifdef CONFIG_EXT_GROUP_SCHED |
| u32 scx_flags; /* SCX_TG_* */ |
| u32 scx_weight; |
| #endif |
| |
| struct rcu_head rcu; |
| struct list_head list; |
| |
| struct task_group *parent; |
| struct list_head siblings; |
| struct list_head children; |
| |
| #ifdef CONFIG_SCHED_AUTOGROUP |
| struct autogroup *autogroup; |
| #endif |
| |
| struct cfs_bandwidth cfs_bandwidth; |
| |
| #ifdef CONFIG_UCLAMP_TASK_GROUP |
| /* The two decimal precision [%] value requested from user-space */ |
| unsigned int uclamp_pct[UCLAMP_CNT]; |
| /* Clamp values requested for a task group */ |
| struct uclamp_se uclamp_req[UCLAMP_CNT]; |
| /* Effective clamp values used for a task group */ |
| struct uclamp_se uclamp[UCLAMP_CNT]; |
| #endif |
| |
| }; |
| |
| #ifdef CONFIG_GROUP_SCHED_WEIGHT |
| #define ROOT_TASK_GROUP_LOAD NICE_0_LOAD |
| |
| /* |
| * A weight of 0 or 1 can cause arithmetic problems. |
| * The weight of a cfs_rq is the sum of the weights of the entities |
| * queued on it, so the weight of an entity should not be too large, |
| * and neither should the shares value of a task group. |
| * (The default weight is 1024 - so there's no practical |
| * limitation from this.) |
| */ |
| #define MIN_SHARES (1UL << 1) |
| #define MAX_SHARES (1UL << 18) |
| #endif |
| |
| typedef int (*tg_visitor)(struct task_group *, void *); |
| |
| extern int walk_tg_tree_from(struct task_group *from, |
| tg_visitor down, tg_visitor up, void *data); |
| |
| /* |
| * Iterate the full tree, calling @down when first entering a node and @up when |
| * leaving it for the final time. |
| * |
| * Caller must hold rcu_lock or sufficient equivalent. |
| */ |
| static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) |
| { |
| return walk_tg_tree_from(&root_task_group, down, up, data); |
| } |
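| /* |
| * Illustrative sketch (the visitor and data are hypothetical; tg_nop() can |
| * stand in for a direction that needs no work): |
| * |
| * rcu_read_lock(); |
| * walk_tg_tree(my_down_visitor, tg_nop, &my_data); |
| * rcu_read_unlock(); |
| */ |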
| |
| static inline struct task_group *css_tg(struct cgroup_subsys_state *css) |
| { |
| return css ? container_of(css, struct task_group, css) : NULL; |
| } |
| |
| extern int tg_nop(struct task_group *tg, void *data); |
| |
| #ifdef CONFIG_FAIR_GROUP_SCHED |
| extern void free_fair_sched_group(struct task_group *tg); |
| extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent); |
| extern void online_fair_sched_group(struct task_group *tg); |
| extern void unregister_fair_sched_group(struct task_group *tg); |
| #else |
| static inline void free_fair_sched_group(struct task_group *tg) { } |
| static inline int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
| { |
| return 1; |
| } |
| static inline void online_fair_sched_group(struct task_group *tg) { } |
| static inline void unregister_fair_sched_group(struct task_group *tg) { } |
| #endif |
| |
| extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, |
| struct sched_entity *se, int cpu, |
| struct sched_entity *parent); |
| extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent); |
| |
| extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); |
| extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); |
| extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); |
| extern bool cfs_task_bw_constrained(struct task_struct *p); |
| |
| extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, |
| struct sched_rt_entity *rt_se, int cpu, |
| struct sched_rt_entity *parent); |
| extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us); |
| extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us); |
| extern long sched_group_rt_runtime(struct task_group *tg); |
| extern long sched_group_rt_period(struct task_group *tg); |
| extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); |
| |
| extern struct task_group *sched_create_group(struct task_group *parent); |
| extern void sched_online_group(struct task_group *tg, |
| struct task_group *parent); |
| extern void sched_destroy_group(struct task_group *tg); |
| extern void sched_release_group(struct task_group *tg); |
| |
| extern void sched_move_task(struct task_struct *tsk); |
| |
| #ifdef CONFIG_FAIR_GROUP_SCHED |
| extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); |
| |
| extern int sched_group_set_idle(struct task_group *tg, long idle); |
| |
| #ifdef CONFIG_SMP |
| extern void set_task_rq_fair(struct sched_entity *se, |
| struct cfs_rq *prev, struct cfs_rq *next); |
| #else /* !CONFIG_SMP */ |
| static inline void set_task_rq_fair(struct sched_entity *se, |
| struct cfs_rq *prev, struct cfs_rq *next) { } |
| #endif /* CONFIG_SMP */ |
| #else /* !CONFIG_FAIR_GROUP_SCHED */ |
| static inline int sched_group_set_shares(struct task_group *tg, unsigned long shares) { return 0; } |
| static inline int sched_group_set_idle(struct task_group *tg, long idle) { return 0; } |
| #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| |
| #else /* CONFIG_CGROUP_SCHED */ |
| |
| struct cfs_bandwidth { }; |
| |
| static inline bool cfs_task_bw_constrained(struct task_struct *p) { return false; } |
| |
| #endif /* CONFIG_CGROUP_SCHED */ |
| |
| extern void unregister_rt_sched_group(struct task_group *tg); |
| extern void free_rt_sched_group(struct task_group *tg); |
| extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent); |
| |
| /* |
| * u64_u32_load/u64_u32_store |
| * |
| * Use a copy of a u64 value to protect against data races. This is only |
| * applicable to 32-bit architectures. |
| */ |
| #ifdef CONFIG_64BIT |
| # define u64_u32_load_copy(var, copy) var |
| # define u64_u32_store_copy(var, copy, val) (var = val) |
| #else |
| # define u64_u32_load_copy(var, copy) \ |
| ({ \ |
| u64 __val, __val_copy; \ |
| do { \ |
| __val_copy = copy; \ |
| /* \ |
| * paired with u64_u32_store_copy(), ordering access \ |
| * to var and copy. \ |
| */ \ |
| smp_rmb(); \ |
| __val = var; \ |
| } while (__val != __val_copy); \ |
| __val; \ |
| }) |
| # define u64_u32_store_copy(var, copy, val) \ |
| do { \ |
| typeof(val) __val = (val); \ |
| var = __val; \ |
| /* \ |
| * paired with u64_u32_load_copy(), ordering access to var and \ |
| * copy. \ |
| */ \ |
| smp_wmb(); \ |
| copy = __val; \ |
| } while (0) |
| #endif |
| # define u64_u32_load(var) u64_u32_load_copy(var, var##_copy) |
| # define u64_u32_store(var, val) u64_u32_store_copy(var, var##_copy, val) |
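| /* |
| * Illustrative only, with hypothetical fields 'foo' and 'foo_copy': a writer |
| * holding the relevant lock does u64_u32_store(foo, val); a lockless 32-bit |
| * reader retries u64_u32_load(foo) until both accesses are consistent. On |
| * 64-bit both expand to plain accesses of 'foo'. |
| */ |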
| |
| struct balance_callback { |
| struct balance_callback *next; |
| void (*func)(struct rq *rq); |
| }; |
| |
| /* CFS-related fields in a runqueue */ |
| struct cfs_rq { |
| struct load_weight load; |
| unsigned int nr_running; |
| unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */ |
| unsigned int idle_nr_running; /* SCHED_IDLE */ |
| unsigned int idle_h_nr_running; /* SCHED_IDLE */ |
| |
| s64 avg_vruntime; |
| u64 avg_load; |
| |
| u64 min_vruntime; |
| #ifdef CONFIG_SCHED_CORE |
| unsigned int forceidle_seq; |
| u64 min_vruntime_fi; |
| #endif |
| |
| struct rb_root_cached tasks_timeline; |
| |
| /* |
| * 'curr' points to currently running entity on this cfs_rq. |
| * It is set to NULL otherwise (i.e. when none are currently running). |
| */ |
| struct sched_entity *curr; |
| struct sched_entity *next; |
| |
| #ifdef CONFIG_SMP |
| /* |
| * CFS load tracking |
| */ |
| struct sched_avg avg; |
| #ifndef CONFIG_64BIT |
| u64 last_update_time_copy; |
| #endif |
| struct { |
| raw_spinlock_t lock ____cacheline_aligned; |
| int nr; |
| unsigned long load_avg; |
| unsigned long util_avg; |
| unsigned long runnable_avg; |
| } removed; |
| |
| #ifdef CONFIG_FAIR_GROUP_SCHED |
| u64 last_update_tg_load_avg; |
| unsigned long tg_load_avg_contrib; |
| long propagate; |
| long prop_runnable_sum; |
| |
| /* |
| * h_load = weight * f(tg) |
| * |
| * Where f(tg) is the recursive weight fraction assigned to |
| * this group. |
| */ |
| unsigned long h_load; |
| u64 last_h_load_update; |
| struct sched_entity *h_load_next; |
| #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| #endif /* CONFIG_SMP */ |
| |
| #ifdef CONFIG_FAIR_GROUP_SCHED |
| struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ |
| |
| /* |
| * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in |
| * a hierarchy). Non-leaf lrqs hold other higher schedulable entities |
| * (like users, containers etc.) |
| * |
| * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a CPU. |
| * This list is used during load balance. |
| */ |
| int on_list; |
| struct list_head leaf_cfs_rq_list; |
| struct task_group *tg; /* group that "owns" this runqueue */ |
| |
| /* Locally cached copy of our task_group's idle value */ |
| int idle; |
| |
| #ifdef CONFIG_CFS_BANDWIDTH |
| int runtime_enabled; |
| s64 runtime_remaining; |
| |
| u64 throttled_pelt_idle; |
| #ifndef CONFIG_64BIT |
| u64 throttled_pelt_idle_copy; |
| #endif |
| u64 throttled_clock; |
| u64 throttled_clock_pelt; |
| u64 throttled_clock_pelt_time; |
| u64 throttled_clock_self; |
| u64 throttled_clock_self_time; |
| int throttled; |
| int throttle_count; |
| struct list_head throttled_list; |
| struct list_head throttled_csd_list; |
| #endif /* CONFIG_CFS_BANDWIDTH */ |
| #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| }; |
| |
| #ifdef CONFIG_SCHED_CLASS_EXT |
| /* scx_rq->flags, protected by the rq lock */ |
| enum scx_rq_flags { |
| /* |
| * A hotplugged CPU starts scheduling before rq_online_scx(). Track |
| * ops.cpu_on/offline() state so that ops.enqueue/dispatch() are called |
| * only while the BPF scheduler considers the CPU to be online. |
| */ |
| SCX_RQ_ONLINE = 1 << 0, |
| SCX_RQ_CAN_STOP_TICK = 1 << 1, |
| SCX_RQ_BAL_PENDING = 1 << 2, /* balance hasn't run yet */ |
| SCX_RQ_BAL_KEEP = 1 << 3, /* balance decided to keep current */ |
| SCX_RQ_BYPASSING = 1 << 4, |
| |
| SCX_RQ_IN_WAKEUP = 1 << 16, |
| SCX_RQ_IN_BALANCE = 1 << 17, |
| }; |
| |
| struct scx_rq { |
| struct scx_dispatch_q local_dsq; |
| struct list_head runnable_list; /* runnable tasks on this rq */ |
| struct list_head ddsp_deferred_locals; /* deferred ddsps from enq */ |
| unsigned long ops_qseq; |
| u64 extra_enq_flags; /* see move_task_to_local_dsq() */ |
| u32 nr_running; |
| u32 flags; |
| u32 cpuperf_target; /* [0, SCHED_CAPACITY_SCALE] */ |
| bool cpu_released; |
| cpumask_var_t cpus_to_kick; |
| cpumask_var_t cpus_to_kick_if_idle; |
| cpumask_var_t cpus_to_preempt; |
| cpumask_var_t cpus_to_wait; |
| unsigned long pnt_seq; |
| struct balance_callback deferred_bal_cb; |
| struct irq_work deferred_irq_work; |
| struct irq_work kick_cpus_irq_work; |
| }; |
| #endif /* CONFIG_SCHED_CLASS_EXT */ |
| |
| static inline int rt_bandwidth_enabled(void) |
| { |
| return sysctl_sched_rt_runtime >= 0; |
| } |
| |
| /* RT IPI pull logic requires IRQ_WORK */ |
| #if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP) |
| # define HAVE_RT_PUSH_IPI |
| #endif |
| |
| /* Real-Time classes' related field in a runqueue: */ |
| struct rt_rq { |
| struct rt_prio_array active; |
| unsigned int rt_nr_running; |
| unsigned int rr_nr_running; |
| #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED |
| struct { |
| int curr; /* highest queued rt task prio */ |
| #ifdef CONFIG_SMP |
| int next; /* next highest */ |
| #endif |
| } highest_prio; |
| #endif |
| #ifdef CONFIG_SMP |
| bool overloaded; |
| struct plist_head pushable_tasks; |
| |
| #endif /* CONFIG_SMP */ |
| int rt_queued; |
| |
| #ifdef CONFIG_RT_GROUP_SCHED |
| int rt_throttled; |
| u64 rt_time; |
| u64 rt_runtime; |
| /* Nests inside the rq lock: */ |
| raw_spinlock_t rt_runtime_lock; |
| |
| unsigned int rt_nr_boosted; |
| |
| struct rq *rq; |
| struct task_group *tg; |
| #endif |
| }; |
| |
| static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) |
| { |
| return rt_rq->rt_queued && rt_rq->rt_nr_running; |
| } |
| |
| /* Deadline class' related fields in a runqueue */ |
| struct dl_rq { |
| /* runqueue is an rbtree, ordered by deadline */ |
| struct rb_root_cached root; |
| |
| unsigned int dl_nr_running; |
| |
| #ifdef CONFIG_SMP |
| /* |
| * Deadline values of the currently executing and the |
| * earliest ready task on this rq. Caching these facilitates |
| * the decision whether or not a ready but not running task |
| * should migrate somewhere else. |
| */ |
| struct { |
| u64 curr; |
| u64 next; |
| } earliest_dl; |
| |
| bool overloaded; |
| |
| /* |
| * Tasks on this rq that can be pushed away. They are kept in |
| * an rb-tree, ordered by tasks' deadlines, with caching |
| * of the leftmost (earliest deadline) element. |
| */ |
| struct rb_root_cached pushable_dl_tasks_root; |
| #else |
| struct dl_bw dl_bw; |
| #endif |
| /* |
| * "Active utilization" for this runqueue: increased when a |
| * task wakes up (becomes TASK_RUNNING) and decreased when a |
| * task blocks |
| */ |
| u64 running_bw; |
| |
| /* |
| * Utilization of the tasks "assigned" to this runqueue (including |
| * the tasks that are in runqueue and the tasks that executed on this |
| * CPU and blocked). Increased when a task moves to this runqueue, and |
| * decreased when the task moves away (migrates, changes scheduling |
| * policy, or terminates). |
| * This is needed to compute the "inactive utilization" for the |
| * runqueue (inactive utilization = this_bw - running_bw). |
| */ |
| u64 this_bw; |
| u64 extra_bw; |
| |
| /* |
| * Maximum available bandwidth for reclaiming by SCHED_FLAG_RECLAIM |
| * tasks of this rq. Used in the calculation of reclaimable bandwidth (GRUB). |
| */ |
| u64 max_bw; |
| |
| /* |
| * Inverse of the fraction of CPU utilization that can be reclaimed |
| * by the GRUB algorithm. |
| */ |
| u64 bw_ratio; |
| }; |
| |
| #ifdef CONFIG_FAIR_GROUP_SCHED |
| |
| /* An entity is a task if it doesn't "own" a runqueue */ |
| #define entity_is_task(se) (!se->my_q) |
| |
| static inline void se_update_runnable(struct sched_entity *se) |
| { |
| if (!entity_is_task(se)) |
| se->runnable_weight = se->my_q->h_nr_running; |
| } |
| |
| static inline long se_runnable(struct sched_entity *se) |
| { |
| if (se->sched_delayed) |
| return false; |
| |
| if (entity_is_task(se)) |
| return !!se->on_rq; |
| else |
| return se->runnable_weight; |
| } |
| |
| #else /* !CONFIG_FAIR_GROUP_SCHED: */ |
| |
| #define entity_is_task(se) 1 |
| |
| static inline void se_update_runnable(struct sched_entity *se) { } |
| |
| static inline long se_runnable(struct sched_entity *se) |
| { |
| if (se->sched_delayed) |
| return false; |
| |
| return !!se->on_rq; |
| } |
| |
| #endif /* !CONFIG_FAIR_GROUP_SCHED */ |
| |
| #ifdef CONFIG_SMP |
| /* |
| * XXX we want to get rid of these helpers and use the full load resolution. |
| */ |
| static inline long se_weight(struct sched_entity *se) |
| { |
| return scale_load_down(se->load.weight); |
| } |
| |
| |
| static inline bool sched_asym_prefer(int a, int b) |
| { |
| return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b); |
| } |
| |
| struct perf_domain { |
| struct em_perf_domain *em_pd; |
| struct perf_domain *next; |
| struct rcu_head rcu; |
| }; |
| |
| /* |
| * We add the notion of a root-domain which will be used to define per-domain |
| * variables. Each exclusive cpuset essentially defines an island domain by |
| * fully partitioning the member CPUs from any other cpuset. Whenever a new |
| * exclusive cpuset is created, we also create and attach a new root-domain |
| * object. |
| * |
| */ |
| struct root_domain { |
| atomic_t refcount; |
| atomic_t rto_count; |
| struct rcu_head rcu; |
| cpumask_var_t span; |
| cpumask_var_t online; |
| |
| /* |
| * Indicate pullable load on at least one CPU, e.g: |
| * - More than one runnable task |
| * - Running task is misfit |
| */ |
| bool overloaded; |
| |
| /* Indicate one or more CPUs over-utilized (tipping point) */ |
| bool overutilized; |
| |
| /* |
| * The bit corresponding to a CPU gets set here if that CPU has more |
| * than one runnable -deadline task (as it is done below for RT tasks). |
| */ |
| cpumask_var_t dlo_mask; |
| atomic_t dlo_count; |
| struct dl_bw dl_bw; |
| struct cpudl cpudl; |
| |
| /* |
| * Indicate whether a root_domain's dl_bw has been checked or |
| * updated. It is a monotonically increasing value. |
| * |
| * Also, corner cases like 'wrap around' would be dangerous, but given |
| * that u64 is 'big enough', that shouldn't be a concern. |
| */ |
| u64 visit_gen; |
| |
| #ifdef HAVE_RT_PUSH_IPI |
| /* |
| * For IPI pull requests, loop across the rto_mask. |
| */ |
| struct irq_work rto_push_work; |
| raw_spinlock_t rto_lock; |
| /* These are only updated and read within rto_lock */ |
| int rto_loop; |
| int rto_cpu; |
| /* These atomics are updated outside of a lock */ |
| atomic_t rto_loop_next; |
| atomic_t rto_loop_start; |
| #endif |
| /* |
| * The "RT overload" flag: it gets set if a CPU has more than |
| * one runnable RT task. |
| */ |
| cpumask_var_t rto_mask; |
| struct cpupri cpupri; |
| |
| /* |
| * NULL-terminated list of performance domains intersecting with the |
| * CPUs of the rd. Protected by RCU. |
| */ |
| struct perf_domain __rcu *pd; |
| }; |
| |
| extern void init_defrootdomain(void); |
| extern int sched_init_domains(const struct cpumask *cpu_map); |
| extern void rq_attach_root(struct rq *rq, struct root_domain *rd); |
| extern void sched_get_rd(struct root_domain *rd); |
| extern void sched_put_rd(struct root_domain *rd); |
| |
| static inline int get_rd_overloaded(struct root_domain *rd) |
| { |
| return READ_ONCE(rd->overloaded); |
| } |
| |
| static inline void set_rd_overloaded(struct root_domain *rd, int status) |
| { |
| if (get_rd_overloaded(rd) != status) |
| WRITE_ONCE(rd->overloaded, status); |
| } |
| |
| #ifdef HAVE_RT_PUSH_IPI |
| extern void rto_push_irq_work_func(struct irq_work *work); |
| #endif |
| #endif /* CONFIG_SMP */ |
| |
| #ifdef CONFIG_UCLAMP_TASK |
| /* |
| * struct uclamp_bucket - Utilization clamp bucket |
| * @value: utilization clamp value for tasks on this clamp bucket |
| * @tasks: number of RUNNABLE tasks on this clamp bucket |
| * |
| * Keep track of how many tasks are RUNNABLE for a given utilization |
| * clamp value. |
| */ |
| struct uclamp_bucket { |
| unsigned long value : bits_per(SCHED_CAPACITY_SCALE); |
| unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE); |
| }; |
| |
| /* |
| * struct uclamp_rq - rq's utilization clamp |
| * @value: currently active clamp values for a rq |
| * @bucket: utilization clamp buckets affecting a rq |
| * |
| * Keep track of RUNNABLE tasks on a rq to aggregate their clamp values. |
| * A clamp value is affecting a rq when there is at least one task RUNNABLE |
| * (or actually running) with that value. |
| * |
| * There are up to UCLAMP_CNT possible different clamp values, currently there |
| * are only two: minimum utilization and maximum utilization. |
| * |
| * All utilization clamping values are MAX aggregated, since: |
| * - for util_min: we want to run the CPU at least at the max of the minimum |
| * utilization required by its currently RUNNABLE tasks. |
| * - for util_max: we want to allow the CPU to run up to the max of the |
| * maximum utilization allowed by its currently RUNNABLE tasks. |
| * |
| * Since on each system we expect only a limited number of different |
| * utilization clamp values (UCLAMP_BUCKETS), use a simple array to track |
| * the metrics required to compute all the per-rq utilization clamp values. |
| */ |
| struct uclamp_rq { |
| unsigned int value; |
| struct uclamp_bucket bucket[UCLAMP_BUCKETS]; |
| }; |
| |
| DECLARE_STATIC_KEY_FALSE(sched_uclamp_used); |
| #endif /* CONFIG_UCLAMP_TASK */ |
| |
| /* |
| * This is the main, per-CPU runqueue data structure. |
| * |
| * Locking rule: those places that want to lock multiple runqueues |
| * (such as the load balancing or the thread migration code) must |
| * order lock acquire operations by ascending &runqueue. |
| */ |
| struct rq { |
| /* runqueue lock: */ |
| raw_spinlock_t __lock; |
| |
| unsigned int nr_running; |
| #ifdef CONFIG_NUMA_BALANCING |
| unsigned int nr_numa_running; |
| unsigned int nr_preferred_running; |
| unsigned int numa_migrate_on; |
| #endif |
| #ifdef CONFIG_NO_HZ_COMMON |
| #ifdef CONFIG_SMP |
| unsigned long last_blocked_load_update_tick; |
| unsigned int has_blocked_load; |
| call_single_data_t nohz_csd; |
| #endif /* CONFIG_SMP */ |
| unsigned int nohz_tick_stopped; |
| atomic_t nohz_flags; |
| #endif /* CONFIG_NO_HZ_COMMON */ |
| |
| #ifdef CONFIG_SMP |
| unsigned int ttwu_pending; |
| #endif |
| u64 nr_switches; |
| |
| #ifdef CONFIG_UCLAMP_TASK |
| /* Utilization clamp values based on CPU's RUNNABLE tasks */ |
| struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned; |
| unsigned int uclamp_flags; |
| #define UCLAMP_FLAG_IDLE 0x01 |
| #endif |
| |
| struct cfs_rq cfs; |
| struct rt_rq rt; |
| struct dl_rq dl; |
| #ifdef CONFIG_SCHED_CLASS_EXT |
| struct scx_rq scx; |
| #endif |
| |
| struct sched_dl_entity fair_server; |
| |
| #ifdef CONFIG_FAIR_GROUP_SCHED |
| /* list of leaf cfs_rq on this CPU: */ |
| struct list_head leaf_cfs_rq_list; |
| struct list_head *tmp_alone_branch; |
| #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| |
| /* |
| * This is part of a global counter where only the total sum |
| * over all CPUs matters. A task can increase this counter on |
| * one CPU and if it got migrated afterwards it may decrease |
| * it on another CPU. Always updated under the runqueue lock: |
| */ |
| unsigned int nr_uninterruptible; |
| |
| struct task_struct __rcu *curr; |
| struct sched_dl_entity *dl_server; |
| struct task_struct *idle; |
| struct task_struct *stop; |
| unsigned long next_balance; |
| struct mm_struct *prev_mm; |
| |
| unsigned int clock_update_flags; |
| u64 clock; |
| /* Ensure that all clocks are in the same cache line */ |
| u64 clock_task ____cacheline_aligned; |
| u64 clock_pelt; |
| unsigned long lost_idle_time; |
| u64 clock_pelt_idle; |
| u64 clock_idle; |
| #ifndef CONFIG_64BIT |
| u64 clock_pelt_idle_copy; |
| u64 clock_idle_copy; |
| #endif |
| |
| atomic_t nr_iowait; |
| |
| #ifdef CONFIG_SCHED_DEBUG |
| u64 last_seen_need_resched_ns; |
| int ticks_without_resched; |
| #endif |
| |
| #ifdef CONFIG_MEMBARRIER |
| int membarrier_state; |
| #endif |
| |
| #ifdef CONFIG_SMP |
| struct root_domain *rd; |
| struct sched_domain __rcu *sd; |
| |
| unsigned long cpu_capacity; |
| |
| struct balance_callback *balance_callback; |
| |
| unsigned char nohz_idle_balance; |
| unsigned char idle_balance; |
| |
| unsigned long misfit_task_load; |
| |
| /* For active balancing */ |
| int active_balance; |
| int push_cpu; |
| struct cpu_stop_work active_balance_work; |
| |
| /* CPU of this runqueue: */ |
| int cpu; |
| int online; |
| |
| struct list_head cfs_tasks; |
| |
| struct sched_avg avg_rt; |
| struct sched_avg avg_dl; |
| #ifdef CONFIG_HAVE_SCHED_AVG_IRQ |
| struct sched_avg avg_irq; |
| #endif |
| #ifdef CONFIG_SCHED_HW_PRESSURE |
| struct sched_avg avg_hw; |
| #endif |
| u64 idle_stamp; |
| u64 avg_idle; |
| |
| /* This is used to determine avg_idle's max value */ |
| u64 max_idle_balance_cost; |
| |
| #ifdef CONFIG_HOTPLUG_CPU |
| struct rcuwait hotplug_wait; |
| #endif |
| #endif /* CONFIG_SMP */ |
| |
| #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
| u64 prev_irq_time; |
| u64 psi_irq_time; |
| #endif |
| #ifdef CONFIG_PARAVIRT |
| u64 prev_steal_time; |
| #endif |
| #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING |
| u64 prev_steal_time_rq; |
| #endif |
| |
| /* calc_load related fields */ |
| unsigned long calc_load_update; |
| long calc_load_active; |
| |
| #ifdef CONFIG_SCHED_HRTICK |
| #ifdef CONFIG_SMP |
| call_single_data_t hrtick_csd; |
| #endif |
| struct hrtimer hrtick_timer; |
| ktime_t hrtick_time; |
| #endif |
| |
| #ifdef CONFIG_SCHEDSTATS |
| /* latency stats */ |
| struct sched_info rq_sched_info; |
| unsigned long long rq_cpu_time; |
| |
| /* sys_sched_yield() stats */ |
| unsigned int yld_count; |
| |
| /* schedule() stats */ |
| unsigned int sched_count; |
| unsigned int sched_goidle; |
| |
| /* try_to_wake_up() stats */ |
| unsigned int ttwu_count; |
| unsigned int ttwu_local; |
| #endif |
| |
| #ifdef CONFIG_CPU_IDLE |
| /* Must be inspected within a RCU lock section */ |
| struct cpuidle_state *idle_state; |
| #endif |
| |
| #ifdef CONFIG_SMP |
| unsigned int nr_pinned; |
| #endif |
| unsigned int push_busy; |
| struct cpu_stop_work push_work; |
| |
| #ifdef CONFIG_SCHED_CORE |
| /* per rq */ |
| struct rq *core; |
| struct task_struct *core_pick; |
| struct sched_dl_entity *core_dl_server; |
| unsigned int core_enabled; |
| unsigned int core_sched_seq; |
| struct rb_root core_tree; |
| |
| /* shared state -- careful with sched_core_cpu_deactivate() */ |
| unsigned int core_task_seq; |
| unsigned int core_pick_seq; |
| unsigned long core_cookie; |
| unsigned int core_forceidle_count; |
| unsigned int core_forceidle_seq; |
| unsigned int core_forceidle_occupation; |
| u64 core_forceidle_start; |
| #endif |
| |
| /* Scratch cpumask to be temporarily used under rq_lock */ |
| cpumask_var_t scratch_mask; |
| |
| #if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP) |
| call_single_data_t cfsb_csd; |
| struct list_head cfsb_csd_list; |
| #endif |
| }; |
| |
| #ifdef CONFIG_FAIR_GROUP_SCHED |
| |
| /* CPU runqueue to which this cfs_rq is attached */ |
| static inline struct rq *rq_of(struct cfs_rq *cfs_rq) |
| { |
| return cfs_rq->rq; |
| } |
| |
| #else |
| |
| static inline struct rq *rq_of(struct cfs_rq *cfs_rq) |
| { |
| return container_of(cfs_rq, struct rq, cfs); |
| } |
| #endif |
| |
| static inline int cpu_of(struct rq *rq) |
| { |
| #ifdef CONFIG_SMP |
| return rq->cpu; |
| #else |
| return 0; |
| #endif |
| } |
| |
| #define MDF_PUSH 0x01 |
| |
| static inline bool is_migration_disabled(struct task_struct *p) |
| { |
| #ifdef CONFIG_SMP |
| return p->migration_disabled; |
| #else |
| return false; |
| #endif |
| } |
| |
| DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); |
| |
| #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) |
| #define this_rq() this_cpu_ptr(&runqueues) |
| #define task_rq(p) cpu_rq(task_cpu(p)) |
| #define cpu_curr(cpu) (cpu_rq(cpu)->curr) |
| #define raw_rq() raw_cpu_ptr(&runqueues) |
| |
| #ifdef CONFIG_SCHED_CORE |
| static inline struct cpumask *sched_group_span(struct sched_group *sg); |
| |
| DECLARE_STATIC_KEY_FALSE(__sched_core_enabled); |
| |
| static inline bool sched_core_enabled(struct rq *rq) |
| { |
| return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled; |
| } |
| |
| static inline bool sched_core_disabled(void) |
| { |
| return !static_branch_unlikely(&__sched_core_enabled); |
| } |
| |
| /* |
| * Be careful with this function; not for general use. The return value isn't |
| * stable unless you actually hold a relevant rq->__lock. |
| */ |
| static inline raw_spinlock_t *rq_lockp(struct rq *rq) |
| { |
| if (sched_core_enabled(rq)) |
| return &rq->core->__lock; |
| |
| return &rq->__lock; |
| } |
| |
| static inline raw_spinlock_t *__rq_lockp(struct rq *rq) |
| { |
| if (rq->core_enabled) |
| return &rq->core->__lock; |
| |
| return &rq->__lock; |
| } |
| |
| extern bool |
| cfs_prio_less(const struct task_struct *a, const struct task_struct *b, bool fi); |
| |
| extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi); |
| |
| /* |
| * Helpers to check if the CPU's core cookie matches with the task's cookie |
| * when core scheduling is enabled. |
| * A special case is that the task's cookie always matches with the CPU's core |
| * cookie if the CPU is in an idle core. |
| */ |
| static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) |
| { |
| /* Ignore cookie match if core scheduler is not enabled on the CPU. */ |
| if (!sched_core_enabled(rq)) |
| return true; |
| |
| return rq->core->core_cookie == p->core_cookie; |
| } |
| |
| static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) |
| { |
| bool idle_core = true; |
| int cpu; |
| |
| /* Ignore cookie match if core scheduler is not enabled on the CPU. */ |
| if (!sched_core_enabled(rq)) |
| return true; |
| |
| for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { |
| if (!available_idle_cpu(cpu)) { |
| idle_core = false; |
| break; |
| } |
| } |
| |
| /* |
| * A CPU in an idle core is always the best choice for tasks with |
| * cookies. |
| */ |
| return idle_core || rq->core->core_cookie == p->core_cookie; |
| } |
| |
| static inline bool sched_group_cookie_match(struct rq *rq, |
| struct task_struct *p, |
| struct sched_group *group) |
| { |
| int cpu; |
| |
| /* Ignore cookie match if core scheduler is not enabled on the CPU. */ |
| if (!sched_core_enabled(rq)) |
| return true; |
| |
| for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) { |
| if (sched_core_cookie_match(cpu_rq(cpu), p)) |
| return true; |
| } |
| return false; |
| } |
| |
| static inline bool sched_core_enqueued(struct task_struct *p) |
| { |
| return !RB_EMPTY_NODE(&p->core_node); |
| } |
| |
| extern void sched_core_enqueue(struct rq *rq, struct task_struct *p); |
| extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags); |
| |
| extern void sched_core_get(void); |
| extern void sched_core_put(void); |
| |
| #else /* !CONFIG_SCHED_CORE: */ |
| |
| static inline bool sched_core_enabled(struct rq *rq) |
| { |
| return false; |
| } |
| |
| static inline bool sched_core_disabled(void) |
| { |
| return true; |
| } |
| |
| static inline raw_spinlock_t *rq_lockp(struct rq *rq) |
| { |
| return &rq->__lock; |
| } |
| |
| static inline raw_spinlock_t *__rq_lockp(struct rq *rq) |
| { |
| return &rq->__lock; |
| } |
| |
| static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) |
| { |
| return true; |
| } |
| |
| static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) |
| { |
| return true; |
| } |
| |
| static inline bool sched_group_cookie_match(struct rq *rq, |
| struct task_struct *p, |
| struct sched_group *group) |
| { |
| return true; |
| } |
| |
| #endif /* !CONFIG_SCHED_CORE */ |
| |
| static inline void lockdep_assert_rq_held(struct rq *rq) |
| { |
| lockdep_assert_held(__rq_lockp(rq)); |
| } |
| |
| extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass); |
| extern bool raw_spin_rq_trylock(struct rq *rq); |
| extern void raw_spin_rq_unlock(struct rq *rq); |
| |
| static inline void raw_spin_rq_lock(struct rq *rq) |
| { |
| raw_spin_rq_lock_nested(rq, 0); |
| } |
| |
| static inline void raw_spin_rq_lock_irq(struct rq *rq) |
| { |
| local_irq_disable(); |
| raw_spin_rq_lock(rq); |
| } |
| |
| static inline void raw_spin_rq_unlock_irq(struct rq *rq) |
| { |
| raw_spin_rq_unlock(rq); |
| local_irq_enable(); |
| } |
| |
| static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq) |
| { |
| unsigned long flags; |
| |
| local_irq_save(flags); |
| raw_spin_rq_lock(rq); |
| |
| return flags; |
| } |
| |
| static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags) |
| { |
| raw_spin_rq_unlock(rq); |
| local_irq_restore(flags); |
| } |
| |
| #define raw_spin_rq_lock_irqsave(rq, flags) \ |
| do { \ |
| flags = _raw_spin_rq_lock_irqsave(rq); \ |
| } while (0) |
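| /* |
| * Illustrative only, mirroring raw_spin_lock_irqsave() usage: |
| * |
| * unsigned long flags; |
| * |
| * raw_spin_rq_lock_irqsave(rq, flags); |
| * ... |
| * raw_spin_rq_unlock_irqrestore(rq, flags); |
| */ |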
| |
| #ifdef CONFIG_SCHED_SMT |
| extern void __update_idle_core(struct rq *rq); |
| |
| static inline void update_idle_core(struct rq *rq) |
| { |
| if (static_branch_unlikely(&sched_smt_present)) |
| __update_idle_core(rq); |
| } |
| |
| #else |
| static inline void update_idle_core(struct rq *rq) { } |
| #endif |
| |
| #ifdef CONFIG_FAIR_GROUP_SCHED |
| |
| static inline struct task_struct *task_of(struct sched_entity *se) |
| { |
| SCHED_WARN_ON(!entity_is_task(se)); |
| return container_of(se, struct task_struct, se); |
| } |
| |
| static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) |
| { |
| return p->se.cfs_rq; |
| } |
| |
| /* runqueue on which this entity is (to be) queued */ |
| static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se) |
| { |
| return se->cfs_rq; |
| } |
| |
| /* runqueue "owned" by this group */ |
| static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) |
| { |
| return grp->my_q; |
| } |
| |
| #else /* !CONFIG_FAIR_GROUP_SCHED: */ |
| |
| #define task_of(_se) container_of(_se, struct task_struct, se) |
| |
| static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p) |
| { |
| return &task_rq(p)->cfs; |
| } |
| |
| static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se) |
| { |
| const struct task_struct *p = task_of(se); |
| struct rq *rq = task_rq(p); |
| |
| return &rq->cfs; |
| } |
| |
| /* runqueue "owned" by this group */ |
| static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) |
| { |
| return NULL; |
| } |
| |
| #endif /* !CONFIG_FAIR_GROUP_SCHED */ |
| |
| extern void update_rq_clock(struct rq *rq); |
| |
| /* |
| * rq::clock_update_flags bits |
| * |
| * %RQCF_REQ_SKIP - will request skipping of clock update on the next |
| * call to __schedule(). This is an optimisation to avoid |
| * neighbouring rq clock updates. |
| * |
| * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is |
| * in effect and calls to update_rq_clock() are being ignored. |
| * |
| * %RQCF_UPDATED - is a debug flag that indicates whether a call has been |
| * made to update_rq_clock() since the last time rq::lock was pinned. |
| * |
| * If inside of __schedule(), clock_update_flags will have been |
| * shifted left (a left shift is a cheap operation for the fast path |
| * to promote %RQCF_REQ_SKIP to %RQCF_ACT_SKIP), so you must use, |
| * |
| * if (rq->clock_update_flags >= RQCF_UPDATED) |
| * |
| * to check if %RQCF_UPDATED is set. It'll never be shifted more than |
| * one position though, because the next rq_unpin_lock() will shift it |
| * back. |
| */ |
| #define RQCF_REQ_SKIP 0x01 |
| #define RQCF_ACT_SKIP 0x02 |
| #define RQCF_UPDATED 0x04 |
| |
| static inline void assert_clock_updated(struct rq *rq) |
| { |
| /* |
| * The only reason for not seeing a clock update since the |
| * last rq_pin_lock() is if we're currently skipping updates. |
| */ |
| SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); |
| } |
| |
| static inline u64 rq_clock(struct rq *rq) |
| { |
| lockdep_assert_rq_held(rq); |
| assert_clock_updated(rq); |
| |
| return rq->clock; |
| } |
| |
| static inline u64 rq_clock_task(struct rq *rq) |
| { |
| lockdep_assert_rq_held(rq); |
| assert_clock_updated(rq); |
| |
| return rq->clock_task; |
| } |
| |
| static inline void rq_clock_skip_update(struct rq *rq) |
| { |
| lockdep_assert_rq_held(rq); |
| rq->clock_update_flags |= RQCF_REQ_SKIP; |
| } |
| |
| /* |
| * See rt task throttling, which is the only time a skip |
| * request is canceled. |
| */ |
| static inline void rq_clock_cancel_skipupdate(struct rq *rq) |
| { |
| lockdep_assert_rq_held(rq); |
| rq->clock_update_flags &= ~RQCF_REQ_SKIP; |
| } |
| |
| /* |
| * During cpu offlining and rq wide unthrottling, we can trigger |
| * an update_rq_clock() for several cfs and rt runqueues (typically |
| * when using list_for_each_entry_*). |
| * rq_clock_start_loop_update() can be called after updating the clock |
| * once and before iterating over the list to prevent multiple updates. |
| * After the iterative traversal, we need to call rq_clock_stop_loop_update() |
| * to clear RQCF_ACT_SKIP of rq->clock_update_flags. |
| */ |
| static inline void rq_clock_start_loop_update(struct rq *rq) |
| { |
| lockdep_assert_rq_held(rq); |
| SCHED_WARN_ON(rq->clock_update_flags & RQCF_ACT_SKIP); |
| rq->clock_update_flags |= RQCF_ACT_SKIP; |
| } |
| |
| static inline void rq_clock_stop_loop_update(struct rq *rq) |
| { |
| lockdep_assert_rq_held(rq); |
| rq->clock_update_flags &= ~RQCF_ACT_SKIP; |
| } |
| |
| struct rq_flags { |
| unsigned long flags; |
| struct pin_cookie cookie; |
| #ifdef CONFIG_SCHED_DEBUG |
| /* |
| * A copy of (rq::clock_update_flags & RQCF_UPDATED) for the |
| * current pin context is stashed here in case it needs to be |
| * restored in rq_repin_lock(). |
| */ |
| unsigned int clock_update_flags; |
| #endif |
| }; |
| |
| extern struct balance_callback balance_push_callback; |
| |
| /* |
| * Lockdep annotation that avoids accidental unlocks; it's like a |
| * sticky/continuous lockdep_assert_held(). |
| * |
| * This avoids code that has access to 'struct rq *rq' (basically everything in |
| * the scheduler) from accidentally unlocking the rq if they do not also have a |
| * copy of the (on-stack) 'struct rq_flags rf'. |
| * |
| * Also see Documentation/locking/lockdep-design.rst. |
| */ |
| static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) |
| { |
| rf->cookie = lockdep_pin_lock(__rq_lockp(rq)); |
| |
| #ifdef CONFIG_SCHED_DEBUG |
| rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); |
| rf->clock_update_flags = 0; |
| # ifdef CONFIG_SMP |
| SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback); |
| # endif |
| #endif |
| } |
| |
| static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) |
| { |
| #ifdef CONFIG_SCHED_DEBUG |
| if (rq->clock_update_flags > RQCF_ACT_SKIP) |
| rf->clock_update_flags = RQCF_UPDATED; |
| #endif |
| |
| lockdep_unpin_lock(__rq_lockp(rq), rf->cookie); |
| } |
| |
| static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) |
| { |
| lockdep_repin_lock(__rq_lockp(rq), rf->cookie); |
| |
| #ifdef CONFIG_SCHED_DEBUG |
| /* |
| * Restore the value we stashed in @rf for this pin context. |
| */ |
| rq->clock_update_flags |= rf->clock_update_flags; |
| #endif |
| } |
| |
| extern |
| struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) |
| __acquires(rq->lock); |
| |
| extern |
| struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) |
| __acquires(p->pi_lock) |
| __acquires(rq->lock); |
| |
| static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) |
| __releases(rq->lock) |
| { |
| rq_unpin_lock(rq, rf); |
| raw_spin_rq_unlock(rq); |
| } |
| |
| static inline void |
| task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) |
| __releases(rq->lock) |
| __releases(p->pi_lock) |
| { |
| rq_unpin_lock(rq, rf); |
| raw_spin_rq_unlock(rq); |
| raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); |
| } |
| |
| DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct, |
| _T->rq = task_rq_lock(_T->lock, &_T->rf), |
| task_rq_unlock(_T->rq, _T->lock, &_T->rf), |
| struct rq *rq; struct rq_flags rf) |
| |
| static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) |
| __acquires(rq->lock) |
| { |
| raw_spin_rq_lock_irqsave(rq, rf->flags); |
| rq_pin_lock(rq, rf); |
| } |
| |
| static inline void rq_lock_irq(struct rq *rq, struct rq_flags *rf) |
| __acquires(rq->lock) |
| { |
| raw_spin_rq_lock_irq(rq); |
| rq_pin_lock(rq, rf); |
| } |
| |
| static inline void rq_lock(struct rq *rq, struct rq_flags *rf) |
| __acquires(rq->lock) |
| { |
| raw_spin_rq_lock(rq); |
| rq_pin_lock(rq, rf); |
| } |
| |
| static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) |
| __releases(rq->lock) |
| { |
| rq_unpin_lock(rq, rf); |
| raw_spin_rq_unlock_irqrestore(rq, rf->flags); |
| } |
| |
| static inline void rq_unlock_irq(struct rq *rq, struct rq_flags *rf) |
| __releases(rq->lock) |
| { |
| rq_unpin_lock(rq, rf); |
| raw_spin_rq_unlock_irq(rq); |
| } |
| |
| static inline void rq_unlock(struct rq *rq, struct rq_flags *rf) |
| __releases(rq->lock) |
| { |
| rq_unpin_lock(rq, rf); |
| raw_spin_rq_unlock(rq); |
| } |
| |
| DEFINE_LOCK_GUARD_1(rq_lock, struct rq, |
| rq_lock(_T->lock, &_T->rf), |
| rq_unlock(_T->lock, &_T->rf), |
| struct rq_flags rf) |
| |
| DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq, |
| rq_lock_irq(_T->lock, &_T->rf), |
| rq_unlock_irq(_T->lock, &_T->rf), |
| struct rq_flags rf) |
| |
| DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq, |
| rq_lock_irqsave(_T->lock, &_T->rf), |
| rq_unlock_irqrestore(_T->lock, &_T->rf), |
| struct rq_flags rf) |
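| /* |
| * Illustrative only: the guards above enable scope-based locking, e.g. |
| * |
| * guard(rq_lock_irqsave)(rq); |
| * |
| * which pins the rq lock and releases it when the enclosing scope is left. |
| */ |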
| |
| static inline struct rq *this_rq_lock_irq(struct rq_flags *rf) |
| __acquires(rq->lock) |
| { |
| struct rq *rq; |
| |
| local_irq_disable(); |
| rq = this_rq(); |
| rq_lock(rq, rf); |
| |
| return rq; |
| } |
| |
| #ifdef CONFIG_NUMA |
| |
| enum numa_topology_type { |
| NUMA_DIRECT, |
| NUMA_GLUELESS_MESH, |
| NUMA_BACKPLANE, |
| }; |
| |
| extern enum numa_topology_type sched_numa_topology_type; |
| extern int sched_max_numa_distance; |
| extern bool find_numa_distance(int distance); |
| extern void sched_init_numa(int offline_node); |
| extern void sched_update_numa(int cpu, bool online); |
| extern void sched_domains_numa_masks_set(unsigned int cpu); |
| extern void sched_domains_numa_masks_clear(unsigned int cpu); |
| extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); |
| |
| #else /* !CONFIG_NUMA: */ |
| |
| static inline void sched_init_numa(int offline_node) { } |
| static inline void sched_update_numa(int cpu, bool online) { } |
| static inline void sched_domains_numa_masks_set(unsigned int cpu) { } |
| static inline void sched_domains_numa_masks_clear(unsigned int cpu) { } |
| |
| static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) |
| { |
| return nr_cpu_ids; |
| } |
| |
| #endif /* !CONFIG_NUMA */ |
| |
| #ifdef CONFIG_NUMA_BALANCING |
| |
| /* The regions in numa_faults array from task_struct */ |
| enum numa_faults_stats { |
| NUMA_MEM = 0, |
| NUMA_CPU, |
| NUMA_MEMBUF, |
| NUMA_CPUBUF |
| }; |
| |
| extern void sched_setnuma(struct task_struct *p, int node); |
| extern int migrate_task_to(struct task_struct *p, int cpu); |
| extern int migrate_swap(struct task_struct *p, struct task_struct *t, |
| int cpu, int scpu); |
| extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); |
| |
| #else /* !CONFIG_NUMA_BALANCING: */ |
| |
| static inline void |
| init_numa_balancing(unsigned long clone_flags, struct task_struct *p) |
| { |
| } |
| |
| #endif /* !CONFIG_NUMA_BALANCING */ |
| |
| #ifdef CONFIG_SMP |
| |
| static inline void |
| queue_balance_callback(struct rq *rq, |
| struct balance_callback *head, |
| void (*func)(struct rq *rq)) |
| { |
| lockdep_assert_rq_held(rq); |
| |
| /* |
| * Don't (re)queue an already queued item; nor queue anything when |
| * balance_push() is active, see the comment with |
| * balance_push_callback. |
| */ |
| if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) |
| return; |
| |
| head->func = func; |
| head->next = rq->balance_callback; |
| rq->balance_callback = head; |
| } |
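| /* |
| * Illustrative only, with a hypothetical per-CPU callback: |
| * |
| * static void my_push_work(struct rq *rq) { ... } |
| * static DEFINE_PER_CPU(struct balance_callback, my_cb); |
| * |
| * queue_balance_callback(rq, &per_cpu(my_cb, cpu_of(rq)), my_push_work); |
| * |
| * my_push_work() is then invoked later when the queued callbacks are run. |
| */ |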
| |
| #define rcu_dereference_check_sched_domain(p) \ |
| rcu_dereference_check((p), lockdep_is_held(&sched_domains_mutex)) |
| |
| /* |
| * The domain tree (rq->sd) is protected by RCU's quiescent state transition. |
| * See destroy_sched_domains: call_rcu for details. |
| * |
| * The domain tree of any CPU may only be accessed from within |
| * preempt-disabled sections. |
| */ |
| #define for_each_domain(cpu, __sd) \ |
| for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \ |
| __sd; __sd = __sd->parent) |
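| |
| /* |
|  * Illustrative walk of the domain tree (a sketch; any RCU read-side or |
|  * preempt-disabled section satisfies the rule above): |
|  * |
|  * rcu_read_lock(); |
|  * for_each_domain(cpu, sd) { |
|  * ...inspect sd->flags, sd->span_weight, etc... |
|  * } |
|  * rcu_read_unlock(); |
|  */ |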
| |
| /* A mask of all the SD flags that have the SDF_SHARED_CHILD metaflag */ |
| #define SD_FLAG(name, mflags) (name * !!((mflags) & SDF_SHARED_CHILD)) | |
| static const unsigned int SD_SHARED_CHILD_MASK = |
| #include <linux/sched/sd_flags.h> |
| 0; |
| #undef SD_FLAG |
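| |
| /* |
|  * How the mask above is built (descriptive note): each SD_FLAG(name, mflags) |
|  * entry in <linux/sched/sd_flags.h> contributes 'name' to the OR-chain when |
|  * it carries SDF_SHARED_CHILD and contributes 0 otherwise, so the expression |
|  * ends up containing exactly the shared-child flags, terminated by the |
|  * trailing 0. |
|  */ |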
| |
| /** |
| * highest_flag_domain - Return highest sched_domain containing flag. |
| * @cpu: The CPU whose highest level of sched domain is to |
| * be returned. |
| * @flag: The flag to check for the highest sched_domain |
| * for the given CPU. |
| * |
| * Returns the highest sched_domain of a CPU which contains @flag. If @flag has |
| * the SDF_SHARED_CHILD metaflag, all the children domains also have @flag. |
| */ |
| static inline struct sched_domain *highest_flag_domain(int cpu, int flag) |
| { |
| struct sched_domain *sd, *hsd = NULL; |
| |
| for_each_domain(cpu, sd) { |
| if (sd->flags & flag) { |
| hsd = sd; |
| continue; |
| } |
| |
| /* |
| * Stop the search if @flag is known to be shared at lower |
| * levels. It will not be found further up. |
| */ |
| if (flag & SD_SHARED_CHILD_MASK) |
| break; |
| } |
| |
| return hsd; |
| } |
| |
| static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) |
| { |
| struct sched_domain *sd; |
| |
| for_each_domain(cpu, sd) { |
| if (sd->flags & flag) |
| break; |
| } |
| |
| return sd; |
| } |
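| |
| /* |
|  * Illustrative use (a sketch): the per-CPU domain shortcuts declared below |
|  * are populated in topology.c with lookups of this kind, e.g.: |
|  * |
|  * sd = lowest_flag_domain(cpu, SD_NUMA); |
|  */ |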
| |
| DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); |
| DECLARE_PER_CPU(int, sd_llc_size); |
| DECLARE_PER_CPU(int, sd_llc_id); |
| DECLARE_PER_CPU(int, sd_share_id); |
| DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); |
| DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); |
| DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); |
| DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); |
| |
| extern struct static_key_false sched_asym_cpucapacity; |
| extern struct static_key_false sched_cluster_active; |
| |
| static __always_inline bool sched_asym_cpucap_active(void) |
| { |
| return static_branch_unlikely(&sched_asym_cpucapacity); |
| } |
| |
| struct sched_group_capacity { |
| atomic_t ref; |
| /* |
| * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity |
| * for a single CPU. |
| */ |
| unsigned long capacity; |
| unsigned long min_capacity; /* Min per-CPU capacity in group */ |
| unsigned long max_capacity; /* Max per-CPU capacity in group */ |
| unsigned long next_update; |
| int imbalance; /* XXX unrelated to capacity but shared group state */ |
| |
| #ifdef CONFIG_SCHED_DEBUG |
| int id; |
| #endif |
| |
| unsigned long cpumask[]; /* Balance mask */ |
| }; |
| |
| struct sched_group { |
| struct sched_group *next; /* Must be a circular list */ |
| atomic_t ref; |
| |
| unsigned int group_weight; |
| unsigned int cores; |
| struct sched_group_capacity *sgc; |
| int asym_prefer_cpu; /* CPU of highest priority in group */ |
| int flags; |
| |
| /* |
| * The CPUs this group covers. |
| * |
| * NOTE: this field is variable length. (Allocated dynamically |
| * by attaching extra space to the end of the structure, |
| * depending on how many CPUs the kernel has booted up with) |
| */ |
| unsigned long cpumask[]; |
| }; |
| |
| static inline struct cpumask *sched_group_span(struct sched_group *sg) |
| { |
| return to_cpumask(sg->cpumask); |
| } |
| |
| /* |
| * See build_balance_mask(). |
| */ |
| static inline struct cpumask *group_balance_mask(struct sched_group *sg) |
| { |
| return to_cpumask(sg->sgc->cpumask); |
| } |
| |
| extern int group_balance_cpu(struct sched_group *sg); |
| |
| #ifdef CONFIG_SCHED_DEBUG |
| extern void update_sched_domain_debugfs(void); |
| extern void dirty_sched_domain_sysctl(int cpu); |
| #else |
| static inline void update_sched_domain_debugfs(void) { } |
| static inline void dirty_sched_domain_sysctl(int cpu) { } |
| #endif |
| |
| extern int sched_update_scaling(void); |
| |
| static inline const struct cpumask *task_user_cpus(struct task_struct *p) |
| { |
| if (!p->user_cpus_ptr) |
| return cpu_possible_mask; /* &init_task.cpus_mask */ |
| return p->user_cpus_ptr; |
| } |
| |
| #endif /* CONFIG_SMP */ |
| |
| #include "stats.h" |
| |
| #if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS) |
| |
| extern void __sched_core_account_forceidle(struct rq *rq); |
| |
| static inline void sched_core_account_forceidle(struct rq *rq) |
| { |
| if (schedstat_enabled()) |
| __sched_core_account_forceidle(rq); |
| } |
| |
| extern void __sched_core_tick(struct rq *rq); |
| |
| static inline void sched_core_tick(struct rq *rq) |
| { |
| if (sched_core_enabled(rq) && schedstat_enabled()) |
| __sched_core_tick(rq); |
| } |
| |
| #else /* !(CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS): */ |
| |
| static inline void sched_core_account_forceidle(struct rq *rq) { } |
| |
| static inline void sched_core_tick(struct rq *rq) { } |
| |
| #endif /* !(CONFIG_SCHED_CORE && CONFIG_SCHEDSTATS) */ |
| |
| #ifdef CONFIG_CGROUP_SCHED |
| |
| /* |
| * Return the group to which this task belongs. |
| * |
| * We cannot use task_css() and friends because the cgroup subsystem |
| * changes that value before the cgroup_subsys::attach() method is called, |
| * therefore we cannot pin it and might observe the wrong value. |
| * |
| * The same is true for autogroup's p->signal->autogroup->tg, the autogroup |
| * core changes this before calling sched_move_task(). |
| * |
| * Instead we use a 'copy' which is updated from sched_move_task() while |
| * holding both task_struct::pi_lock and rq::lock. |
| */ |
| static inline struct task_group *task_group(struct task_struct *p) |
| { |
| return p->sched_task_group; |
| } |
| |
| /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ |
| static inline void set_task_rq(struct task_struct *p, unsigned int cpu) |
| { |
| #if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED) |
| struct task_group *tg = task_group(p); |
| #endif |
| |
| #ifdef CONFIG_FAIR_GROUP_SCHED |
| set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); |
| p->se.cfs_rq = tg->cfs_rq[cpu]; |
| p->se.parent = tg->se[cpu]; |
| p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0; |
| #endif |
| |
| #ifdef CONFIG_RT_GROUP_SCHED |
| p->rt.rt_rq = tg->rt_rq[cpu]; |
| p->rt.parent = tg->rt_se[cpu]; |
| #endif |
| } |
| |
| #else /* !CONFIG_CGROUP_SCHED: */ |
| |
| static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } |
| |
| static inline struct task_group *task_group(struct task_struct *p) |
| { |
| return NULL; |
| } |
| |
| #endif /* !CONFIG_CGROUP_SCHED */ |
| |
| static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) |
| { |
| set_task_rq(p, cpu); |
| #ifdef CONFIG_SMP |
| /* |
| * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be |
| * successfully executed on another CPU. We must ensure that updates of |
| * per-task data have been completed by this moment. |
| */ |
| smp_wmb(); |
| WRITE_ONCE(task_thread_info(p)->cpu, cpu); |
| p->wake_cpu = cpu; |
| #endif |
| } |
| |
| /* |
| * Tunables that become constants when CONFIG_SCHED_DEBUG is off: |
| */ |
| #ifdef CONFIG_SCHED_DEBUG |
| # define const_debug __read_mostly |
| #else |
| # define const_debug const |
| #endif |
| |
| #define SCHED_FEAT(name, enabled) \ |
| __SCHED_FEAT_##name , |
| |
| enum { |
| #include "features.h" |
| __SCHED_FEAT_NR, |
| }; |
| |
| #undef SCHED_FEAT |
| |
| #ifdef CONFIG_SCHED_DEBUG |
| |
| /* |
| * To support run-time toggling of sched features, all the translation units |
| * (except core.c) reference the sysctl_sched_features defined in core.c. |
| */ |
| extern const_debug unsigned int sysctl_sched_features; |
| |
| #ifdef CONFIG_JUMP_LABEL |
| |
| #define SCHED_FEAT(name, enabled) \ |
| static __always_inline bool static_branch_##name(struct static_key *key) \ |
| { \ |
| return static_key_##enabled(key); \ |
| } |
| |
| #include "features.h" |
| #undef SCHED_FEAT |
| |
| extern struct static_key sched_feat_keys[__SCHED_FEAT_NR]; |
| #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x])) |
| |
| #else /* !CONFIG_JUMP_LABEL: */ |
| |
| #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) |
| |
| #endif /* !CONFIG_JUMP_LABEL */ |
| |
| #else /* !CONFIG_SCHED_DEBUG: */ |
| |
| /* |
| * Each translation unit has its own copy of sysctl_sched_features to allow |
| * constant propagation at compile time and compiler optimizations based on |
| * the feature defaults. |
| */ |
| #define SCHED_FEAT(name, enabled) \ |
| (1UL << __SCHED_FEAT_##name) * enabled | |
| static const_debug __maybe_unused unsigned int sysctl_sched_features = |
| #include "features.h" |
| 0; |
| #undef SCHED_FEAT |
| |
| #define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x)) |
| |
| #endif /* !CONFIG_SCHED_DEBUG */ |
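| |
| /* |
|  * Illustrative use (a sketch): features defined in features.h are tested |
|  * with sched_feat(), e.g.: |
|  * |
|  * if (sched_feat(HRTICK)) |
|  * ... |
|  */ |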
| |
| extern struct static_key_false sched_numa_balancing; |
| extern struct static_key_false sched_schedstats; |
| |
| static inline u64 global_rt_period(void) |
| { |
| return (u64)sysctl_sched_rt_period * NSEC_PER_USEC; |
| } |
| |
| static inline u64 global_rt_runtime(void) |
| { |
| if (sysctl_sched_rt_runtime < 0) |
| return RUNTIME_INF; |
| |
| return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; |
| } |
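| |
| /* |
|  * Worked example (assuming the usual defaults of sysctl_sched_rt_period == |
|  * 1000000 us and sysctl_sched_rt_runtime == 950000 us): global_rt_period() |
|  * returns 1 s worth of nanoseconds and global_rt_runtime() returns 0.95 s, |
|  * i.e. RT tasks may consume at most 95% of each period. |
|  */ |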
| |
| static inline int task_current(struct rq *rq, struct task_struct *p) |
| { |
| return rq->curr == p; |
| } |
| |
| static inline int task_on_cpu(struct rq *rq, struct task_struct *p) |
| { |
| #ifdef CONFIG_SMP |
| return p->on_cpu; |
| #else |
| return task_current(rq, p); |
| #endif |
| } |
| |
| static inline int task_on_rq_queued(struct task_struct *p) |
| { |
| return p->on_rq == TASK_ON_RQ_QUEUED; |
| } |
| |
| static inline int task_on_rq_migrating(struct task_struct *p) |
| { |
| return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING; |
| } |
| |
| /* Wake flags. The first three directly map to some SD flag value */ |
| #define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */ |
| #define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */ |
| #define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */ |
| |
| #define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */ |
| #define WF_MIGRATED 0x20 /* Internal use, task got migrated */ |
| #define WF_CURRENT_CPU 0x40 /* Prefer to move the wakee to the current CPU. */ |
| #define WF_RQ_SELECTED 0x80 /* ->select_task_rq() was called */ |
| |
| #ifdef CONFIG_SMP |
| static_assert(WF_EXEC == SD_BALANCE_EXEC); |
| static_assert(WF_FORK == SD_BALANCE_FORK); |
| static_assert(WF_TTWU == SD_BALANCE_WAKE); |
| #endif |
| |
| /* |
| * To aid in avoiding the subversion of "niceness" due to uneven distribution |
| * of tasks with abnormal "nice" values across CPUs the contribution that |
| * each task makes to its run queue's load is weighted according to its |
| * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a |
| * scaled version of the new time slice allocation that they receive on time |
| * slice expiry etc. |
| */ |
| |
| #define WEIGHT_IDLEPRIO 3 |
| #define WMULT_IDLEPRIO 1431655765 |
| |
| extern const int sched_prio_to_weight[40]; |
| extern const u32 sched_prio_to_wmult[40]; |
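| |
| /* |
|  * Worked example (values quoted from the table in core.c, indexed by |
|  * static_prio - MAX_RT_PRIO, so nice 0 maps to index 20): nice 0 has weight |
|  * 1024, nice -1 has 1277 and nice +1 has 820, i.e. each step down in nice |
|  * (higher priority) multiplies the load contribution by roughly 1.25. |
|  */ |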
| |
| /* |
| * {de,en}queue flags: |
| * |
| * DEQUEUE_SLEEP - task is no longer runnable |
| * ENQUEUE_WAKEUP - task just became runnable |
| * |
| * SAVE/RESTORE - an otherwise spurious dequeue/enqueue, done to ensure tasks |
| * are in a known state which allows modification. Such pairs |
| * should preserve as much state as possible. |
| * |
| * MOVE - paired with SAVE/RESTORE, explicitly does not preserve the location |
| * in the runqueue. |
| * |
| * NOCLOCK - skip the update_rq_clock() (avoids double updates) |
| * |
| * MIGRATION - p->on_rq == TASK_ON_RQ_MIGRATING (used for DEADLINE) |
| * |
| * ENQUEUE_HEAD - place at front of runqueue (tail if not specified) |
| * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline) |
| * ENQUEUE_MIGRATED - the task was migrated during wakeup |
| * ENQUEUE_RQ_SELECTED - ->select_task_rq() was called |
| * |
| */ |
| |
| #define DEQUEUE_SLEEP 0x01 /* Matches ENQUEUE_WAKEUP */ |
| #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ |
| #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ |
| #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ |
| #define DEQUEUE_SPECIAL 0x10 |
| #define DEQUEUE_MIGRATING 0x100 /* Matches ENQUEUE_MIGRATING */ |
| #define DEQUEUE_DELAYED 0x200 /* Matches ENQUEUE_DELAYED */ |
| |
| #define ENQUEUE_WAKEUP 0x01 |
| #define ENQUEUE_RESTORE 0x02 |
| #define ENQUEUE_MOVE 0x04 |
| #define ENQUEUE_NOCLOCK 0x08 |
| |
| #define ENQUEUE_HEAD 0x10 |
| #define ENQUEUE_REPLENISH 0x20 |
| #ifdef CONFIG_SMP |
| #define ENQUEUE_MIGRATED 0x40 |
| #else |
| #define ENQUEUE_MIGRATED 0x00 |
| #endif |
| #define ENQUEUE_INITIAL 0x80 |
| #define ENQUEUE_MIGRATING 0x100 |
| #define ENQUEUE_DELAYED 0x200 |
| #define ENQUEUE_RQ_SELECTED 0x400 |
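| |
| /* |
|  * Illustrative SAVE/RESTORE pairing (a sketch of the common pattern used |
|  * when changing a queued task's parameters; see enqueue_task() and |
|  * dequeue_task() declared further down): |
|  * |
|  * queued = task_on_rq_queued(p); |
|  * if (queued) |
|  * dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); |
|  * ...update p... |
|  * if (queued) |
|  * enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); |
|  */ |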
| |
| #define RETRY_TASK ((void *)-1UL) |
| |
| struct affinity_context { |
| const struct cpumask *new_mask; |
| struct cpumask *user_mask; |
| unsigned int flags; |
| }; |
| |
| extern s64 update_curr_common(struct rq *rq); |
| |
| struct sched_class { |
| |
| #ifdef CONFIG_UCLAMP_TASK |
| int uclamp_enabled; |
| #endif |
| |
| void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); |
| bool (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); |
| void (*yield_task) (struct rq *rq); |
| bool (*yield_to_task)(struct rq *rq, struct task_struct *p); |
| |
| void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags); |
| |
| int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); |
| struct task_struct *(*pick_task)(struct rq *rq); |
| /* |
| * Optional! When implemented, pick_next_task() should be equivalent to: |
| * |
| * next = pick_task(); |
| * if (next) { |
| * put_prev_task(prev); |
| * set_next_task_first(next); |
| * } |
| */ |
| struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev); |
| |
| void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next); |
| void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); |
| |
| #ifdef CONFIG_SMP |
| int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags); |
| |
| void (*migrate_task_rq)(struct task_struct *p, int new_cpu); |
| |
| void (*task_woken)(struct rq *this_rq, struct task_struct *task); |
| |
| void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx); |
| |
| void (*rq_online)(struct rq *rq); |
| void (*rq_offline)(struct rq *rq); |
| |
| struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq); |
| #endif |
| |
| void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); |
| void (*task_fork)(struct task_struct *p); |
| void (*task_dead)(struct task_struct *p); |
| |
| /* |
| * The switched_from() call is allowed to drop rq->lock, therefore we |
| * cannot assume the switched_from/switched_to pair is serialized by |
| * rq->lock. They are however serialized by p->pi_lock. |
| */ |
| void (*switching_to) (struct rq *this_rq, struct task_struct *task); |
| void (*switched_from)(struct rq *this_rq, struct task_struct *task); |
| void (*switched_to) (struct rq *this_rq, struct task_struct *task); |
| void (*reweight_task)(struct rq *this_rq, struct task_struct *task, |
| const struct load_weight *lw); |
| void (*prio_changed) (struct rq *this_rq, struct task_struct *task, |
| int oldprio); |
| |
| unsigned int (*get_rr_interval)(struct rq *rq, |
| struct task_struct *task); |
| |
| void (*update_curr)(struct rq *rq); |
| |
| #ifdef CONFIG_FAIR_GROUP_SCHED |
| void (*task_change_group)(struct task_struct *p); |
| #endif |
| |
| #ifdef CONFIG_SCHED_CORE |
| int (*task_is_throttled)(struct task_struct *p, int cpu); |
| #endif |
| }; |
| |
| static inline void put_prev_task(struct rq *rq, struct task_struct *prev) |
| { |
| WARN_ON_ONCE(rq->curr != prev); |
| prev->sched_class->put_prev_task(rq, prev, NULL); |
| } |
| |
| static inline void set_next_task(struct rq *rq, struct task_struct *next) |
| { |
| next->sched_class->set_next_task(rq, next, false); |
| } |
| |
| static inline void |
| __put_prev_set_next_dl_server(struct rq *rq, |
| struct task_struct *prev, |
| struct task_struct *next) |
| { |
| prev->dl_server = NULL; |
| next->dl_server = rq->dl_server; |
| rq->dl_server = NULL; |
| } |
| |
| static inline void put_prev_set_next_task(struct rq *rq, |
| struct task_struct *prev, |
| struct task_struct *next) |
| { |
| WARN_ON_ONCE(rq->curr != prev); |
| |
| __put_prev_set_next_dl_server(rq, prev, next); |
| |
| if (next == prev) |
| return; |
| |
| prev->sched_class->put_prev_task(rq, prev, next); |
| next->sched_class->set_next_task(rq, next, true); |
| } |
| |
| /* |
| * Helper to define a sched_class instance; each one is placed in a separate |
| * section which is ordered by the linker script: |
| * |
| * include/asm-generic/vmlinux.lds.h |
| * |
| * *CAREFUL* they are laid out in *REVERSE* order!!! |
| * |
| * Also enforce alignment on the instance, not the type, to guarantee layout. |
| */ |
| #define DEFINE_SCHED_CLASS(name) \ |
| const struct sched_class name##_sched_class \ |
| __aligned(__alignof__(struct sched_class)) \ |
| __section("__" #name "_sched_class") |
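| |
| /* |
|  * For instance, fair.c instantiates its class roughly as (sketch, only two |
|  * members shown): |
|  * |
|  * DEFINE_SCHED_CLASS(fair) = { |
|  * .enqueue_task = enqueue_task_fair, |
|  * .dequeue_task = dequeue_task_fair, |
|  * ... |
|  * }; |
|  */ |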
| |
| /* Defined in include/asm-generic/vmlinux.lds.h */ |
| extern struct sched_class __sched_class_highest[]; |
| extern struct sched_class __sched_class_lowest[]; |
| |
| extern const struct sched_class stop_sched_class; |
| extern const struct sched_class dl_sched_class; |
| extern const struct sched_class rt_sched_class; |
| extern const struct sched_class fair_sched_class; |
| extern const struct sched_class idle_sched_class; |
| |
| #ifdef CONFIG_SCHED_CLASS_EXT |
| extern const struct sched_class ext_sched_class; |
| |
| DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); /* SCX BPF scheduler loaded */ |
| DECLARE_STATIC_KEY_FALSE(__scx_switched_all); /* all fair class tasks on SCX */ |
| |
| #define scx_enabled() static_branch_unlikely(&__scx_ops_enabled) |
| #define scx_switched_all() static_branch_unlikely(&__scx_switched_all) |
| #else /* !CONFIG_SCHED_CLASS_EXT */ |
| #define scx_enabled() false |
| #define scx_switched_all() false |
| #endif /* !CONFIG_SCHED_CLASS_EXT */ |
| |
| /* |
| * Iterate only active classes. SCX can take over all fair tasks or be |
| * completely disabled. If the former, skip fair. If the latter, skip SCX. |
| */ |
| static inline const struct sched_class *next_active_class(const struct sched_class *class) |
| { |
| class++; |
| #ifdef CONFIG_SCHED_CLASS_EXT |
| if (scx_switched_all() && class == &fair_sched_class) |
| class++; |
| if (!scx_enabled() && class == &ext_sched_class) |
| class++; |
| #endif |
| return class; |
| } |
| |
| #define for_class_range(class, _from, _to) \ |
| for (class = (_from); class < (_to); class++) |
| |
| #define for_each_class(class) \ |
| for_class_range(class, __sched_class_highest, __sched_class_lowest) |
| |
| #define for_active_class_range(class, _from, _to) \ |
| for (class = (_from); class != (_to); class = next_active_class(class)) |
| |
| #define for_each_active_class(class) \ |
| for_active_class_range(class, __sched_class_highest, __sched_class_lowest) |
| |
| #define sched_class_above(_a, _b) ((_a) < (_b)) |
| |
| static inline bool sched_stop_runnable(struct rq *rq) |
| { |
| return rq->stop && task_on_rq_queued(rq->stop); |
| } |
| |
| static inline bool sched_dl_runnable(struct rq *rq) |
| { |
| return rq->dl.dl_nr_running > 0; |
| } |
| |
| static inline bool sched_rt_runnable(struct rq *rq) |
| { |
| return rq->rt.rt_queued > 0; |
| } |
| |
| static inline bool sched_fair_runnable(struct rq *rq) |
| { |
| return rq->cfs.nr_running > 0; |
| } |
| |
| extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); |
| extern struct task_struct *pick_task_idle(struct rq *rq); |
| |
| #define SCA_CHECK 0x01 |
| #define SCA_MIGRATE_DISABLE 0x02 |
| #define SCA_MIGRATE_ENABLE 0x04 |
| #define SCA_USER 0x08 |
| |
| #ifdef CONFIG_SMP |
| |
| extern void update_group_capacity(struct sched_domain *sd, int cpu); |
| |
| extern void sched_balance_trigger(struct rq *rq); |
| |
| extern int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx); |
| extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx); |
| |
| static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu) |
| { |
| /* When not in the task's cpumask, no point in looking further. */ |
| if (!cpumask_test_cpu(cpu, p->cpus_ptr)) |
| return false; |
| |
| /* Can @cpu run a user thread? */ |
| if (!(p->flags & PF_KTHREAD) && !task_cpu_possible(cpu, p)) |
| return false; |
| |
| return true; |
| } |
| |
| static inline cpumask_t *alloc_user_cpus_ptr(int node) |
| { |
| /* |
| * See do_set_cpus_allowed() above for the rcu_head usage. |
| */ |
| int size = max_t(int, cpumask_size(), sizeof(struct rcu_head)); |
| |
| return kmalloc_node(size, GFP_KERNEL, node); |
| } |
| |
| static inline struct task_struct *get_push_task(struct rq *rq) |
| { |
| struct task_struct *p = rq->curr; |
| |
| lockdep_assert_rq_held(rq); |
| |
| if (rq->push_busy) |
| return NULL; |
| |
| if (p->nr_cpus_allowed == 1) |
| return NULL; |
| |
| if (p->migration_disabled) |
| return NULL; |
| |
| rq->push_busy = true; |
| return get_task_struct(p); |
| } |
| |
| extern int push_cpu_stop(void *arg); |
| |
| #else /* !CONFIG_SMP: */ |
| |
| static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu) |
| { |
| return true; |
| } |
| |
| static inline int __set_cpus_allowed_ptr(struct task_struct *p, |
| struct affinity_context *ctx) |
| { |
| return set_cpus_allowed_ptr(p, ctx->new_mask); |
| } |
| |
| static inline cpumask_t *alloc_user_cpus_ptr(int node) |
| { |
| return NULL; |
| } |
| |
| #endif /* !CONFIG_SMP */ |
| |
| #ifdef CONFIG_CPU_IDLE |
| |
| static inline void idle_set_state(struct rq *rq, |
| struct cpuidle_state *idle_state) |
| { |
| rq->idle_state = idle_state; |
| } |
| |
| static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
| { |
| SCHED_WARN_ON(!rcu_read_lock_held()); |
| |
| return rq->idle_state; |
| } |
| |
| #else /* !CONFIG_CPU_IDLE: */ |
| |
| static inline void idle_set_state(struct rq *rq, |
| struct cpuidle_state *idle_state) |
| { |
| } |
| |
| static inline struct cpuidle_state *idle_get_state(struct rq *rq) |
| { |
| return NULL; |
| } |
| |
| #endif /* !CONFIG_CPU_IDLE */ |
| |
| extern void schedule_idle(void); |
| asmlinkage void schedule_user(void); |
| |
| extern void sysrq_sched_debug_show(void); |
| extern void sched_init_granularity(void); |
| extern void update_max_interval(void); |
| |
| extern void init_sched_dl_class(void); |
| extern void init_sched_rt_class(void); |
| extern void init_sched_fair_class(void); |
| |
| extern void resched_curr(struct rq *rq); |
| extern void resched_cpu(int cpu); |
| |
| extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); |
| extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); |
| |
| extern void init_dl_entity(struct sched_dl_entity *dl_se); |
| |
| #define BW_SHIFT 20 |
| #define BW_UNIT (1 << BW_SHIFT) |
| #define RATIO_SHIFT 8 |
| #define MAX_BW_BITS (64 - BW_SHIFT) |
| #define MAX_BW ((1ULL << MAX_BW_BITS) - 1) |
| |
| extern unsigned long to_ratio(u64 period, u64 runtime); |
| |
| extern void init_entity_runnable_average(struct sched_entity *se); |
| extern void post_init_entity_util_avg(struct task_struct *p); |
| |
| #ifdef CONFIG_NO_HZ_FULL |
| extern bool sched_can_stop_tick(struct rq *rq); |
| extern int __init sched_tick_offload_init(void); |
| |
| /* |
| * The tick may be needed by tasks in the runqueue depending on their policy |
| * and requirements. If the tick is needed, send the target CPU an IPI to kick |
| * it out of nohz mode if necessary. |
| */ |
| static inline void sched_update_tick_dependency(struct rq *rq) |
| { |
| int cpu = cpu_of(rq); |
| |
| if (!tick_nohz_full_cpu(cpu)) |
| return; |
| |
| if (sched_can_stop_tick(rq)) |
| tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); |
| else |
| tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); |
| } |
| #else /* !CONFIG_NO_HZ_FULL: */ |
| static inline int sched_tick_offload_init(void) { return 0; } |
| static inline void sched_update_tick_dependency(struct rq *rq) { } |
| #endif /* !CONFIG_NO_HZ_FULL */ |
| |
| static inline void add_nr_running(struct rq *rq, unsigned count) |
| { |
| unsigned prev_nr = rq->nr_running; |
| |
| rq->nr_running = prev_nr + count; |
| if (trace_sched_update_nr_running_tp_enabled()) { |
| call_trace_sched_update_nr_running(rq, count); |
| } |
| |
| #ifdef CONFIG_SMP |
| if (prev_nr < 2 && rq->nr_running >= 2) |
| set_rd_overloaded(rq->rd, 1); |
| #endif |
| |
| sched_update_tick_dependency(rq); |
| } |
| |
| static inline void sub_nr_running(struct rq *rq, unsigned count) |
| { |
| rq->nr_running -= count; |
| if (trace_sched_update_nr_running_tp_enabled()) { |
| call_trace_sched_update_nr_running(rq, -count); |
| } |
| |
| /* Check if we still need preemption */ |
| sched_update_tick_dependency(rq); |
| } |
| |
| static inline void __block_task(struct rq *rq, struct task_struct *p) |
| { |
| if (p->sched_contributes_to_load) |
| rq->nr_uninterruptible++; |
| |
| if (p->in_iowait) { |
| atomic_inc(&rq->nr_iowait); |
| delayacct_blkio_start(); |
| } |
| |
| ASSERT_EXCLUSIVE_WRITER(p->on_rq); |
| |
| /* |
| * The moment this write goes through, ttwu() can swoop in and migrate |
| * this task, rendering our rq->__lock ineffective. |
| * |
| * __schedule() try_to_wake_up() |
| * LOCK rq->__lock LOCK p->pi_lock |
| * pick_next_task() |
| * pick_next_task_fair() |
| * pick_next_entity() |
| * dequeue_entities() |
| * __block_task() |
| * RELEASE p->on_rq = 0 if (p->on_rq && ...) |
| * break; |
| * |
| * ACQUIRE (after ctrl-dep) |
| * |
| * cpu = select_task_rq(); |
| * set_task_cpu(p, cpu); |
| * ttwu_queue() |
| * ttwu_do_activate() |
| * LOCK rq->__lock |
| * activate_task() |
| * STORE p->on_rq = 1 |
| * UNLOCK rq->__lock |
| * |
| * Callers must ensure to not reference @p after this -- we no longer |
| * own it. |
| */ |
| smp_store_release(&p->on_rq, 0); |
| } |
| |
| extern void activate_task(struct rq *rq, struct task_struct *p, int flags); |
| extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); |
| |
| extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags); |
| |
| #ifdef CONFIG_PREEMPT_RT |
| # define SCHED_NR_MIGRATE_BREAK 8 |
| #else |
| # define SCHED_NR_MIGRATE_BREAK 32 |
| #endif |
| |
| extern const_debug unsigned int sysctl_sched_nr_migrate; |
| extern const_debug unsigned int sysctl_sched_migration_cost; |
| |
| extern unsigned int sysctl_sched_base_slice; |
| |
| #ifdef CONFIG_SCHED_DEBUG |
| extern int sysctl_resched_latency_warn_ms; |
| extern int sysctl_resched_latency_warn_once; |
| |
| extern unsigned int sysctl_sched_tunable_scaling; |
| |
| extern unsigned int sysctl_numa_balancing_scan_delay; |
| extern unsigned int sysctl_numa_balancing_scan_period_min; |
| extern unsigned int sysctl_numa_balancing_scan_period_max; |
| extern unsigned int sysctl_numa_balancing_scan_size; |
| extern unsigned int sysctl_numa_balancing_hot_threshold; |
| #endif |
| |
| #ifdef CONFIG_SCHED_HRTICK |
| |
| /* |
| * Use hrtick when: |
| * - enabled by features |
| * - hrtimer is actually high res |
| */ |
| static inline int hrtick_enabled(struct rq *rq) |
| { |
| if (!cpu_active(cpu_of(rq))) |
| return 0; |
| return hrtimer_is_hres_active(&rq->hrtick_timer); |
| } |
| |
| static inline int hrtick_enabled_fair(struct rq *rq) |
| { |
| if (!sched_feat(HRTICK)) |
| return 0; |
| return hrtick_enabled(rq); |
| } |
| |
| static inline int hrtick_enabled_dl(struct rq *rq) |
| { |
| if (!sched_feat(HRTICK_DL)) |
| return 0; |
| return hrtick_enabled(rq); |
| } |
| |
| extern void hrtick_start(struct rq *rq, u64 delay); |
| |
| #else /* !CONFIG_SCHED_HRTICK: */ |
| |
| static inline int hrtick_enabled_fair(struct rq *rq) |
| { |
| return 0; |
| } |
| |
| static inline int hrtick_enabled_dl(struct rq *rq) |
| { |
| return 0; |
| } |
| |
| static inline int hrtick_enabled(struct rq *rq) |
| { |
| return 0; |
| } |
| |
| #endif /* !CONFIG_SCHED_HRTICK */ |
| |
| #ifndef arch_scale_freq_tick |
| static __always_inline void arch_scale_freq_tick(void) { } |
| #endif |
| |
| #ifndef arch_scale_freq_capacity |
| /** |
| * arch_scale_freq_capacity - get the frequency scale factor of a given CPU. |
| * @cpu: the CPU in question. |
| * |
| * Return: the frequency scale factor normalized against SCHED_CAPACITY_SCALE, i.e. |
| * |
| * f_curr |
| * ------ * SCHED_CAPACITY_SCALE |
| * f_max |
| */ |
| static __always_inline |
| unsigned long arch_scale_freq_capacity(int cpu) |
| { |
| return SCHED_CAPACITY_SCALE; |
| } |
| #endif |
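| |
| /* |
|  * Worked example for the formula above: a CPU currently running at 1.5 GHz |
|  * with f_max == 3 GHz yields (1.5 / 3) * SCHED_CAPACITY_SCALE == 512. |
|  */ |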
| |
| #ifdef CONFIG_SCHED_DEBUG |
| /* |
| * In double_lock_balance()/double_rq_lock(), we use raw_spin_rq_lock() to |
| * acquire rq lock instead of rq_lock(). So at the end of these two functions |
| * we need to call double_rq_clock_clear_update() to clear RQCF_UPDATED of |
| * rq->clock_update_flags to avoid the WARN_DOUBLE_CLOCK warning. |
| */ |
| static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) |
| { |
| rq1->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); |
| /* rq1 == rq2 for !CONFIG_SMP, so just clear RQCF_UPDATED once. */ |
| #ifdef CONFIG_SMP |
| rq2->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); |
| #endif |
| } |
| #else |
| static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) { } |
| #endif |
| |
| #define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...) \ |
| __DEFINE_UNLOCK_GUARD(name, type, _unlock, type *lock2; __VA_ARGS__) \ |
| static inline class_##name##_t class_##name##_constructor(type *lock, type *lock2) \ |
| { class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t; \ |
| _lock; return _t; } |
| |
| #ifdef CONFIG_SMP |
| |
| static inline bool rq_order_less(struct rq *rq1, struct rq *rq2) |
| { |
| #ifdef CONFIG_SCHED_CORE |
| /* |
| * In order to not have {0,2},{1,3} turn into an AB-BA, |
| * order by core-id first and cpu-id second. |
| * |
| * Notably: |
| * |
| * double_rq_lock(0,3); will take core-0, core-1 lock |
| * double_rq_lock(1,2); will take core-1, core-0 lock |
| * |
| * when only cpu-id is considered. |
| */ |
| if (rq1->core->cpu < rq2->core->cpu) |
| return true; |
| if (rq1->core->cpu > rq2->core->cpu) |
| return false; |
| |
| /* |
| * __sched_core_flip() relies on SMT having cpu-id lock order. |
| */ |
| #endif |
| return rq1->cpu < rq2->cpu; |
| } |
| |
| extern void double_rq_lock(struct rq *rq1, struct rq *rq2); |
| |
| #ifdef CONFIG_PREEMPTION |
| |
| /* |
| * fair double_lock_balance: Safely acquires both rq->locks in a fair |
| * way at the expense of forcing extra atomic operations in all |
| * invocations. This assures that the double_lock is acquired using the |
| * same underlying policy as the spinlock_t on this architecture, which |
| * reduces latency compared to the unfair variant below. However, it |
| * also adds more overhead and therefore may reduce throughput. |
| */ |
| static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| __releases(this_rq->lock) |
| __acquires(busiest->lock) |
| __acquires(this_rq->lock) |
| { |
| raw_spin_rq_unlock(this_rq); |
| double_rq_lock(this_rq, busiest); |
| |
| return 1; |
| } |
| |
| #else /* !CONFIG_PREEMPTION: */ |
| /* |
| * Unfair double_lock_balance: Optimizes throughput at the expense of |
| * latency by eliminating extra atomic operations when the locks are |
| * already in proper order on entry. This favors lower CPU-ids and will |
| * grant the double lock to lower CPUs over higher ids under contention, |
| * regardless of entry order into the function. |
| */ |
| static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| __releases(this_rq->lock) |
| __acquires(busiest->lock) |
| __acquires(this_rq->lock) |
| { |
| if (__rq_lockp(this_rq) == __rq_lockp(busiest) || |
| likely(raw_spin_rq_trylock(busiest))) { |
| double_rq_clock_clear_update(this_rq, busiest); |
| return 0; |
| } |
| |
| if (rq_order_less(this_rq, busiest)) { |
| raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING); |
| double_rq_clock_clear_update(this_rq, busiest); |
| return 0; |
| } |
| |
| raw_spin_rq_unlock(this_rq); |
| double_rq_lock(this_rq, busiest); |
| |
| return 1; |
| } |
| |
| #endif /* !CONFIG_PREEMPTION */ |
| |
| /* |
| * double_lock_balance - lock the busiest runqueue, this_rq is locked already. |
| */ |
| static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) |
| { |
| lockdep_assert_irqs_disabled(); |
| |
| return _double_lock_balance(this_rq, busiest); |
| } |
| |
| static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) |
| __releases(busiest->lock) |
| { |
| if (__rq_lockp(this_rq) != __rq_lockp(busiest)) |
| raw_spin_rq_unlock(busiest); |
| lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_); |
| } |
| |
| static inline void double_lock(spinlock_t *l1, spinlock_t *l2) |
| { |
| if (l1 > l2) |
| swap(l1, l2); |
| |
| spin_lock(l1); |
| spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| } |
| |
| static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2) |
| { |
| if (l1 > l2) |
| swap(l1, l2); |
| |
| spin_lock_irq(l1); |
| spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| } |
| |
| static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2) |
| { |
| if (l1 > l2) |
| swap(l1, l2); |
| |
| raw_spin_lock(l1); |
| raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING); |
| } |
| |
| static inline void double_raw_unlock(raw_spinlock_t *l1, raw_spinlock_t *l2) |
| { |
| raw_spin_unlock(l1); |
| raw_spin_unlock(l2); |
| } |
| |
| DEFINE_LOCK_GUARD_2(double_raw_spinlock, raw_spinlock_t, |
| double_raw_lock(_T->lock, _T->lock2), |
| double_raw_unlock(_T->lock, _T->lock2)) |
| |
| /* |
| * double_rq_unlock - safely unlock two runqueues |
| * |
| * Note this does not restore interrupts like task_rq_unlock, |
| * you need to do so manually after calling. |
| */ |
| static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| __releases(rq1->lock) |
| __releases(rq2->lock) |
| { |
| if (__rq_lockp(rq1) != __rq_lockp(rq2)) |
| raw_spin_rq_unlock(rq2); |
| else |
| __release(rq2->lock); |
| raw_spin_rq_unlock(rq1); |
| } |
| |
| extern void set_rq_online (struct rq *rq); |
| extern void set_rq_offline(struct rq *rq); |
| |
| extern bool sched_smp_initialized; |
| |
| #else /* !CONFIG_SMP: */ |
| |
| /* |
| * double_rq_lock - safely lock two runqueues |
| * |
| * Note this does not disable interrupts like task_rq_lock, |
| * you need to do so manually before calling. |
| */ |
| static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) |
| __acquires(rq1->lock) |
| __acquires(rq2->lock) |
| { |
| WARN_ON_ONCE(!irqs_disabled()); |
| WARN_ON_ONCE(rq1 != rq2); |
| raw_spin_rq_lock(rq1); |
| __acquire(rq2->lock); /* Fake it out ;) */ |
| double_rq_clock_clear_update(rq1, rq2); |
| } |
| |
| /* |
| * double_rq_unlock - safely unlock two runqueues |
| * |
| * Note this does not restore interrupts like task_rq_unlock, |
| * you need to do so manually after calling. |
| */ |
| static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) |
| __releases(rq1->lock) |
| __releases(rq2->lock) |
| { |
| WARN_ON_ONCE(rq1 != rq2); |
| raw_spin_rq_unlock(rq1); |
| __release(rq2->lock); |
| } |
| |
| #endif /* !CONFIG_SMP */ |
| |
| DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq, |
| double_rq_lock(_T->lock, _T->lock2), |
| double_rq_unlock(_T->lock, _T->lock2)) |
| |
| extern struct sched_entity *__pick_root_entity(struct cfs_rq *cfs_rq); |
| extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq); |
| extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq); |
| |
| #ifdef CONFIG_SCHED_DEBUG |
| extern bool sched_debug_verbose; |
| |
| extern void print_cfs_stats(struct seq_file *m, int cpu); |
| extern void print_rt_stats(struct seq_file *m, int cpu); |
| extern void print_dl_stats(struct seq_file *m, int cpu); |
| extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); |
| extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); |
| extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq); |
| |
| extern void resched_latency_warn(int cpu, u64 latency); |
| # ifdef CONFIG_NUMA_BALANCING |
| extern void show_numa_stats(struct task_struct *p, struct seq_file *m); |
| extern void |
| print_numa_stats(struct seq_file *m, int node, unsigned long tsf, |
| unsigned long tpf, unsigned long gsf, unsigned long gpf); |
| # endif /* CONFIG_NUMA_BALANCING */ |
| #else /* !CONFIG_SCHED_DEBUG: */ |
| static inline void resched_latency_warn(int cpu, u64 latency) { } |
| #endif /* !CONFIG_SCHED_DEBUG */ |
| |
| extern void init_cfs_rq(struct cfs_rq *cfs_rq); |
| extern void init_rt_rq(struct rt_rq *rt_rq); |
| extern void init_dl_rq(struct dl_rq *dl_rq); |
| |
| extern void cfs_bandwidth_usage_inc(void); |
| extern void cfs_bandwidth_usage_dec(void); |
| |
| #ifdef CONFIG_NO_HZ_COMMON |
| |
| #define NOHZ_BALANCE_KICK_BIT 0 |
| #define NOHZ_STATS_KICK_BIT 1 |
| #define NOHZ_NEWILB_KICK_BIT 2 |
| #define NOHZ_NEXT_KICK_BIT 3 |
| |
| /* Run sched_balance_domains() */ |
| #define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) |
| /* Update blocked load */ |
| #define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) |
| /* Update blocked load when entering idle */ |
| #define NOHZ_NEWILB_KICK BIT(NOHZ_NEWILB_KICK_BIT) |
| /* Update nohz.next_balance */ |
| #define NOHZ_NEXT_KICK BIT(NOHZ_NEXT_KICK_BIT) |
| |
| #define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK) |
| |
| #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) |
| |
| extern void nohz_balance_exit_idle(struct rq *rq); |
| #else /* !CONFIG_NO_HZ_COMMON: */ |
| static inline void nohz_balance_exit_idle(struct rq *rq) { } |
| #endif /* !CONFIG_NO_HZ_COMMON */ |
| |
| #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) |
| extern void nohz_run_idle_balance(int cpu); |
| #else |
| static inline void nohz_run_idle_balance(int cpu) { } |
| #endif |
| |
| #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
| |
| struct irqtime { |
| u64 total; |
| u64 tick_delta; |
| u64 irq_start_time; |
| struct u64_stats_sync sync; |
| }; |
| |
| DECLARE_PER_CPU(struct irqtime, cpu_irqtime); |
| |
| /* |
| * Returns the irqtime minus the softirq time computed by ksoftirqd. |
| * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime |
| * subtracted from it and would never move forward. |
| */ |
| static inline u64 irq_time_read(int cpu) |
| { |
| struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); |
| unsigned int seq; |
| u64 total; |
| |
| do { |
| seq = __u64_stats_fetch_begin(&irqtime->sync); |
| total = irqtime->total; |
| } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); |
| |
| return total; |
| } |
| |
| #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ |
| |
| #ifdef CONFIG_CPU_FREQ |
| |
| DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); |
| |
| /** |
| * cpufreq_update_util - Take a note about CPU utilization changes. |
| * @rq: Runqueue to carry out the update for. |
| * @flags: Update reason flags. |
| * |
| * This function is called by the scheduler on the CPU whose utilization is |
| * being updated. |
| * |
| * It can only be called from RCU-sched read-side critical sections. |
| * |
| * The way cpufreq is currently arranged requires it to evaluate the CPU |
| * performance state (frequency/voltage) on a regular basis to prevent it from |
| * being stuck in a completely inadequate performance level for too long. |
| * That is not guaranteed to happen if the updates are only triggered from CFS |
| * and DL, though, because those updates may not come in at all if only RT |
| * tasks are active. |
| * |
| * As a workaround for that issue, this function is called periodically by the |
| * RT sched class to trigger extra cpufreq updates to prevent it from stalling, |
| * but that really is a band-aid. Going forward it should be replaced with |
| * solutions targeted more specifically at RT tasks. |
| */ |
| static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) |
| { |
| struct update_util_data *data; |
| |
| data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, |
| cpu_of(rq))); |
| if (data) |
| data->func(data, rq_clock(rq), flags); |
| } |
| #else /* !CONFIG_CPU_FREQ: */ |
| static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) { } |
| #endif /* !CONFIG_CPU_FREQ */ |
| |
| #ifdef arch_scale_freq_capacity |
| # ifndef arch_scale_freq_invariant |
| # define arch_scale_freq_invariant() true |
| # endif |
| #else |
| # define arch_scale_freq_invariant() false |
| #endif |
| |
| #ifdef CONFIG_SMP |
| |
| unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, |
| unsigned long *min, |
| unsigned long *max); |
| |
| unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual, |
| unsigned long min, |
| unsigned long max); |
| |
| /* |
| * Verify the fitness of task @p to run on @cpu taking into account the |
| * CPU original capacity and the runtime/deadline ratio of the task. |
| * |
| * The function will return true if the original capacity of @cpu is |
| * greater than or equal to task's deadline density right shifted by |
| * (BW_SHIFT - SCHED_CAPACITY_SHIFT) and false otherwise. |
| */ |
| static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu) |
| { |
| unsigned long cap = arch_scale_cpu_capacity(cpu); |
| |
| return cap >= p->dl.dl_density >> (BW_SHIFT - SCHED_CAPACITY_SHIFT); |
| } |
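| |
| /* |
|  * Worked example (assuming dl_density == (dl_runtime << BW_SHIFT) / |
|  * dl_deadline, cf. to_ratio() above): a runtime of 5 ms out of a 10 ms |
|  * deadline gives a density of (5 << BW_SHIFT) / 10 == 524288; shifted right |
|  * by BW_SHIFT - SCHED_CAPACITY_SHIFT == 10 this is 512, so the task fits any |
|  * CPU whose original capacity is at least 512 (half of SCHED_CAPACITY_SCALE). |
|  */ |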
| |
| static inline unsigned long cpu_bw_dl(struct rq *rq) |
| { |
| return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; |
| } |
| |
| static inline unsigned long cpu_util_dl(struct rq *rq) |
| { |
| return READ_ONCE(rq->avg_dl.util_avg); |
| } |
| |
| extern unsigned long cpu_util_cfs(int cpu); |
| extern unsigned long cpu_util_cfs_boost(int cpu); |
| |
| static inline unsigned long cpu_util_rt(struct rq *rq) |
| { |
| return READ_ONCE(rq->avg_rt.util_avg); |
| } |
| |
| #else /* !CONFIG_SMP: */ |
| static inline bool update_other_load_avgs(struct rq *rq) { return false; } |
| #endif /* !CONFIG_SMP */ |
| |
| #ifdef CONFIG_UCLAMP_TASK |
| |
| unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id); |
| |
| static inline unsigned long uclamp_rq_get(struct rq *rq, |
| enum uclamp_id clamp_id) |
| { |
| return READ_ONCE(rq->uclamp[clamp_id].value); |
| } |
| |
| static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, |
| unsigned int value) |
| { |
| WRITE_ONCE(rq->uclamp[clamp_id].value, value); |
| } |
| |
| static inline bool uclamp_rq_is_idle(struct rq *rq) |
| { |
| return rq->uclamp_flags & UCLAMP_FLAG_IDLE; |
| } |
| |
| /* Is the rq being capped/throttled by uclamp_max? */ |
| static inline bool uclamp_rq_is_capped(struct rq *rq) |
| { |
| unsigned long rq_util; |
| unsigned long max_util; |
| |
| if (!static_branch_likely(&sched_uclamp_used)) |
| return false; |
| |
| rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq); |
| max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); |
| |
| return max_util != SCHED_CAPACITY_SCALE && rq_util >= max_util; |
| } |
| |
| /* |
| * When uclamp is compiled in, the aggregation at rq level is 'turned off' |
| * by default in the fast path and only gets turned on once userspace performs |
| * an operation that requires it. |
| * |
| * Returns true if userspace opted-in to use uclamp and aggregation at rq level |
| * hence is active. |
| */ |
| static inline bool uclamp_is_used(void) |
| { |
| return static_branch_likely(&sched_uclamp_used); |
| } |
| |
| #define for_each_clamp_id(clamp_id) \ |
| for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++) |
| |
| extern unsigned int sysctl_sched_uclamp_util_min_rt_default; |
| |
| static inline unsigned int uclamp_none(enum uclamp_id clamp_id) |
| { |
| if (clamp_id == UCLAMP_MIN) |
| return 0; |
| return SCHED_CAPACITY_SCALE; |
| } |
| |
| /* Integer rounded range for each bucket */ |
| #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS) |
| |
| static inline unsigned int uclamp_bucket_id(unsigned int clamp_value) |
| { |
| return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1); |
| } |
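| |
| /* |
|  * Worked example (assuming UCLAMP_BUCKETS == 5, the default config): |
|  * UCLAMP_BUCKET_DELTA == DIV_ROUND_CLOSEST(1024, 5) == 205, so a clamp value |
|  * of 300 lands in bucket 1 and SCHED_CAPACITY_SCALE (1024) in the last |
|  * bucket (4). |
|  */ |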
| |
| static inline void |
| uclamp_se_set(struct uclamp_se *uc_se, unsigned int value, bool user_defined) |
| { |
| uc_se->value = value; |
| uc_se->bucket_id = uclamp_bucket_id(value); |
| uc_se->user_defined = user_defined; |
| } |
| |
| #else /* !CONFIG_UCLAMP_TASK: */ |
| |
| static inline unsigned long |
| uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id) |
| { |
| if (clamp_id == UCLAMP_MIN) |
| return 0; |
| |
| return SCHED_CAPACITY_SCALE; |
| } |
| |
| static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; } |
| |
| static inline bool uclamp_is_used(void) |
| { |
| return false; |
| } |
| |
| static inline unsigned long |
| uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id) |
| { |
| if (clamp_id == UCLAMP_MIN) |
| return 0; |
| |
| return SCHED_CAPACITY_SCALE; |
| } |
| |
| static inline void |
| uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, unsigned int value) |
| { |
| } |
| |
| static inline bool uclamp_rq_is_idle(struct rq *rq) |
| { |
| return false; |
| } |
| |
| #endif /* !CONFIG_UCLAMP_TASK */ |
| |
| #ifdef CONFIG_HAVE_SCHED_AVG_IRQ |
| |
| static inline unsigned long cpu_util_irq(struct rq *rq) |
| { |
| return READ_ONCE(rq->avg_irq.util_avg); |
| } |
| |
| static inline |
| unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) |
| { |
| util *= (max - irq); |
| util /= max; |
| |
| return util; |
| |
| } |
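| |
| /* |
|  * Worked example: util == 600, irq == 100, max == SCHED_CAPACITY_SCALE (1024) |
|  * scales to 600 * (1024 - 100) / 1024 == 541, i.e. the CFS/RT utilization is |
|  * discounted by the fraction of capacity consumed by IRQ time. |
|  */ |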
| |
| #else /* !CONFIG_HAVE_SCHED_AVG_IRQ: */ |
| |
| static inline unsigned long cpu_util_irq(struct rq *rq) |
| { |
| return 0; |
| } |
| |
| static inline |
| unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max) |
| { |
| return util; |
| } |
| |
| #endif /* !CONFIG_HAVE_SCHED_AVG_IRQ */ |
| |
| #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) |
| |
| #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) |
| |
| DECLARE_STATIC_KEY_FALSE(sched_energy_present); |
| |
| static inline bool sched_energy_enabled(void) |
| { |
| return static_branch_unlikely(&sched_energy_present); |
| } |
| |
| extern struct cpufreq_governor schedutil_gov; |
| |
| #else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */ |
| |
| #define perf_domain_span(pd) NULL |
| |
| static inline bool sched_energy_enabled(void) { return false; } |
| |
| #endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */ |
| |
| #ifdef CONFIG_MEMBARRIER |
| |
| /* |
| * The scheduler provides memory barriers required by membarrier between: |
| * - prior user-space memory accesses and store to rq->membarrier_state, |
| * - store to rq->membarrier_state and following user-space memory accesses. |
| * In the same way it provides those guarantees around store to rq->curr. |
| */ |
| static inline void membarrier_switch_mm(struct rq *rq, |
| struct mm_struct *prev_mm, |
| struct mm_struct *next_mm) |
| { |
| int membarrier_state; |
| |
| if (prev_mm == next_mm) |
| return; |
| |
| membarrier_state = atomic_read(&next_mm->membarrier_state); |
| if (READ_ONCE(rq->membarrier_state) == membarrier_state) |
| return; |
| |
| WRITE_ONCE(rq->membarrier_state, membarrier_state); |
| } |
| |
| #else /* !CONFIG_MEMBARRIER: */ |
| |
| static inline void membarrier_switch_mm(struct rq *rq, |
| struct mm_struct *prev_mm, |
| struct mm_struct *next_mm) |
| { |
| } |
| |
| #endif /* !CONFIG_MEMBARRIER */ |
| |
| #ifdef CONFIG_SMP |
| static inline bool is_per_cpu_kthread(struct task_struct *p) |
| { |
| if (!(p->flags & PF_KTHREAD)) |
| return false; |
| |
| if (p->nr_cpus_allowed != 1) |
| return false; |
| |
| return true; |
| } |
| #endif |
| |
| extern void swake_up_all_locked(struct swait_queue_head *q); |
| extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); |
| |
| extern int try_to_wake_up(struct task_struct *tsk, unsigned int state, int wake_flags); |
| |
| #ifdef CONFIG_PREEMPT_DYNAMIC |
| extern int preempt_dynamic_mode; |
| extern int sched_dynamic_mode(const char *str); |
| extern void sched_dynamic_update(int mode); |
| #endif |
| |
| #ifdef CONFIG_SCHED_MM_CID |
| |
| #define SCHED_MM_CID_PERIOD_NS (100ULL * 1000000) /* 100ms */ |
| #define MM_CID_SCAN_DELAY 100 /* 100ms */ |
| |
| extern raw_spinlock_t cid_lock; |
| extern int use_cid_lock; |
| |
| extern void sched_mm_cid_migrate_from(struct task_struct *t); |
| extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t); |
| extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr); |
| extern void init_sched_mm_cid(struct task_struct *t); |
| |
| static inline void __mm_cid_put(struct mm_struct *mm, int cid) |
| { |
| if (cid < 0) |
| return; |
| cpumask_clear_cpu(cid, mm_cidmask(mm)); |
| } |
| |
| /* |
| * The per-mm/cpu cid can have the MM_CID_LAZY_PUT flag set or transition to |
| * the MM_CID_UNSET state without holding the rq lock, but the rq lock needs to |
| * be held to transition to other states. |
| * |
| * State transitions synchronized with cmpxchg or try_cmpxchg need to be |
| * consistent across CPUs, which prevents use of this_cpu_cmpxchg. |
| */ |
| static inline void mm_cid_put_lazy(struct task_struct *t) |
| { |
| struct mm_struct *mm = t->mm; |
| struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; |
| int cid; |
| |
| lockdep_assert_irqs_disabled(); |
| cid = __this_cpu_read(pcpu_cid->cid); |
| if (!mm_cid_is_lazy_put(cid) || |
| !try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET)) |
| return; |
| __mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); |
| } |
| |
| static inline int mm_cid_pcpu_unset(struct mm_struct *mm) |
| { |
| struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; |
| int cid, res; |
| |
| lockdep_assert_irqs_disabled(); |
| cid = __this_cpu_read(pcpu_cid->cid); |
| for (;;) { |
| if (mm_cid_is_unset(cid)) |
| return MM_CID_UNSET; |
| /* |
| * Attempt transition from valid or lazy-put to unset. |
| */ |
| res = cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, cid, MM_CID_UNSET); |
| if (res == cid) |
| break; |
| cid = res; |
| } |
| return cid; |
| } |
| |
| static inline void mm_cid_put(struct mm_struct *mm) |
| { |
| int cid; |
| |
| lockdep_assert_irqs_disabled(); |
| cid = mm_cid_pcpu_unset(mm); |
| if (cid == MM_CID_UNSET) |
| return; |
| __mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); |
| } |
| |
| static inline int __mm_cid_try_get(struct mm_struct *mm) |
| { |
| struct cpumask *cpumask; |
| int cid; |
| |
| cpumask = mm_cidmask(mm); |
| /* |
| * Retry finding first zero bit if the mask is temporarily |
| * filled. This only happens during concurrent remote-clear |
| * which owns a cid without holding a rq lock. |
| */ |
| for (;;) { |
| cid = cpumask_first_zero(cpumask); |
| if (cid < nr_cpu_ids) |
| break; |
| cpu_relax(); |
| } |
| if (cpumask_test_and_set_cpu(cid, cpumask)) |
| return -1; |
| |
| return cid; |
| } |
| |
| /* |
| * Save a snapshot of the current runqueue time of this cpu |
| * with the per-cpu cid value, allowing to estimate how recently it was used. |
| */ |
| static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm) |
| { |
| struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq)); |
| |
| lockdep_assert_rq_held(rq); |
| WRITE_ONCE(pcpu_cid->time, rq->clock); |
| } |
| |
| static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm) |
| { |
| int cid; |
| |
| /* |
| * All allocations (even those using the cid_lock) are lock-free. If |
| * use_cid_lock is set, hold the cid_lock to perform cid allocation to |
| * guarantee forward progress. |
| */ |
| if (!READ_ONCE(use_cid_lock)) { |
| cid = __mm_cid_try_get(mm); |
| if (cid >= 0) |
| goto end; |
| raw_spin_lock(&cid_lock); |
| } else { |
| raw_spin_lock(&cid_lock); |
| cid = __mm_cid_try_get(mm); |
| if (cid >= 0) |
| goto unlock; |
| } |
| |
| /* |
| * cid concurrently allocated. Retry while forcing following |
| * allocations to use the cid_lock to ensure forward progress. |
| */ |
| WRITE_ONCE(use_cid_lock, 1); |
| /* |
| * Set use_cid_lock before allocation. Only care about program order |
| * because this is only required for forward progress. |
| */ |
| barrier(); |
| /* |
| * Retry until it succeeds. It is guaranteed to eventually succeed once |
| * all newcoming allocations observe the use_cid_lock flag set. |
| */ |
| do { |
| cid = __mm_cid_try_get(mm); |
| cpu_relax(); |
| } while (cid < 0); |
| /* |
| * Allocate before clearing use_cid_lock. Only care about |
| * program order because this is for forward progress. |
| */ |
| barrier(); |
| WRITE_ONCE(use_cid_lock, 0); |
| unlock: |
| raw_spin_unlock(&cid_lock); |
| end: |
| mm_cid_snapshot_time(rq, mm); |
| |
| return cid; |
| } |
| |
| static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm) |
| { |
| struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; |
| struct cpumask *cpumask; |
| int cid; |
| |
| lockdep_assert_rq_held(rq); |
| cpumask = mm_cidmask(mm); |
| cid = __this_cpu_read(pcpu_cid->cid); |
| if (mm_cid_is_valid(cid)) { |
| mm_cid_snapshot_time(rq, mm); |
| return cid; |
| } |
| if (mm_cid_is_lazy_put(cid)) { |
| if (try_cmpxchg(&this_cpu_ptr(pcpu_cid)->cid, &cid, MM_CID_UNSET)) |
| __mm_cid_put(mm, mm_cid_clear_lazy_put(cid)); |
| } |
| cid = __mm_cid_get(rq, mm); |
| __this_cpu_write(pcpu_cid->cid, cid); |
| |
| return cid; |
| } |
| |
| static inline void switch_mm_cid(struct rq *rq, |
| struct task_struct *prev, |
| struct task_struct *next) |
| { |
| /* |
| * Provide a memory barrier between rq->curr store and load of |
| * {prev,next}->mm->pcpu_cid[cpu] on rq->curr->mm transition. |
| * |
| * Should be adapted if context_switch() is modified. |
| */ |
| if (!next->mm) { // to kernel |
| /* |
| * user -> kernel transition does not guarantee a barrier, but |
| * we can use the fact that it performs an atomic operation in |
| * mmgrab(). |
| */ |
| if (prev->mm) // from user |
| smp_mb__after_mmgrab(); |
| /* |
| * kernel -> kernel transition does not change rq->curr->mm |
| * state. It stays NULL. |
| */ |
| } else { // to user |
| /* |
| * kernel -> user transition does not provide a barrier |
| * between rq->curr store and load of {prev,next}->mm->pcpu_cid[cpu]. |
| * Provide it here. |
| */ |
| if (!prev->mm) { // from kernel |
| smp_mb(); |
| } else { // from user |
| /* |
| * user->user transition relies on an implicit |
| * memory barrier in switch_mm() when |
| * current->mm changes. If the architecture |
| * switch_mm() does not have an implicit memory |
| * barrier, it is emitted here. If current->mm |
| * is unchanged, no barrier is needed. |
| */ |
| smp_mb__after_switch_mm(); |
| } |
| } |
| if (prev->mm_cid_active) { |
| mm_cid_snapshot_time(rq, prev->mm); |
| mm_cid_put_lazy(prev); |
| prev->mm_cid = -1; |
| } |
| if (next->mm_cid_active) |
| next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm); |
| } |
| |
| #else /* !CONFIG_SCHED_MM_CID: */ |
| static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next) { } |
| static inline void sched_mm_cid_migrate_from(struct task_struct *t) { } |
| static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { } |
| static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { } |
| static inline void init_sched_mm_cid(struct task_struct *t) { } |
| #endif /* !CONFIG_SCHED_MM_CID */ |
| |
| extern u64 avg_vruntime(struct cfs_rq *cfs_rq); |
| extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); |
| |
| #ifdef CONFIG_RT_MUTEXES |
| |
| static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) |
| { |
| if (pi_task) |
| prio = min(prio, pi_task->prio); |
| |
| return prio; |
| } |
| |
| static inline int rt_effective_prio(struct task_struct *p, int prio) |
| { |
| struct task_struct *pi_task = rt_mutex_get_top_task(p); |
| |
| return __rt_effective_prio(pi_task, prio); |
| } |
| |
| #else /* !CONFIG_RT_MUTEXES: */ |
| |
| static inline int rt_effective_prio(struct task_struct *p, int prio) |
| { |
| return prio; |
| } |
| |
| #endif /* !CONFIG_RT_MUTEXES */ |
| |
| extern int __sched_setscheduler(struct task_struct *p, const struct sched_attr *attr, bool user, bool pi); |
| extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx); |
| extern const struct sched_class *__setscheduler_class(int policy, int prio); |
| extern void set_load_weight(struct task_struct *p, bool update_load); |
| extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags); |
| extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags); |
| |
| extern void check_class_changing(struct rq *rq, struct task_struct *p, |
| const struct sched_class *prev_class); |
| extern void check_class_changed(struct rq *rq, struct task_struct *p, |
| const struct sched_class *prev_class, |
| int oldprio); |
| |
| #ifdef CONFIG_SMP |
| extern struct balance_callback *splice_balance_callbacks(struct rq *rq); |
| extern void balance_callbacks(struct rq *rq, struct balance_callback *head); |
| #else |
| |
| static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) |
| { |
| return NULL; |
| } |
| |
| static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) |
| { |
| } |
| |
| #endif |
| |
| #ifdef CONFIG_SCHED_CLASS_EXT |
| /* |
| * Used by SCX in the enable/disable paths to move tasks between sched_classes |
| * and establish invariants. |
| */ |
| struct sched_enq_and_set_ctx { |
| struct task_struct *p; |
| int queue_flags; |
| bool queued; |
| bool running; |
| }; |
| |
| void sched_deq_and_put_task(struct task_struct *p, int queue_flags, |
| struct sched_enq_and_set_ctx *ctx); |
| void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx); |
| |
| #endif /* CONFIG_SCHED_CLASS_EXT */ |
| |
| #include "ext.h" |
| |
| #endif /* _KERNEL_SCHED_SCHED_H */ |