Gentoo's Bugzilla – Attachment 874144 Details for Bug 916954:
sys-kernel/gentoo-sources-6.{6,7,8}.x: modified RT patch with BORE patch
Description: BORE patch for 6.6.0
Filename:    0001-linux6.6.y-bore3.2.9.patch
MIME Type:   text/plain
Creator:     deim
Created:     2023-11-06 09:01:11 UTC
Size:        19.39 KB
Flags:       patch, obsolete
From 37b082c18909b815efb0c38128a7b8c45a6ecf3a Mon Sep 17 00:00:00 2001
From: Masahito S <firelzrd@gmail.com>
Date: Sat, 28 Oct 2023 08:31:20 +0900
Subject: [PATCH] linux6.6.y-bore3.2.9

---
 include/linux/sched.h    |  31 +++++++
 init/Kconfig             |  19 ++++
 kernel/sched/autogroup.c |   4 +
 kernel/sched/core.c      | 163 +++++++++++++++++++++++++++++++++
 kernel/sched/debug.c     |   3 +
 kernel/sched/fair.c      | 189 +++++++++++++++++++++++++++++++++++++--
 kernel/sched/features.h  |   4 +
 7 files changed, 406 insertions(+), 7 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77f01ac385..5414a9a014 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -545,6 +545,24 @@ struct sched_statistics {
 #endif /* CONFIG_SCHEDSTATS */
 } ____cacheline_aligned;
 
+#ifdef CONFIG_SCHED_BORE
+typedef union {
+	u16	u16;
+	s16	s16;
+	u8	u8[2];
+	s8	s8[2];
+} x16;
+
+typedef union {
+	u32	u32;
+	s32	s32;
+	u16	u16[2];
+	s16	s16[2];
+	u8	u8[4];
+	s8	s8[4];
+} x32;
+#endif // CONFIG_SCHED_BORE
+
 struct sched_entity {
 	/* For load-balancing: */
 	struct load_weight		load;
@@ -559,6 +577,12 @@ struct sched_entity {
 	u64				sum_exec_runtime;
 	u64				prev_sum_exec_runtime;
 	u64				vruntime;
+#ifdef CONFIG_SCHED_BORE
+	u64				burst_time;
+	u16				prev_burst_penalty;
+	u16				curr_burst_penalty;
+	u16				burst_penalty;
+#endif // CONFIG_SCHED_BORE
 	s64				vlag;
 	u64				slice;
 
@@ -989,6 +1013,13 @@ struct task_struct {
 	struct list_head		children;
 	struct list_head		sibling;
 	struct task_struct		*group_leader;
+#ifdef CONFIG_SCHED_BORE
+	u16				child_burst_cache;
+	u16				child_burst_count_cache;
+	u64				child_burst_last_cached;
+	u16				group_burst_cache;
+	u64				group_burst_last_cached;
+#endif // CONFIG_SCHED_BORE
 
 	/*
 	 * 'ptraced' is the list of tasks this task is using ptrace() on.
diff --git a/init/Kconfig b/init/Kconfig
index 6d35728b94..689f34afce 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1258,6 +1258,25 @@ config CHECKPOINT_RESTORE
 
 	  If unsure, say N here.
 
+config SCHED_BORE
+	bool "Burst-Oriented Response Enhancer"
+	default y
+	help
+	  In Desktop and Mobile computing, one might prefer interactive
+	  tasks to keep responsive no matter what they run in the background.
+
+	  Enabling this kernel feature modifies the scheduler to discriminate
+	  tasks by their burst time (runtime since it last went sleeping or
+	  yielding state) and prioritize those that run less bursty.
+	  Such tasks usually include window compositor, widgets backend,
+	  terminal emulator, video playback, games and so on.
+	  With a little impact to scheduling fairness, it may improve
+	  responsiveness especially under heavy background workload.
+
+	  You can turn it off by setting the sysctl kernel.sched_bore = 0.
+
+	  If unsure say Y here.
+
 config SCHED_AUTOGROUP
 	bool "Automatic process group scheduling"
 	select CGROUPS
diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
index 991fc90025..fdeb340173 100644
--- a/kernel/sched/autogroup.c
+++ b/kernel/sched/autogroup.c
@@ -4,7 +4,11 @@
  * Auto-group scheduling implementation:
  */
 
+#ifdef CONFIG_SCHED_BORE
+unsigned int __read_mostly sysctl_sched_autogroup_enabled = 0;
+#else // CONFIG_SCHED_BORE
 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+#endif // CONFIG_SCHED_BORE
 static struct autogroup autogroup_default;
 static atomic_t autogroup_seq_nr;
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 802551e000..49e107ca29 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4484,6 +4484,158 @@ int wake_up_state(struct task_struct *p, unsigned int state)
 	return try_to_wake_up(p, state, 0);
 }
 
+#ifdef CONFIG_SCHED_BORE
+extern unsigned int sched_burst_cache_lifetime;
+extern unsigned int sched_bore;
+extern unsigned int sched_burst_fork_atavistic;
+
+void __init sched_init_bore(void) {
+	init_task.child_burst_cache = 0;
+	init_task.child_burst_count_cache = 0;
+	init_task.child_burst_last_cached = 0;
+	init_task.group_burst_cache = 0;
+	init_task.group_burst_last_cached = 0;
+	init_task.se.burst_time = 0;
+	init_task.se.prev_burst_penalty = 0;
+	init_task.se.curr_burst_penalty = 0;
+	init_task.se.burst_penalty = 0;
+}
+
+void inline sched_fork_bore(struct task_struct *p) {
+	p->child_burst_cache = 0;
+	p->child_burst_count_cache = 0;
+	p->child_burst_last_cached = 0;
+	p->group_burst_cache = 0;
+	p->group_burst_last_cached = 0;
+	p->se.burst_time = 0;
+	p->se.curr_burst_penalty = 0;
+}
+
+static u32 count_child_tasks(struct task_struct *p) {
+	struct task_struct *child;
+	u32 cnt = 0;
+	list_for_each_entry(child, &p->children, sibling) {cnt++;}
+	return cnt;
+}
+
+static inline bool child_burst_cache_expired(struct task_struct *p, u64 now) {
+	return (p->child_burst_last_cached + sched_burst_cache_lifetime < now);
+}
+
+static inline bool group_burst_cache_expired(struct task_struct *p, u64 now) {
+	return (p->group_burst_last_cached + sched_burst_cache_lifetime < now);
+}
+
+static void __update_child_burst_cache(
+	struct task_struct *p, u32 cnt, u32 sum, u64 now) {
+	u16 avg = 0;
+	if (cnt) avg = sum / cnt;
+	p->child_burst_cache = max(avg, p->se.burst_penalty);
+	p->child_burst_count_cache = cnt;
+	p->child_burst_last_cached = now;
+}
+
+static void update_child_burst_cache(struct task_struct *p, u64 now) {
+	struct task_struct *child;
+	u32 cnt = 0;
+	u32 sum = 0;
+
+	list_for_each_entry(child, &p->children, sibling) {
+		cnt++;
+		sum += child->se.burst_penalty;
+	}
+
+	__update_child_burst_cache(p, cnt, sum, now);
+}
+
+static void update_child_burst_cache_atavistic(
+	struct task_struct *p, u64 now, u32 depth, u32 *acnt, u32 *asum) {
+	struct task_struct *child, *dec;
+	u32 cnt = 0, dcnt = 0;
+	u32 sum = 0;
+
+	list_for_each_entry(child, &p->children, sibling) {
+		dec = child;
+		while ((dcnt = count_child_tasks(dec)) == 1)
+			dec = list_first_entry(&dec->children, struct task_struct, sibling);
+
+		if (!dcnt || !depth) {
+			cnt++;
+			sum += dec->se.burst_penalty;
+		} else {
+			if (child_burst_cache_expired(dec, now))
+				update_child_burst_cache_atavistic(dec, now, depth - 1, &cnt, &sum);
+			else {
+				cnt += dec->child_burst_count_cache;
+				sum += (u32)dec->child_burst_cache * dec->child_burst_count_cache;
+			}
+		}
+	}
+
+	__update_child_burst_cache(p, cnt, sum, now);
+	*acnt += cnt;
+	*asum += sum;
+}
+
+static void update_group_burst_cache(struct task_struct *p, u64 now) {
+	struct task_struct *member;
+	u32 cnt = 0, sum = 0;
+	u16 avg = 0;
+
+	for_each_thread(p, member) {
+		cnt++;
+		sum += member->se.burst_penalty;
+	}
+
+	if (cnt) avg = sum / cnt;
+	p->group_burst_cache = max(avg, p->se.burst_penalty);
+	p->group_burst_last_cached = now;
+}
+
+#define forked_task_is_process(p) (p->pid == p->tgid)
+#define bore_thread_fork_group_inherit (sched_burst_fork_atavistic & 4)
+
+static void fork_burst_penalty(struct task_struct *p) {
+	struct sched_entity *se = &p->se;
+	struct task_struct *anc;
+	u64 now = ktime_get_ns();
+	u32 cnt = 0, sum = 0, depth;
+	u16 burst_cache;
+
+	if (likely(sched_bore)) {
+		read_lock(&tasklist_lock);
+
+		if (forked_task_is_process(p) ||
+		    likely(!bore_thread_fork_group_inherit)) {
+			anc = p->real_parent;
+			depth = sched_burst_fork_atavistic & 3;
+			if (likely(depth)) {
+				while ((anc->real_parent != anc) &&
+				       (count_child_tasks(anc) == 1))
+					anc = anc->real_parent;
+				if (child_burst_cache_expired(anc, now))
+					update_child_burst_cache_atavistic(
+						anc, now, depth - 1, &cnt, &sum);
+			} else
+				if (child_burst_cache_expired(anc, now))
+					update_child_burst_cache(anc, now);
+
+			burst_cache = anc->child_burst_cache;
+		} else {
+			anc = p->group_leader;
+			if (group_burst_cache_expired(anc, now))
+				update_group_burst_cache(anc, now);
+
+			burst_cache = anc->group_burst_cache;
+		}
+
+		read_unlock(&tasklist_lock);
+		se->prev_burst_penalty = max(se->prev_burst_penalty, burst_cache);
+	}
+	se->burst_penalty = se->prev_burst_penalty;
+}
+#endif // CONFIG_SCHED_BORE
+
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
@@ -4500,6 +4652,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
+#ifdef CONFIG_SCHED_BORE
+	sched_fork_bore(p);
+#endif // CONFIG_SCHED_BORE
 	p->se.vlag			= 0;
 	p->se.slice			= sysctl_sched_base_slice;
 	INIT_LIST_HEAD(&p->se.group_node);
@@ -4819,6 +4974,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 
 void sched_post_fork(struct task_struct *p)
 {
+#ifdef CONFIG_SCHED_BORE
+	fork_burst_penalty(p);
+#endif // CONFIG_SCHED_BORE
 	uclamp_post_fork(p);
 }
 
@@ -9919,6 +10077,11 @@ void __init sched_init(void)
 	BUG_ON(&dl_sched_class != &stop_sched_class + 1);
 #endif
 
+#ifdef CONFIG_SCHED_BORE
+	sched_init_bore();
+	printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 3.2.9 by Masahito Suzuki");
+#endif // CONFIG_SCHED_BORE
+
 	wait_bit_init();
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 4c3d0d9f3d..3fd364cb0c 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -595,6 +595,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
 		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
 
+#ifdef CONFIG_SCHED_BORE
+	SEQ_printf(m, " %2d", ((x16*)&p->se.burst_penalty)->u8[1]);
+#endif
 #ifdef CONFIG_NUMA_BALANCING
 	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
 #endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df348aa55d..40ed348992 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,9 @@
  *
  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
+ *
+ *  Burst-Oriented Response Enhancer (BORE) CPU Scheduler
+ *  Copyright (C) 2021-2023 Masahito Suzuki <firelzrd@gmail.com>
  */
 #include <linux/energy_model.h>
 #include <linux/mmap_lock.h>
@@ -66,17 +69,28 @@
  * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
  * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
  *
- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
+ * (BORE  default SCHED_TUNABLESCALING_NONE = *1 constant)
+ * (EEVDF default SCHED_TUNABLESCALING_LOG  = *(1+ilog(ncpus))
  */
+#ifdef CONFIG_SCHED_BORE
+unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
+#else // CONFIG_SCHED_BORE
 unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
+#endif // CONFIG_SCHED_BORE
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
  *
- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (BORE  default: 3 msec constant, units: nanoseconds)
+ * (EEVDF default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
+#ifdef CONFIG_SCHED_BORE
+unsigned int sysctl_sched_base_slice			= 3000000ULL;
+static unsigned int normalized_sysctl_sched_base_slice	= 3000000ULL;
+#else // CONFIG_SCHED_BORE
 unsigned int sysctl_sched_base_slice			= 750000ULL;
 static unsigned int normalized_sysctl_sched_base_slice	= 750000ULL;
+#endif // CONFIG_SCHED_BORE
 
 /*
  * After fork, child runs first. If set to 0 (default) then
@@ -86,6 +100,66 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
 
 const_debug unsigned int sysctl_sched_migration_cost	= 500000UL;
 
+#ifdef CONFIG_SCHED_BORE
+unsigned int __read_mostly sched_bore                  = 1;
+unsigned int __read_mostly sched_burst_cache_lifetime  = 60000000;
+unsigned int __read_mostly sched_burst_penalty_offset  = 22;
+unsigned int __read_mostly sched_burst_penalty_scale   = 1280;
+unsigned int __read_mostly sched_burst_smoothness_up   = 1;
+unsigned int __read_mostly sched_burst_smoothness_down = 0;
+unsigned int __read_mostly sched_burst_fork_atavistic  = 2;
+static int three          = 3;
+static int seven          = 7;
+static int sixty_four     = 64;
+static int maxval_12_bits = 4095;
+
+#define MAX_BURST_PENALTY ((40U << 8) - 1)
+
+static inline u32 log2plus1_u64_u32f8(u64 v) {
+	x32 result;
+	int msb = fls64(v);
+	int excess_bits = msb - 9;
+	result.u8[0] = (0 <= excess_bits)? v >> excess_bits: v << -excess_bits;
+	result.u8[1] = msb;
+	return result.u32;
+}
+
+static inline u32 calc_burst_penalty(u64 burst_time) {
+	u32 greed, tolerance, penalty, scaled_penalty;
+
+	greed = log2plus1_u64_u32f8(burst_time);
+	tolerance = sched_burst_penalty_offset << 8;
+	penalty = max(0, (s32)greed - (s32)tolerance);
+	scaled_penalty = penalty * sched_burst_penalty_scale >> 10;
+
+	return min(MAX_BURST_PENALTY, scaled_penalty);
+}
+
+static void update_burst_penalty(struct sched_entity *se) {
+	se->curr_burst_penalty = calc_burst_penalty(se->burst_time);
+	se->burst_penalty = max(se->prev_burst_penalty, se->curr_burst_penalty);
+}
+
+static inline u64 penalty_scale(u64 delta, struct sched_entity *se) {
+	u32 score = ((x16*)&se->burst_penalty)->u8[1];
+	return mul_u64_u32_shr(delta, sched_prio_to_wmult[score], 22);
+}
+
+static inline u32 binary_smooth(u32 new, u32 old) {
+	int increment = new - old;
+	return (0 <= increment)?
+		old + ( increment >> sched_burst_smoothness_up):
+		old - (-increment >> sched_burst_smoothness_down);
+}
+
+static void restart_burst(struct sched_entity *se) {
+	se->burst_penalty = se->prev_burst_penalty =
+		binary_smooth(se->curr_burst_penalty, se->prev_burst_penalty);
+	se->curr_burst_penalty = 0;
+	se->burst_time = 0;
+}
+#endif // CONFIG_SCHED_BORE
+
 int sched_thermal_decay_shift;
 static int __init setup_sched_thermal_decay_shift(char *str)
 {
@@ -145,6 +219,69 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table sched_fair_sysctls[] = {
+#ifdef CONFIG_SCHED_BORE
+	{
+		.procname	= "sched_bore",
+		.data		= &sched_bore,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
+	},
+	{
+		.procname	= "sched_burst_cache_lifetime",
+		.data		= &sched_burst_cache_lifetime,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "sched_burst_fork_atavistic",
+		.data		= &sched_burst_fork_atavistic,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &seven,
+	},
+	{
+		.procname	= "sched_burst_penalty_offset",
+		.data		= &sched_burst_penalty_offset,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &sixty_four,
+	},
+	{
+		.procname	= "sched_burst_penalty_scale",
+		.data		= &sched_burst_penalty_scale,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &maxval_12_bits,
+	},
+	{
+		.procname	= "sched_burst_smoothness_down",
+		.data		= &sched_burst_smoothness_down,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &three,
+	},
+	{
+		.procname	= "sched_burst_smoothness_up",
+		.data		= &sched_burst_smoothness_up,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &three,
+	},
+#endif // CONFIG_SCHED_BORE
 	{
 		.procname       = "sched_child_runs_first",
 		.data           = &sysctl_sched_child_runs_first,
@@ -313,6 +450,9 @@ static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
 	if (unlikely(se->load.weight != NICE_0_LOAD))
 		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
 
+#ifdef CONFIG_SCHED_BORE
+	if (likely(sched_bore)) delta = penalty_scale(delta, se);
+#endif // CONFIG_SCHED_BORE
 	return delta;
 }
 
@@ -668,7 +808,7 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
  * Specifically: avg_runtime() + 0 must result in entity_eligible() := true
 * For this to be so, the result of this function must have a left bias.
 */
-u64 avg_vruntime(struct cfs_rq *cfs_rq)
+static u64 avg_key(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *curr = cfs_rq->curr;
 	s64 avg = cfs_rq->avg_vruntime;
@@ -688,7 +828,11 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
 		avg = div_s64(avg, load);
 	}
 
-	return cfs_rq->min_vruntime + avg;
+	return avg;
+}
+
+inline u64 avg_vruntime(struct cfs_rq *cfs_rq) {
+	return cfs_rq->min_vruntime + avg_key(cfs_rq);
 }
 
 /*
@@ -981,7 +1125,6 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
 	return se;
 }
 
-#ifdef CONFIG_SCHED_DEBUG
 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 {
 	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
@@ -995,6 +1138,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 /**************************************************************
  * Scheduling class statistics methods:
  */
+#ifdef CONFIG_SCHED_DEBUG
 #ifdef CONFIG_SMP
 int sched_update_scaling(void)
 {
@@ -1173,7 +1317,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	curr->sum_exec_runtime += delta_exec;
 	schedstat_add(cfs_rq->exec_clock, delta_exec);
 
-	curr->vruntime += calc_delta_fair(delta_exec, curr);
+#ifdef CONFIG_SCHED_BORE
+	curr->burst_time += delta_exec;
+	update_burst_penalty(curr);
+#endif // CONFIG_SCHED_BORE
+	curr->vruntime += max(1ULL, calc_delta_fair(delta_exec, curr));
 	update_deadline(cfs_rq, curr);
 	update_min_vruntime(cfs_rq);
 
@@ -5053,6 +5201,23 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		if (WARN_ON_ONCE(!load))
 			load = 1;
 		lag = div_s64(lag, load);
+
+#ifdef CONFIG_SCHED_BORE
+		if (flags & ENQUEUE_MIGRATED && likely(sched_bore)) {
+			struct sched_entity *last, *first;
+			s64 left_vruntime = vruntime, right_vruntime = vruntime;
+
+			if (first = __pick_first_entity(cfs_rq))
+				left_vruntime = first->vruntime;
+
+			if (last = __pick_last_entity(cfs_rq))
+				right_vruntime = last->vruntime;
+
+			lag = clamp(lag,
+				(s64)vruntime - right_vruntime,
+				(s64)vruntime - left_vruntime);
+		}
+#endif // CONFIG_SCHED_BORE
 	}
 
 	se->vruntime = vruntime - lag;
@@ -6611,6 +6776,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	util_est_dequeue(&rq->cfs, p);
 
 	for_each_sched_entity(se) {
+#ifdef CONFIG_SCHED_BORE
+		if (task_sleep) restart_burst(se);
+#endif // CONFIG_SCHED_BORE
 		cfs_rq = cfs_rq_of(se);
 		dequeue_entity(cfs_rq, se, flags);
 
@@ -8341,8 +8509,12 @@ static void yield_task_fair(struct rq *rq)
 	/*
 	 * Are we the only task in the tree?
 	 */
-	if (unlikely(rq->nr_running == 1))
+	if (unlikely(rq->nr_running == 1)) {
+#ifdef CONFIG_SCHED_BORE
+		restart_burst(se);
+#endif // CONFIG_SCHED_BORE
 		return;
+	}
 
 	clear_buddies(cfs_rq, se);
 
@@ -8351,6 +8523,9 @@ static void yield_task_fair(struct rq *rq)
 	/*
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
+#ifdef CONFIG_SCHED_BORE
+	restart_burst(se);
+#endif // CONFIG_SCHED_BORE
 	/*
 	 * Tell update_rq_clock() that we've just updated,
 	 * so we don't do microscopic update in schedule()
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index f770168230..a2e09c04f3 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -6,7 +6,11 @@
  */
 SCHED_FEAT(PLACE_LAG, true)
 SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
+#ifdef CONFIG_SCHED_BORE
+SCHED_FEAT(RUN_TO_PARITY, false)
+#else // CONFIG_SCHED_BORE
 SCHED_FEAT(RUN_TO_PARITY, true)
+#endif // CONFIG_SCHED_BORE
 
 /*
  * Prefer to schedule the task we woke last (assuming it failed
-- 
2.25.1
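
The fair.c hunks above carry the core of the patch: update_curr() accumulates burst_time, calc_burst_penalty() converts it into an 8.8 fixed-point penalty, and penalty_scale() uses the penalty's high byte (a 0-39 "burst score") as an index into sched_prio_to_wmult[], so each point of score scales vruntime accrual by roughly the same ~1.25x ratio as one nice level. Below is a minimal userspace sketch of that arithmetic, not the kernel code itself: fls64() is replaced by a portable stand-in (fls64_stub, a name invented here), the x32 union trick is replaced by explicit shifts assuming the little-endian layout the patch relies on, and the tunables are hard-coded to the patch defaults.

/*
 * Userspace sketch of the BORE penalty arithmetic (NOT the kernel code):
 * portable stand-ins for fls64()/min()/max(), explicit shifts instead of
 * the x32 union (little-endian interpretation), patch-default tunables.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned sched_burst_penalty_offset  = 22;   /* patch default */
static unsigned sched_burst_penalty_scale   = 1280; /* patch default */
static unsigned sched_burst_smoothness_up   = 1;    /* patch default */
static unsigned sched_burst_smoothness_down = 0;    /* patch default */

#define MAX_BURST_PENALTY ((40U << 8) - 1) /* just under 40.0 in 8.8 fixed point */

/* fls64() stand-in: 1-based index of the most significant set bit, 0 if v == 0 */
static int fls64_stub(uint64_t v) {
	int msb = 0;
	while (v) { msb++; v >>= 1; }
	return msb;
}

/* 8.8 fixed-point log2(v) + 1: msb in the high byte, fraction in the low byte */
static uint32_t log2plus1_u64_u32f8(uint64_t v) {
	int msb = fls64_stub(v);
	int excess_bits = msb - 9;
	uint8_t frac = (uint8_t)((0 <= excess_bits) ? v >> excess_bits
	                                            : v << -excess_bits);
	return ((uint32_t)msb << 8) | frac;
}

/* burst_time (ns) -> clamped 8.8 fixed-point penalty, as in the fair.c hunk */
static uint32_t calc_burst_penalty(uint64_t burst_time) {
	uint32_t greed     = log2plus1_u64_u32f8(burst_time);
	uint32_t tolerance = sched_burst_penalty_offset << 8;
	int32_t  excess    = (int32_t)greed - (int32_t)tolerance;
	uint32_t penalty   = excess > 0 ? (uint32_t)excess : 0;
	uint32_t scaled    = penalty * sched_burst_penalty_scale >> 10;
	return scaled < MAX_BURST_PENALTY ? scaled : MAX_BURST_PENALTY;
}

/* asymmetric smoothing applied by restart_burst() at every sleep/yield */
static uint32_t binary_smooth(uint32_t new_val, uint32_t old_val) {
	int increment = (int)new_val - (int)old_val;
	return (0 <= increment) ? old_val + ( increment >> sched_burst_smoothness_up)
	                        : old_val - (-increment >> sched_burst_smoothness_down);
}

int main(void) {
	/* With offset 22, the penalty first turns non-zero just above 2^21 ns
	 * (~2.1 ms) of uninterrupted runtime, then grows logarithmically. */
	for (int shift = 20; shift <= 32; shift += 4) {
		uint64_t ns = 1ULL << shift;
		uint32_t p = calc_burst_penalty(ns);
		printf("burst %11llu ns -> penalty %4u, score %2u\n",
		       (unsigned long long)ns, (unsigned)p, (unsigned)(p >> 8));
	}
	printf("smoothed 0 -> 2048: %u\n", (unsigned)binary_smooth(2048, 0));
	return 0;
}

Note the asymmetry in the defaults: when a task sleeps or yields, rises in the penalty carry over only by half (smoothness_up = 1), while drops propagate in full (smoothness_down = 0), so a task that stops bursting recovers its priority immediately.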
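
On a kernel built with CONFIG_SCHED_BORE, the sysctl table registered in the fair.c hunk exposes these knobs under /proc/sys/kernel/ (the Kconfig help's "kernel.sched_bore = 0" maps to /proc/sys/kernel/sched_bore). A small usage sketch, with file paths derived from the .procname fields above; none of this is part of the patch itself:

/* Sketch: read the BORE master switch via procfs (assumes CONFIG_SCHED_BORE). */
#include <stdio.h>

/* Read one unsigned integer from a sysctl file; returns 0 on success. */
static int read_uint_sysctl(const char *path, unsigned *out) {
	FILE *f = fopen(path, "r");
	if (!f) return -1;
	int ok = fscanf(f, "%u", out) == 1;
	fclose(f);
	return ok ? 0 : -1;
}

int main(void) {
	unsigned v;
	if (read_uint_sysctl("/proc/sys/kernel/sched_bore", &v) == 0)
		printf("sched_bore = %u (1 = enabled, 0 = disabled)\n", v);
	else
		perror("sched_bore (kernel built without CONFIG_SCHED_BORE?)");

	/* Writing works the same way, e.g. as root:
	 *   echo 0 > /proc/sys/kernel/sched_bore
	 * proc_dointvec_minmax clamps the value to the 0..1 range declared
	 * in the sysctl table. */
	return 0;
}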