(-)a/include/linux/sched.h (+31 lines)

Lines 545-550 struct sched_statistics {
 #endif /* CONFIG_SCHEDSTATS */
 } ____cacheline_aligned;
 
+#ifdef CONFIG_SCHED_BORE
+typedef union {
+	u16	u16;
+	s16	s16;
+	u8	u8[2];
+	s8	s8[2];
+} x16;
+
+typedef union {
+	u32	u32;
+	s32	s32;
+	u16	u16[2];
+	s16	s16[2];
+	u8	u8[4];
+	s8	s8[4];
+} x32;
+#endif // CONFIG_SCHED_BORE
+
 struct sched_entity {
 	/* For load-balancing: */
 	struct load_weight		load;

Lines 559-564 struct sched_entity {
 	u64				sum_exec_runtime;
 	u64				prev_sum_exec_runtime;
 	u64				vruntime;
+#ifdef CONFIG_SCHED_BORE
+	u64				burst_time;
+	u16				prev_burst_penalty;
+	u16				curr_burst_penalty;
+	u16				burst_penalty;
+#endif // CONFIG_SCHED_BORE
 	s64				vlag;
 	u64				slice;
 

Lines 989-994 struct task_struct {
 	struct list_head		children;
 	struct list_head		sibling;
 	struct task_struct		*group_leader;
+#ifdef CONFIG_SCHED_BORE
+	u16	child_burst_cache;
+	u16	child_burst_count_cache;
+	u64	child_burst_last_cached;
+	u16	group_burst_cache;
+	u64	group_burst_last_cached;
+#endif // CONFIG_SCHED_BORE
 
 	/*
 	 * 'ptraced' is the list of tasks this task is using ptrace() on.
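The x16 and x32 unions added above let the scheduler treat burst_penalty as an 8.8 fixed-point value and pull out its integer part without shifting: later hunks in kernel/sched/debug.c and penalty_scale() in kernel/sched/fair.c read ((x16*)&p->se.burst_penalty)->u8[1] for exactly that. A minimal user-space sketch of the same trick, assuming a little-endian target (where u8[1] is the high byte) and using stdint types in place of the kernel's u16/u8:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the x16 union added to include/linux/sched.h,
 * rebuilt on stdint types for a user-space demonstration. */
typedef union {
	uint16_t u16;
	int16_t  s16;
	uint8_t  u8[2];
	int8_t   s8[2];
} x16;

int main(void)
{
	/* burst_penalty is kept in 8.8 fixed point: high byte = integer
	 * part (the "score"), low byte = fractional part. */
	x16 penalty;
	penalty.u16 = 701;	/* 0x02bd = 2 + 189/256 */

	/* On little-endian machines u8[1] is the integer part, which is
	 * what print_task() and penalty_scale() read in later hunks. */
	printf("raw=%u score=%u frac=%u/256\n",
	       (unsigned)penalty.u16, (unsigned)penalty.u8[1],
	       (unsigned)penalty.u8[0]);
	return 0;
}

The byte indexing is endianness-dependent; the in-kernel code relies on the same layout the rest of the BORE code assumes.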
(-)a/init/Kconfig (+19 lines)

Lines 1258-1263 config CHECKPOINT_RESTORE
 
 	  If unsure, say N here.
 
+config SCHED_BORE
+	bool "Burst-Oriented Response Enhancer"
+	default y
+	help
+	  In Desktop and Mobile computing, one might prefer interactive
+	  tasks to keep responsive no matter what they run in the background.
+
+	  Enabling this kernel feature modifies the scheduler to discriminate
+	  tasks by their burst time (runtime since it last went sleeping or
+	  yielding state) and prioritize those that run less bursty.
+	  Such tasks usually include window compositor, widgets backend,
+	  terminal emulator, video playback, games and so on.
+	  With a little impact to scheduling fairness, it may improve
+	  responsiveness especially under heavy background workload.
+
+	  You can turn it off by setting the sysctl kernel.sched_bore = 0.
+
+	  If unsure say Y here.
+
 config SCHED_AUTOGROUP
 	bool "Automatic process group scheduling"
 	select CGROUPS
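The kernel.sched_bore knob mentioned in the help text is registered by the ctl_table hunk in kernel/sched/fair.c later in this patch; since mainline registers sched_fair_sysctls under the "kernel" namespace, it should appear as /proc/sys/kernel/sched_bore on a booted BORE kernel (assuming the usual procfs mount). A small illustrative snippet for flipping it at runtime, equivalent to sysctl -w kernel.sched_bore=0:

#include <stdio.h>

/* Toggle BORE at runtime through the sysctl added by this patch. */
static int set_sched_bore(int enable)
{
	FILE *f = fopen("/proc/sys/kernel/sched_bore", "w");

	if (!f)
		return -1;	/* not a BORE kernel, or not privileged */
	fprintf(f, "%d\n", enable ? 1 : 0);
	return fclose(f);
}

int main(void)
{
	/* Disable the burst-penalty scaling; pass 1 to re-enable it. */
	return set_sched_bore(0) ? 1 : 0;
}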
(-)a/kernel/sched/autogroup.c (+4 lines)

Lines 4-10
  * Auto-group scheduling implementation:
  */
 
+#ifdef CONFIG_SCHED_BORE
+unsigned int __read_mostly sysctl_sched_autogroup_enabled = 0;
+#else // CONFIG_SCHED_BORE
 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+#endif // CONFIG_SCHED_BORE
 static struct autogroup autogroup_default;
 static atomic_t autogroup_seq_nr;
 
(-)a/kernel/sched/core.c (+163 lines)

Lines 4484-4489 int wake_up_state(struct task_struct *p, unsigned int state)
 	return try_to_wake_up(p, state, 0);
 }
 
+#ifdef CONFIG_SCHED_BORE
+extern unsigned int sched_burst_cache_lifetime;
+extern unsigned int sched_bore;
+extern unsigned int sched_burst_fork_atavistic;
+
+void __init sched_init_bore(void) {
+	init_task.child_burst_cache = 0;
+	init_task.child_burst_count_cache = 0;
+	init_task.child_burst_last_cached = 0;
+	init_task.group_burst_cache = 0;
+	init_task.group_burst_last_cached = 0;
+	init_task.se.burst_time = 0;
+	init_task.se.prev_burst_penalty = 0;
+	init_task.se.curr_burst_penalty = 0;
+	init_task.se.burst_penalty = 0;
+}
+
+void inline sched_fork_bore(struct task_struct *p) {
+	p->child_burst_cache = 0;
+	p->child_burst_count_cache = 0;
+	p->child_burst_last_cached = 0;
+	p->group_burst_cache = 0;
+	p->group_burst_last_cached = 0;
+	p->se.burst_time = 0;
+	p->se.curr_burst_penalty = 0;
+}
+
+static u32 count_child_tasks(struct task_struct *p) {
+	struct task_struct *child;
+	u32 cnt = 0;
+	list_for_each_entry(child, &p->children, sibling) {cnt++;}
+	return cnt;
+}
+
+static inline bool child_burst_cache_expired(struct task_struct *p, u64 now) {
+	return (p->child_burst_last_cached + sched_burst_cache_lifetime < now);
+}
+
+static inline bool group_burst_cache_expired(struct task_struct *p, u64 now) {
+	return (p->group_burst_last_cached + sched_burst_cache_lifetime < now);
+}
+
+static void __update_child_burst_cache(
+	struct task_struct *p, u32 cnt, u32 sum, u64 now) {
+	u16 avg = 0;
+	if (cnt) avg = sum / cnt;
+	p->child_burst_cache = max(avg, p->se.burst_penalty);
+	p->child_burst_count_cache = cnt;
+	p->child_burst_last_cached = now;
+}
+
+static void update_child_burst_cache(struct task_struct *p, u64 now) {
+	struct task_struct *child;
+	u32 cnt = 0;
+	u32 sum = 0;
+
+	list_for_each_entry(child, &p->children, sibling) {
+		cnt++;
+		sum += child->se.burst_penalty;
+	}
+
+	__update_child_burst_cache(p, cnt, sum, now);
+}
+
+static void update_child_burst_cache_atavistic(
+	struct task_struct *p, u64 now, u32 depth, u32 *acnt, u32 *asum) {
+	struct task_struct *child, *dec;
+	u32 cnt = 0, dcnt = 0;
+	u32 sum = 0;
+
+	list_for_each_entry(child, &p->children, sibling) {
+		dec = child;
+		while ((dcnt = count_child_tasks(dec)) == 1)
+			dec = list_first_entry(&dec->children, struct task_struct, sibling);
+
+		if (!dcnt || !depth) {
+			cnt++;
+			sum += dec->se.burst_penalty;
+		} else {
+			if (child_burst_cache_expired(dec, now))
+				update_child_burst_cache_atavistic(dec, now, depth - 1, &cnt, &sum);
+			else {
+				cnt += dec->child_burst_count_cache;
+				sum += (u32)dec->child_burst_cache * dec->child_burst_count_cache;
+			}
+		}
+	}
+
+	__update_child_burst_cache(p, cnt, sum, now);
+	*acnt += cnt;
+	*asum += sum;
+}
+
+static void update_group_burst_cache(struct task_struct *p, u64 now) {
+	struct task_struct *member;
+	u32 cnt = 0, sum = 0;
+	u16 avg = 0;
+
+	for_each_thread(p, member) {
+		cnt++;
+		sum += member->se.burst_penalty;
+	}
+
+	if (cnt) avg = sum / cnt;
+	p->group_burst_cache = max(avg, p->se.burst_penalty);
+	p->group_burst_last_cached = now;
+}
+
+#define forked_task_is_process(p) (p->pid == p->tgid)
+#define bore_thread_fork_group_inherit (sched_burst_fork_atavistic & 4)
+
+static void fork_burst_penalty(struct task_struct *p) {
+	struct sched_entity *se = &p->se;
+	struct task_struct *anc;
+	u64 now = ktime_get_ns();
+	u32 cnt = 0, sum = 0, depth;
+	u16 burst_cache;
+
+	if (likely(sched_bore)) {
+		read_lock(&tasklist_lock);
+
+		if (forked_task_is_process(p) ||
+		    likely(!bore_thread_fork_group_inherit)) {
+			anc = p->real_parent;
+			depth = sched_burst_fork_atavistic & 3;
+			if (likely(depth)) {
+				while ((anc->real_parent != anc) &&
+				       (count_child_tasks(anc) == 1))
+					anc = anc->real_parent;
+				if (child_burst_cache_expired(anc, now))
+					update_child_burst_cache_atavistic(
+						anc, now, depth - 1, &cnt, &sum);
+			} else
+				if (child_burst_cache_expired(anc, now))
+					update_child_burst_cache(anc, now);
+
+			burst_cache = anc->child_burst_cache;
+		} else {
+			anc = p->group_leader;
+			if (group_burst_cache_expired(anc, now))
+				update_group_burst_cache(anc, now);
+
+			burst_cache = anc->group_burst_cache;
+		}
+
+		read_unlock(&tasklist_lock);
+		se->prev_burst_penalty = max(se->prev_burst_penalty, burst_cache);
+	}
+	se->burst_penalty = se->prev_burst_penalty;
+}
+#endif // CONFIG_SCHED_BORE
+
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.

Lines 4500-4505 static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
+#ifdef CONFIG_SCHED_BORE
+	sched_fork_bore(p);
+#endif // CONFIG_SCHED_BORE
 	p->se.vlag			= 0;
 	p->se.slice			= sysctl_sched_base_slice;
 	INIT_LIST_HEAD(&p->se.group_node);

Lines 4819-4824 void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 
 void sched_post_fork(struct task_struct *p)
 {
+#ifdef CONFIG_SCHED_BORE
+	fork_burst_penalty(p);
+#endif // CONFIG_SCHED_BORE
 	uclamp_post_fork(p);
 }
 

Lines 9919-9924 void __init sched_init(void)
 	BUG_ON(&dl_sched_class != &stop_sched_class + 1);
 #endif
 
+#ifdef CONFIG_SCHED_BORE
+	sched_init_bore();
+	printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 3.2.9 by Masahito Suzuki");
+#endif // CONFIG_SCHED_BORE
+
 	wait_bit_init();
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
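fork_burst_penalty() above reduces, in the common case, to two steps: the parent's child_burst_cache is refreshed as the mean burst_penalty of its children, floored at the parent's own penalty (__update_child_burst_cache), and the newly forked task then starts from max(its inherited prev_burst_penalty, that cache). A stand-alone sketch of just that arithmetic with made-up 8.8 fixed-point penalty values; the task-list walking, tasklist_lock and the atavistic depth handling are left out:

#include <stdint.h>
#include <stdio.h>

#define MAX2(a, b) ((a) > (b) ? (a) : (b))

/* Mirror of __update_child_burst_cache(): average the children's
 * penalties, but never report less than the parent's own penalty. */
static uint16_t child_burst_cache(uint16_t parent_penalty,
				  const uint16_t *child_penalty, uint32_t cnt)
{
	uint32_t sum = 0, avg = 0;

	for (uint32_t i = 0; i < cnt; i++)
		sum += child_penalty[i];
	if (cnt)
		avg = sum / cnt;
	return MAX2((uint16_t)avg, parent_penalty);
}

int main(void)
{
	/* Hypothetical penalties of three existing children. */
	uint16_t children[] = { 512, 768, 1024 };	/* scores 2, 3, 4 */
	uint16_t parent_penalty = 640;			/* score 2.5 */

	uint16_t cache = child_burst_cache(parent_penalty, children, 3);

	/* A new sibling inherits max(prev_burst_penalty, cache), so it
	 * cannot start "cleaner" than its relatives' recent history. */
	uint16_t prev_burst_penalty = 0;
	uint16_t inherited = MAX2(prev_burst_penalty, cache);

	printf("cache=%u inherited=%u\n",
	       (unsigned)cache, (unsigned)inherited);	/* 768 768 */
	return 0;
}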
(-)a/kernel/sched/debug.c (+3 lines)

Lines 595-600 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
 		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
 
+#ifdef CONFIG_SCHED_BORE
+	SEQ_printf(m, " %2d", ((x16*)&p->se.burst_penalty)->u8[1]);
+#endif
 #ifdef CONFIG_NUMA_BALANCING
 	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
 #endif
(-)a/kernel/sched/fair.c (-7 / +182 lines)

Lines 19-24
  *
  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
+ *
+ *  Burst-Oriented Response Enhancer (BORE) CPU Scheduler
+ *  Copyright (C) 2021-2023 Masahito Suzuki <firelzrd@gmail.com>
  */
 #include <linux/energy_model.h>
 #include <linux/mmap_lock.h>

Lines 66-82
  *   SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
  *   SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
  *
- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
+ * (BORE  default SCHED_TUNABLESCALING_NONE = *1 constant)
+ * (EEVDF default SCHED_TUNABLESCALING_LOG  = *(1+ilog(ncpus))
  */
+#ifdef CONFIG_SCHED_BORE
+unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
+#else // CONFIG_SCHED_BORE
 unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
+#endif // CONFIG_SCHED_BORE
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
  *
- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (BORE  default: 3 msec constant, units: nanoseconds)
+ * (EEVDF default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
+#ifdef CONFIG_SCHED_BORE
+unsigned int sysctl_sched_base_slice			= 3000000ULL;
+static unsigned int normalized_sysctl_sched_base_slice	= 3000000ULL;
+#else // CONFIG_SCHED_BORE
 unsigned int sysctl_sched_base_slice			= 750000ULL;
 static unsigned int normalized_sysctl_sched_base_slice	= 750000ULL;
+#endif // CONFIG_SCHED_BORE
 
 /*
  * After fork, child runs first. If set to 0 (default) then

Lines 86-91 unsigned int sysctl_sched_child_runs_first __read_mostly;
 
 const_debug unsigned int sysctl_sched_migration_cost	= 500000UL;
 
+#ifdef CONFIG_SCHED_BORE
+unsigned int __read_mostly sched_bore                  = 1;
+unsigned int __read_mostly sched_burst_cache_lifetime  = 60000000;
+unsigned int __read_mostly sched_burst_penalty_offset  = 22;
+unsigned int __read_mostly sched_burst_penalty_scale   = 1280;
+unsigned int __read_mostly sched_burst_smoothness_up   = 1;
+unsigned int __read_mostly sched_burst_smoothness_down = 0;
+unsigned int __read_mostly sched_burst_fork_atavistic  = 2;
+static int three          = 3;
+static int seven          = 7;
+static int sixty_four     = 64;
+static int maxval_12_bits = 4095;
+
+#define MAX_BURST_PENALTY ((40U << 8) - 1)
+
+static inline u32 log2plus1_u64_u32f8(u64 v) {
+	x32 result;
+	int msb = fls64(v);
+	int excess_bits = msb - 9;
+	result.u8[0] = (0 <= excess_bits)? v >> excess_bits: v << -excess_bits;
+	result.u8[1] = msb;
+	return result.u32;
+}
+
+static inline u32 calc_burst_penalty(u64 burst_time) {
+	u32 greed, tolerance, penalty, scaled_penalty;
+
+	greed = log2plus1_u64_u32f8(burst_time);
+	tolerance = sched_burst_penalty_offset << 8;
+	penalty = max(0, (s32)greed - (s32)tolerance);
+	scaled_penalty = penalty * sched_burst_penalty_scale >> 10;
+
+	return min(MAX_BURST_PENALTY, scaled_penalty);
+}
+
+static void update_burst_penalty(struct sched_entity *se) {
+	se->curr_burst_penalty = calc_burst_penalty(se->burst_time);
+	se->burst_penalty = max(se->prev_burst_penalty, se->curr_burst_penalty);
+}
+
+static inline u64 penalty_scale(u64 delta, struct sched_entity *se) {
+	u32 score = ((x16*)&se->burst_penalty)->u8[1];
+	return mul_u64_u32_shr(delta, sched_prio_to_wmult[score], 22);
+}
+
+static inline u32 binary_smooth(u32 new, u32 old) {
+  int increment = new - old;
+  return (0 <= increment)?
+    old + ( increment >> sched_burst_smoothness_up):
+    old - (-increment >> sched_burst_smoothness_down);
+}
+
+static void restart_burst(struct sched_entity *se) {
+	se->burst_penalty = se->prev_burst_penalty =
+		binary_smooth(se->curr_burst_penalty, se->prev_burst_penalty);
+	se->curr_burst_penalty = 0;
+	se->burst_time = 0;
+}
+#endif // CONFIG_SCHED_BORE
+
 int sched_thermal_decay_shift;
 static int __init setup_sched_thermal_decay_shift(char *str)
 {
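The helpers above are the heart of the penalty computation: log2plus1_u64_u32f8() packs an approximate log2 of the accumulated burst time into 8.8 fixed point, calc_burst_penalty() subtracts the sched_burst_penalty_offset tolerance and applies sched_burst_penalty_scale, and penalty_scale() then uses the integer byte of the result as an index into sched_prio_to_wmult[] (whose nice-0 entry is 2^22 in mainline, so the shift by 22 makes that entry the neutral point). A user-space rendition of the arithmetic with the default tunables, fed a 10 ms burst; fls64() is emulated with a compiler builtin and the kernel's min()/max() helpers are open-coded, otherwise the math mirrors the hunk above:

#include <stdint.h>
#include <stdio.h>

#define MAX_BURST_PENALTY ((40U << 8) - 1)

/* Default tunables from this patch. */
static unsigned int sched_burst_penalty_offset = 22;
static unsigned int sched_burst_penalty_scale  = 1280;

static int fls64_emul(uint64_t v)	/* 1-based index of the highest set bit */
{
	return v ? 64 - __builtin_clzll(v) : 0;
}

/* 8.8 fixed-point log2(v)+1: high byte = bit position of the MSB,
 * low byte = the next 8 bits below it (a linear approximation of
 * the fractional part). */
static uint32_t log2plus1_u64_u32f8(uint64_t v)
{
	int msb = fls64_emul(v);
	int excess_bits = msb - 9;
	uint8_t frac = (0 <= excess_bits) ? v >> excess_bits : v << -excess_bits;

	return ((uint32_t)msb << 8) | frac;
}

static uint32_t calc_burst_penalty(uint64_t burst_time)
{
	uint32_t greed = log2plus1_u64_u32f8(burst_time);
	uint32_t tolerance = sched_burst_penalty_offset << 8;
	int32_t penalty = (int32_t)greed - (int32_t)tolerance;
	uint32_t scaled;

	if (penalty < 0)
		penalty = 0;
	scaled = (uint32_t)penalty * sched_burst_penalty_scale >> 10;
	return scaled < MAX_BURST_PENALTY ? scaled : MAX_BURST_PENALTY;
}

int main(void)
{
	uint64_t burst_ns = 10000000;		/* 10 ms of uninterrupted runtime */
	uint32_t penalty = calc_burst_penalty(burst_ns);

	/* greed = (24 << 8) | 49 = 6193, tolerance = 22 << 8 = 5632,
	 * so penalty = 561 * 1280 >> 10 = 701, i.e. a "score" of 701 >> 8 = 2. */
	printf("penalty=%u score=%u\n", (unsigned)penalty, (unsigned)(penalty >> 8));
	return 0;
}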
Lines 145-150 static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table sched_fair_sysctls[] = {
+#ifdef CONFIG_SCHED_BORE
+	{
+		.procname	= "sched_bore",
+		.data		= &sched_bore,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
+	},
+	{
+		.procname	= "sched_burst_cache_lifetime",
+		.data		= &sched_burst_cache_lifetime,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler = proc_dointvec,
+	},
+	{
+		.procname	= "sched_burst_fork_atavistic",
+		.data		= &sched_burst_fork_atavistic,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &seven,
+	},
+	{
+		.procname	= "sched_burst_penalty_offset",
+		.data		= &sched_burst_penalty_offset,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &sixty_four,
+	},
+	{
+		.procname	= "sched_burst_penalty_scale",
+		.data		= &sched_burst_penalty_scale,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &maxval_12_bits,
+	},
+	{
+		.procname	= "sched_burst_smoothness_down",
+		.data		= &sched_burst_smoothness_down,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &three,
+	},
+	{
+		.procname	= "sched_burst_smoothness_up",
+		.data		= &sched_burst_smoothness_up,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &three,
+	},
+#endif // CONFIG_SCHED_BORE
 	{
 		.procname       = "sched_child_runs_first",
 		.data           = &sysctl_sched_child_runs_first,
313
	if (unlikely(se->load.weight != NICE_0_LOAD))
450
	if (unlikely(se->load.weight != NICE_0_LOAD))
314
		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
451
		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
315
452
453
#ifdef CONFIG_SCHED_BORE
454
	if (likely(sched_bore)) delta = penalty_scale(delta, se);
455
#endif // CONFIG_SCHED_BORE
316
	return delta;
456
	return delta;
317
}
457
}
318
458
Lines 668-674 void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta) Link Here
668
 * Specifically: avg_runtime() + 0 must result in entity_eligible() := true
808
 * Specifically: avg_runtime() + 0 must result in entity_eligible() := true
669
 * For this to be so, the result of this function must have a left bias.
809
 * For this to be so, the result of this function must have a left bias.
670
 */
810
 */
671
u64 avg_vruntime(struct cfs_rq *cfs_rq)
811
static u64 avg_key(struct cfs_rq *cfs_rq)
672
{
812
{
673
	struct sched_entity *curr = cfs_rq->curr;
813
	struct sched_entity *curr = cfs_rq->curr;
674
	s64 avg = cfs_rq->avg_vruntime;
814
	s64 avg = cfs_rq->avg_vruntime;
Lines 688-694 u64 avg_vruntime(struct cfs_rq *cfs_rq) Link Here
688
		avg = div_s64(avg, load);
828
		avg = div_s64(avg, load);
689
	}
829
	}
690
830
691
	return cfs_rq->min_vruntime + avg;
831
	return avg;
832
}
833
834
inline u64 avg_vruntime(struct cfs_rq *cfs_rq) {
835
	return cfs_rq->min_vruntime + avg_key(cfs_rq);
692
}
836
}
693
837
694
/*
838
/*
Lines 981-987 static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq) Link Here
981
	return se;
1125
	return se;
982
}
1126
}
983
1127
984
#ifdef CONFIG_SCHED_DEBUG
985
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
1128
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
986
{
1129
{
987
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
1130
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
Lines 995-1000 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) Link Here
995
/**************************************************************
1138
/**************************************************************
996
 * Scheduling class statistics methods:
1139
 * Scheduling class statistics methods:
997
 */
1140
 */
1141
#ifdef CONFIG_SCHED_DEBUG
998
#ifdef CONFIG_SMP
1142
#ifdef CONFIG_SMP
999
int sched_update_scaling(void)
1143
int sched_update_scaling(void)
1000
{
1144
{
Lines 1173-1179 static void update_curr(struct cfs_rq *cfs_rq) Link Here
1173
	curr->sum_exec_runtime += delta_exec;
1317
	curr->sum_exec_runtime += delta_exec;
1174
	schedstat_add(cfs_rq->exec_clock, delta_exec);
1318
	schedstat_add(cfs_rq->exec_clock, delta_exec);
1175
1319
1176
	curr->vruntime += calc_delta_fair(delta_exec, curr);
1320
#ifdef CONFIG_SCHED_BORE
1321
	curr->burst_time += delta_exec;
1322
	update_burst_penalty(curr);
1323
#endif // CONFIG_SCHED_BORE
1324
	curr->vruntime += max(1ULL, calc_delta_fair(delta_exec, curr));
1177
	update_deadline(cfs_rq, curr);
1325
	update_deadline(cfs_rq, curr);
1178
	update_min_vruntime(cfs_rq);
1326
	update_min_vruntime(cfs_rq);
1179
1327
Lines 5053-5058 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) Link Here
5053
		if (WARN_ON_ONCE(!load))
5201
		if (WARN_ON_ONCE(!load))
5054
			load = 1;
5202
			load = 1;
5055
		lag = div_s64(lag, load);
5203
		lag = div_s64(lag, load);
5204
5205
#ifdef CONFIG_SCHED_BORE
5206
		if (flags & ENQUEUE_MIGRATED && likely(sched_bore)) {
5207
			struct sched_entity *last, *first;
5208
			s64 left_vruntime = vruntime, right_vruntime = vruntime;
5209
5210
			if (first = __pick_first_entity(cfs_rq))
5211
				left_vruntime = first->vruntime;
5212
5213
			if (last = __pick_last_entity(cfs_rq))
5214
				right_vruntime = last->vruntime;
5215
5216
			lag = clamp(lag,
5217
				(s64)vruntime - right_vruntime,
5218
				(s64)vruntime - left_vruntime);
5219
		}
5220
#endif // CONFIG_SCHED_BORE
5056
	}
5221
	}
5057
5222
5058
	se->vruntime = vruntime - lag;
5223
	se->vruntime = vruntime - lag;
Lines 6611-6616 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) Link Here
6611
	util_est_dequeue(&rq->cfs, p);
6776
	util_est_dequeue(&rq->cfs, p);
6612
6777
6613
	for_each_sched_entity(se) {
6778
	for_each_sched_entity(se) {
6779
#ifdef CONFIG_SCHED_BORE
6780
		if (task_sleep) restart_burst(se);
6781
#endif // CONFIG_SCHED_BORE
6614
		cfs_rq = cfs_rq_of(se);
6782
		cfs_rq = cfs_rq_of(se);
6615
		dequeue_entity(cfs_rq, se, flags);
6783
		dequeue_entity(cfs_rq, se, flags);
6616
6784
Lines 8341-8348 static void yield_task_fair(struct rq *rq) Link Here
8341
	/*
8509
	/*
8342
	 * Are we the only task in the tree?
8510
	 * Are we the only task in the tree?
8343
	 */
8511
	 */
8344
	if (unlikely(rq->nr_running == 1))
8512
	if (unlikely(rq->nr_running == 1)) {
8513
#ifdef CONFIG_SCHED_BORE
8514
		restart_burst(se);
8515
#endif // CONFIG_SCHED_BORE
8345
		return;
8516
		return;
8517
	}
8346
8518
8347
	clear_buddies(cfs_rq, se);
8519
	clear_buddies(cfs_rq, se);
8348
8520
Lines 8351-8356 static void yield_task_fair(struct rq *rq) Link Here
8351
	 * Update run-time statistics of the 'current'.
8523
	 * Update run-time statistics of the 'current'.
8352
	 */
8524
	 */
8353
	update_curr(cfs_rq);
8525
	update_curr(cfs_rq);
8526
#ifdef CONFIG_SCHED_BORE
8527
	restart_burst(se);
8528
#endif // CONFIG_SCHED_BORE
8354
	/*
8529
	/*
8355
	 * Tell update_rq_clock() that we've just updated,
8530
	 * Tell update_rq_clock() that we've just updated,
8356
	 * so we don't do microscopic update in schedule()
8531
	 * so we don't do microscopic update in schedule()
(-)a/kernel/sched/features.h (-1 / +4 lines)

Lines 6-12
  */
 SCHED_FEAT(PLACE_LAG, true)
 SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
+#ifdef CONFIG_SCHED_BORE
+SCHED_FEAT(RUN_TO_PARITY, false)
+#else // CONFIG_SCHED_BORE
 SCHED_FEAT(RUN_TO_PARITY, true)
+#endif // CONFIG_SCHED_BORE
 
 /*
  * Prefer to schedule the task we woke last (assuming it failed
-
