Patch attached to Gentoo bug 346021, against linux-2.6.36.
(-)linux-2.6.36/include/linux/sched.h (+19 lines)
@@ -506,6 +506,8 @@ struct thread_group_cputimer {
 	spinlock_t lock;
 };
 
+struct autogroup;
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
@@ -573,6 +575,9 @@ struct signal_struct {
 
 	struct tty_struct *tty; /* NULL if no tty */
 
+#ifdef CONFIG_SCHED_AUTOGROUP
+	struct autogroup *autogroup;
+#endif
 	/*
 	 * Cumulative resource counters for dead threads in the group,
 	 * and for reaped dead child processes forked by this group.
@@ -1900,6 +1905,20 @@ int sched_rt_handler(struct ctl_table *t
 
 extern unsigned int sysctl_sched_compat_yield;
 
+#ifdef CONFIG_SCHED_AUTOGROUP
+extern unsigned int sysctl_sched_autogroup_enabled;
+
+extern void sched_autogroup_create_attach(struct task_struct *p);
+extern void sched_autogroup_detach(struct task_struct *p);
+extern void sched_autogroup_fork(struct signal_struct *sig);
+extern void sched_autogroup_exit(struct signal_struct *sig);
+#else
+static inline void sched_autogroup_create_attach(struct task_struct *p) { }
+static inline void sched_autogroup_detach(struct task_struct *p) { }
+static inline void sched_autogroup_fork(struct signal_struct *sig) { }
+static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+#endif
+
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
(-)linux-2.6.36/kernel/sched.c (-2 / +11 lines)
@@ -78,6 +78,7 @@
 
 #include "sched_cpupri.h"
 #include "workqueue_sched.h"
+#include "sched_autogroup.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
@@ -268,6 +269,10 @@ struct task_group {
 	struct task_group *parent;
 	struct list_head siblings;
 	struct list_head children;
+
+#if defined(CONFIG_SCHED_AUTOGROUP)
+	struct autogroup *autogroup;
+#endif
 };
 
 #define root_task_group init_task_group
@@ -612,11 +617,14 @@ static inline int cpu_of(struct rq *rq)
  */
 static inline struct task_group *task_group(struct task_struct *p)
 {
+	struct task_group *tg;
 	struct cgroup_subsys_state *css;
 
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
 			lockdep_is_held(&task_rq(p)->lock));
-	return container_of(css, struct task_group, css);
+	tg = container_of(css, struct task_group, css);
+
+	return autogroup_task_group(p, tg);
 }
 
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
@@ -1920,6 +1928,7 @@ static void deactivate_task(struct rq *r
 #include "sched_idletask.c"
 #include "sched_fair.c"
 #include "sched_rt.c"
+#include "sched_autogroup.c"
 #ifdef CONFIG_SCHED_DEBUG
 # include "sched_debug.c"
 #endif
@@ -7749,7 +7758,7 @@ void __init sched_init(void)
 #ifdef CONFIG_CGROUP_SCHED
 	list_add(&init_task_group.list, &task_groups);
 	INIT_LIST_HEAD(&init_task_group.children);
-
+	autogroup_init(&init_task);
 #endif /* CONFIG_CGROUP_SCHED */
 
 #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
(-)linux-2.6.36/kernel/fork.c (-1 / +4 lines)
@@ -173,8 +173,11 @@ static inline void free_signal_struct(st
 
 static inline void put_signal_struct(struct signal_struct *sig)
 {
-	if (atomic_dec_and_test(&sig->sigcnt))
+	if (atomic_dec_and_test(&sig->sigcnt)) {
+		sched_autogroup_exit(sig);
 		free_signal_struct(sig);
+	}
 }
 
 void __put_task_struct(struct task_struct *tsk)
@@ -900,6 +902,7 @@ static int copy_signal(unsigned long clo
 	posix_cpu_timers_init_group(sig);
 
 	tty_audit_fork(sig);
+	sched_autogroup_fork(sig);
 
 	sig->oom_adj = current->signal->oom_adj;
 	sig->oom_score_adj = current->signal->oom_score_adj;
(-)linux-2.6.36/kernel/sys.c (-1 / +3 lines)
@@ -1080,8 +1080,10 @@ SYSCALL_DEFINE0(setsid)
 	err = session;
 out:
 	write_unlock_irq(&tasklist_lock);
-	if (err > 0)
+	if (err > 0) {
 		proc_sid_connector(group_leader);
+		sched_autogroup_create_attach(group_leader);
+	}
 	return err;
 }
 
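With the hunk above, a successful setsid() now does two things under the existing err > 0 check: it emits the proc connector event and attaches the new session leader to a freshly created autogroup. A minimal userspace sketch that exercises this path (a hypothetical test program, not part of the patch):

    /* setsid_demo.c - hypothetical test, not part of the patch.
     * On a CONFIG_SCHED_AUTOGROUP kernel, the child's setsid() call below
     * also triggers sched_autogroup_create_attach() for the new session.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();

        if (pid < 0) {
            perror("fork");
            return EXIT_FAILURE;
        }
        if (pid == 0) {
            /* the child is not a process group leader, so setsid() succeeds */
            if (setsid() < 0) {
                perror("setsid");
                _exit(1);
            }
            printf("pid %d is now leader of session %d\n",
                   (int)getpid(), (int)getsid(0));
            _exit(0);
        }
        waitpid(pid, NULL, 0);
        return EXIT_SUCCESS;
    }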
(-)linux-2.6.36/kernel/sched_debug.c (-14 / +15 lines)
@@ -87,6 +87,20 @@ static void print_cfs_group_stats(struct
 }
 #endif
 
+#if defined(CONFIG_CGROUP_SCHED) && \
+	(defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED))
+static void task_group_path(struct task_group *tg, char *buf, int buflen)
+{
+	/* may be NULL if the underlying cgroup isn't fully-created yet */
+	if (!tg->css.cgroup) {
+		buf[0] = '\0';
+		autogroup_path(tg, buf, buflen);
+		return;
+	}
+	cgroup_path(tg->css.cgroup, buf, buflen);
+}
+#endif
+
 static void
 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
@@ -115,7 +129,7 @@ print_task(struct seq_file *m, struct rq
 		char path[64];
 
 		rcu_read_lock();
-		cgroup_path(task_group(p)->css.cgroup, path, sizeof(path));
+		task_group_path(task_group(p), path, sizeof(path));
 		rcu_read_unlock();
 		SEQ_printf(m, " %s", path);
 	}
@@ -147,19 +161,6 @@ static void print_rq(struct seq_file *m,
 	read_unlock_irqrestore(&tasklist_lock, flags);
 }
 
-#if defined(CONFIG_CGROUP_SCHED) && \
-	(defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED))
-static void task_group_path(struct task_group *tg, char *buf, int buflen)
-{
-	/* may be NULL if the underlying cgroup isn't fully-created yet */
-	if (!tg->css.cgroup) {
-		buf[0] = '\0';
-		return;
-	}
-	cgroup_path(tg->css.cgroup, buf, buflen);
-}
-#endif
-
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 {
 	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
(-)linux-2.6.36/kernel/sched_autogroup.h (+23 lines)
@@ -0,0 +1,23 @@
+#ifdef CONFIG_SCHED_AUTOGROUP
+
+static inline struct task_group *
+autogroup_task_group(struct task_struct *p, struct task_group *tg);
+
+#else /* !CONFIG_SCHED_AUTOGROUP */
+
+static inline void autogroup_init(struct task_struct *init_task) {  }
+
+static inline struct task_group *
+autogroup_task_group(struct task_struct *p, struct task_group *tg)
+{
+	return tg;
+}
+
+#ifdef CONFIG_SCHED_DEBUG
+static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
+{
+	return 0;
+}
+#endif
+
+#endif /* CONFIG_SCHED_AUTOGROUP */
(-)linux-2.6.36/kernel/sched_autogroup.c (+170 lines)
@@ -0,0 +1,170 @@
+#ifdef CONFIG_SCHED_AUTOGROUP
+
+unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+
+struct autogroup {
+	struct task_group	*tg;
+	struct kref		kref;
+	unsigned long		id;
+};
+
+static struct autogroup autogroup_default;
+static atomic_t autogroup_seq_nr;
+
+static void autogroup_init(struct task_struct *init_task)
+{
+	autogroup_default.tg = &init_task_group;
+	autogroup_default.id = 0;
+	atomic_set(&autogroup_seq_nr, 1);
+	kref_init(&autogroup_default.kref);
+	init_task->signal->autogroup = &autogroup_default;
+}
+
+static inline void autogroup_destroy(struct kref *kref)
+{
+	struct autogroup *ag = container_of(kref, struct autogroup, kref);
+	struct task_group *tg = ag->tg;
+
+	kfree(ag);
+	sched_destroy_group(tg);
+}
+
+static inline void autogroup_kref_put(struct autogroup *ag)
+{
+	kref_put(&ag->kref, autogroup_destroy);
+}
+
+static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
+{
+	kref_get(&ag->kref);
+	return ag;
+}
+
+static inline struct autogroup *autogroup_create(void)
+{
+	struct autogroup *ag = kmalloc(sizeof(*ag), GFP_KERNEL);
+
+	if (!ag)
+		goto out_fail;
+
+	ag->tg = sched_create_group(&init_task_group);
+
+	if (IS_ERR(ag->tg))
+		goto out_fail;
+
+	kref_init(&ag->kref);
+	ag->tg->autogroup = ag;
+	ag->id = atomic_inc_return(&autogroup_seq_nr);
+
+	return ag;
+
+out_fail:
+	if (ag) {
+		kfree(ag);
+		WARN_ON(1);
+	} else
+		WARN_ON(1);
+
+	return autogroup_kref_get(&autogroup_default);
+}
+
+static inline bool
+task_wants_autogroup(struct task_struct *p, struct task_group *tg)
+{
+	if (tg != &root_task_group)
+		return false;
+
+	if (p->sched_class != &fair_sched_class)
+		return false;
+
+	if (p->flags & PF_EXITING)
+		return false;
+
+	return true;
+}
+
+static inline struct task_group *
+autogroup_task_group(struct task_struct *p, struct task_group *tg)
+{
+	int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
+
+	if (enabled && task_wants_autogroup(p, tg))
+		return p->signal->autogroup->tg;
+
+	return tg;
+}
+
+static void
+autogroup_move_group(struct task_struct *p, struct autogroup *ag)
+{
+	struct autogroup *prev;
+	struct task_struct *t;
+
+	spin_lock(&p->sighand->siglock);
+
+	prev = p->signal->autogroup;
+	if (prev == ag) {
+		spin_unlock(&p->sighand->siglock);
+		return;
+	}
+
+	p->signal->autogroup = autogroup_kref_get(ag);
+	t = p;
+
+	do {
+		sched_move_task(t);
+	} while_each_thread(p, t);
+
+	spin_unlock(&p->sighand->siglock);
+
+	autogroup_kref_put(prev);
+}
+
+/* Allocates GFP_KERNEL, cannot be called under any spinlock */
+void sched_autogroup_create_attach(struct task_struct *p)
+{
+	struct autogroup *ag = autogroup_create();
+
+	autogroup_move_group(p, ag);
+	/* drop extra reference added by autogroup_create() */
+	autogroup_kref_put(ag);
+}
+EXPORT_SYMBOL(sched_autogroup_create_attach);
+
+/* Cannot be called under siglock.  Currently has no users */
+void sched_autogroup_detach(struct task_struct *p)
+{
+	autogroup_move_group(p, &autogroup_default);
+}
+EXPORT_SYMBOL(sched_autogroup_detach);
+
+void sched_autogroup_fork(struct signal_struct *sig)
+{
+	struct sighand_struct *sighand = current->sighand;
+
+	spin_lock(&sighand->siglock);
+	sig->autogroup = autogroup_kref_get(current->signal->autogroup);
+	spin_unlock(&sighand->siglock);
+}
+
+void sched_autogroup_exit(struct signal_struct *sig)
+{
+	autogroup_kref_put(sig->autogroup);
+}
+
+static int __init setup_autogroup(char *str)
+{
+	sysctl_sched_autogroup_enabled = 0;
+
+	return 1;
+}
+
+__setup("noautogroup", setup_autogroup);
+
+#ifdef CONFIG_SCHED_DEBUG
+static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
+{
+	return snprintf(buf, buflen, "%s-%ld", "autogroup", tg->autogroup->id);
+}
+#endif
+#endif /* CONFIG_SCHED_AUTOGROUP */
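The reference counting above follows the usual kref discipline: autogroup_create() hands back one reference, autogroup_move_group() takes a second one on behalf of the signal_struct before dropping whatever it replaced, and sched_autogroup_create_attach() then drops the creation reference, leaving exactly one holder per attached signal_struct. A rough userspace analogue of that get/put flow, with C11 atomics standing in for struct kref (illustrative only, not kernel code):

    /* kref_sketch.c - userspace stand-in for the autogroup get/put flow. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refs;
    };

    static struct obj *obj_create(void)        /* ~ autogroup_create() */
    {
        struct obj *o = malloc(sizeof(*o));

        if (!o)
            abort();
        atomic_init(&o->refs, 1);   /* like kref_init(): creator owns a ref */
        return o;
    }

    static struct obj *obj_get(struct obj *o)  /* ~ autogroup_kref_get() */
    {
        atomic_fetch_add(&o->refs, 1);
        return o;
    }

    static void obj_put(struct obj *o)         /* ~ autogroup_kref_put() */
    {
        if (atomic_fetch_sub(&o->refs, 1) == 1) {
            puts("last reference dropped, destroying");
            free(o);                /* ~ autogroup_destroy() */
        }
    }

    int main(void)
    {
        struct obj *ag = obj_create();     /* create_attach: ref = 1   */
        struct obj *held = obj_get(ag);    /* move_group: signal's ref */

        obj_put(ag);    /* create_attach drops the creation reference  */
        obj_put(held);  /* sched_autogroup_exit(): last put destroys   */
        return 0;
    }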
(-)linux-2.6.36/kernel/sysctl.c (+11 lines)
@@ -384,6 +384,17 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+#ifdef CONFIG_SCHED_AUTOGROUP
+	{
+		.procname	= "sched_autogroup_enabled",
+		.data		= &sysctl_sched_autogroup_enabled,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+#endif
 #ifdef CONFIG_PROVE_LOCKING
 	{
 		.procname	= "prove_locking",
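The table entry above exposes the runtime knob as /proc/sys/kernel/sched_autogroup_enabled (mode 0644, bounded to 0..1 by extra1/extra2), complementing the noautogroup boot parameter documented further down. A small illustrative reader/writer for that proc file, assuming nothing beyond standard I/O (writing requires root):

    /* autogroup_toggle.c - illustrative only; path taken from .procname. */
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        const char *path = "/proc/sys/kernel/sched_autogroup_enabled";
        FILE *f = fopen(path, argc > 1 ? "w" : "r");
        int val;

        if (!f) {
            perror(path);
            return 1;
        }
        if (argc > 1) {
            fputs(argv[1], f);              /* write "0" or "1" */
        } else if (fscanf(f, "%d", &val) == 1) {
            printf("sched_autogroup_enabled = %d\n", val);
        }
        fclose(f);
        return 0;
    }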
(-)linux-2.6.36/init/Kconfig (+12 lines)
@@ -652,6 +652,18 @@ config DEBUG_BLK_CGROUP
 
 endif # CGROUPS
 
+config SCHED_AUTOGROUP
+	bool "Automatic process group scheduling"
+	select CGROUPS
+	select CGROUP_SCHED
+	select FAIR_GROUP_SCHED
+	help
+	  This option optimizes the scheduler for common desktop workloads by
+	  automatically creating and populating task groups.  This separation
+	  of workloads isolates aggressive CPU burners (like build jobs) from
+	  desktop applications.  Task group autogeneration is currently based
+	  upon task session.
+
 config MM_OWNER
 	bool
 
(-)linux-2.6.36/Documentation/kernel-parameters.txt (+2 lines)
@@ -1610,6 +1610,8 @@ and is between 256 and 4096 characters.
 	noapic		[SMP,APIC] Tells the kernel to not make use of any
 			IOAPICs that may be present in the system.
 
+	noautogroup	Disable scheduler automatic task group creation.
+
 	nobats		[PPC] Do not use BATs for mapping kernel lowmem
 			on "Classic" PPC cores.
 
(-)linux-2.6.36.git/kernel/sched.c (-4 / +4 lines)
@@ -8297,12 +8297,12 @@ void sched_move_task(struct task_struct
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
 
-	set_task_rq(tsk, task_cpu(tsk));
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	if (tsk->sched_class->moved_group)
-		tsk->sched_class->moved_group(tsk, on_rq);
+	if (tsk->sched_class->task_move_group)
+		tsk->sched_class->task_move_group(tsk, on_rq);
+	else
 #endif
+		set_task_rq(tsk, task_cpu(tsk));
 
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
(-)linux-2.6.36.git/include/linux/sched.h (-1 / +1 lines)
@@ -1072,7 +1072,7 @@ struct sched_class {
 					 struct task_struct *task);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*moved_group) (struct task_struct *p, int on_rq);
+	void (*task_move_group) (struct task_struct *p, int on_rq);
 #endif
 };
 
(-)linux-2.6.36.git/kernel/sched_fair.c (-6 / +19 lines)
@@ -3824,13 +3824,26 @@ static void set_curr_task_fair(struct rq
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void moved_group_fair(struct task_struct *p, int on_rq)
+static void task_move_group_fair(struct task_struct *p, int on_rq)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(p);
-
-	update_curr(cfs_rq);
-	if (!on_rq)
-		place_entity(cfs_rq, &p->se, 1);
+	/*
+	 * If the task was not on the rq at the time of this cgroup movement
+	 * it must have been asleep, sleeping tasks keep their ->vruntime
+	 * absolute on their old rq until wakeup (needed for the fair sleeper
+	 * bonus in place_entity()).
+	 *
+	 * If it was on the rq, we've just 'preempted' it, which does convert
+	 * ->vruntime to a relative base.
+	 *
+	 * Make sure both cases convert their relative position when migrating
+	 * to another cgroup's rq. This does somewhat interfere with the
+	 * fair sleeper stuff for the first placement, but who cares.
+	 */
+	if (!on_rq)
+		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+	set_task_rq(p, task_cpu(p));
+	if (!on_rq)
+		p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
 }
 #endif
 
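The comment block above carries the actual reasoning of the fix: a sleeping task's se.vruntime is absolute against its old cfs_rq's min_vruntime, so moving it between groups means subtracting the old queue's base and adding the new one, preserving only the task's relative position. A toy rebase with made-up numbers (illustrative only):

    /* vruntime_rebase.c - toy illustration of task_move_group_fair();
     * all numbers are invented for the example.
     */
    #include <stdio.h>

    int main(void)
    {
        long long old_min = 1000;  /* old cfs_rq->min_vruntime */
        long long new_min = 250;   /* new cfs_rq->min_vruntime */
        long long v = 1040;        /* sleeper: absolute on the old rq */

        v -= old_min;  /* p->se.vruntime -= old min_vruntime  -> 40  */
        /* set_task_rq() switches p to the new group's cfs_rq here   */
        v += new_min;  /* p->se.vruntime += new min_vruntime  -> 290 */

        printf("vruntime rebased from 1040 to %lld; 40 units of lag kept\n", v);
        return 0;
    }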
@@ -3882,7 +3895,7 @@ static const struct sched_class fair_sch
 	.get_rr_interval	= get_rr_interval_fair,
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	.moved_group		= moved_group_fair,
+	.task_move_group	= task_move_group_fair,
 #endif
 };
 
