
(-)a/arch/um/include/as-layout.h (-7 / +6 lines)
Lines 23-38
  */
 
 #ifdef __ASSEMBLY__
-#define _AC(X, Y)	(Y)
+#define _C(Y)	(Y)
 #else
-#define __AC(X, Y)	(X (Y))
-#define _AC(X, Y)	__AC(X, Y)
+#define _C(Y)	((unsigned long) (Y))
 #endif
 
-#define STUB_START _AC(, 0x100000)
-#define STUB_CODE _AC((unsigned long), STUB_START)
-#define STUB_DATA _AC((unsigned long), STUB_CODE + UM_KERN_PAGE_SIZE)
-#define STUB_END _AC((unsigned long), STUB_DATA + UM_KERN_PAGE_SIZE)
+#define STUB_START _C(0x100000)
+#define STUB_CODE STUB_START
+#define STUB_DATA (STUB_CODE + UM_KERN_PAGE_SIZE)
+#define STUB_END (STUB_DATA + UM_KERN_PAGE_SIZE)
 
 #ifndef __ASSEMBLY__
 
(-)a/arch/um/include/kern_util.h (-2 / +2 lines)
Lines 20-28 extern int kmalloc_ok;
 extern unsigned long alloc_stack(int order, int atomic);
 extern void free_stack(unsigned long stack, int order);
 
-extern int do_signal(void);
+extern void do_signal(void);
 extern void copy_sc(struct uml_pt_regs *regs, void *from);
-extern void interrupt_end(void);
+extern int interrupt_end(void);
 extern void relay_signal(int sig, struct uml_pt_regs *regs);
 
 extern unsigned long segv(struct faultinfo fi, unsigned long ip,
(-)a/arch/um/include/os.h (+1 lines)
Lines 265-270 extern int is_skas_winch(int pid, int fd, void *data);
 extern int start_userspace(unsigned long stub_stack);
 extern int copy_context_skas0(unsigned long stack, int pid);
 extern void userspace(struct uml_pt_regs *regs);
+extern void vcpu_userspace(struct uml_pt_regs *regs, int mm_fd);
 extern int map_stub_pages(int fd, unsigned long code, unsigned long data,
 			  unsigned long stack);
 extern void new_thread(void *stack, jmp_buf *buf, void (*handler)(void));
(-)a/arch/um/include/siginfo_segv.h (+133 lines)
Line 0 (new file)
+/*
+ * Copyright (C) 2002- 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
+#ifndef __SIGINFO_SIGSEGV_H_
+#define __SIGINFO_SIGSEGV_H_
+
+/*
+ * Provide signal.h, except for replacing siginfo_t with one that has
+ * the CPU trap number and error code in the SIGSEGV case.
+ */
+
+#include <time.h>
+
+/* Rename the signal.h siginfo and siginfo_t out of the way */
+#define siginfo old_siginfo
+#define siginfo_t old_siginfo_t
+
+#include <signal.h>
+
+#undef siginfo
+#undef siginfo_t
+
+#define __ARCH_SI_TRAPNO
+#define __ARCH_SI_ERROR
+
+/* The new siginfo_t, plus associated definitions */
+
+/*
+ * This is the size (including padding) of the part of the
+ * struct siginfo that is before the union.
+ */
+#ifndef __ARCH_SI_PREAMBLE_SIZE
+#define __ARCH_SI_PREAMBLE_SIZE	(3 * sizeof(int))
+#endif
+
+#define SI_MAX_SIZE	128
+#ifndef SI_PAD_SIZE
+#define SI_PAD_SIZE	((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
+#endif
+
+#ifndef __ARCH_SI_UID_T
+#define __ARCH_SI_UID_T	uid_t
+#endif
+
+/*
+ * The default "si_band" type is "long", as specified by POSIX.
+ * However, some architectures want to override this to "int"
+ * for historical compatibility reasons, so we allow that.
+ */
+#ifndef __ARCH_SI_BAND_T
+#define __ARCH_SI_BAND_T long
+#endif
+
+#define __user
+
+typedef struct siginfo {
+	int si_signo;
+	int si_errno;
+	int si_code;
+
+	union {
+		int _pad[SI_PAD_SIZE];
+
+		/* kill() */
+		struct {
+			pid_t _pid;		/* sender's pid */
+			__ARCH_SI_UID_T _uid;	/* sender's uid */
+		} _kill;
+
+		/* POSIX.1b timers */
+		struct {
+			timer_t _tid;		/* timer id */
+			int _overrun;		/* overrun count */
+			char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
+			sigval_t _sigval;	/* same as below */
+			int _sys_private;       /* not to be passed to user */
+		} _timer;
+
+		/* POSIX.1b signals */
+		struct {
+			pid_t _pid;		/* sender's pid */
+			__ARCH_SI_UID_T _uid;	/* sender's uid */
+			sigval_t _sigval;
+		} _rt;
+
+		/* SIGCHLD */
+		struct {
+			pid_t _pid;		/* which child */
+			__ARCH_SI_UID_T _uid;	/* sender's uid */
+			int _status;		/* exit code */
+			clock_t _utime;
+			clock_t _stime;
+		} _sigchld;
+
+		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+		struct {
+			void __user *_addr; /* faulting insn/memory ref. */
+#ifdef __ARCH_SI_TRAPNO
+			int _trapno;	/* TRAP # which caused the signal */
+#endif
+#ifdef __ARCH_SI_ERROR
+			int _error;	/* CPU error code */
+#endif
+		} _sigfault;
+
+		/* SIGPOLL */
+		struct {
+			__ARCH_SI_BAND_T _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
+			int _fd;
+		} _sigpoll;
+	} _sifields;
+} siginfo_t;
+
+#ifdef __ARCH_SI_TRAPNO
+#define si_trapno	_sifields._sigfault._trapno
+#endif
+#ifdef __ARCH_SI_ERROR
+#define si_error	_sifields._sigfault._error
+#endif
+
+#undef si_addr
+#define si_addr	_sifields._sigfault._addr
+
+#define GET_FAULTINFO_FROM_SI(fi, si) \
+	{ \
+		(fi).cr2 = (unsigned long) (si).si_addr; \
+		(fi).error_code = (si).si_error; \
+		(fi).trap_no = (si).si_trapno; \
+	}
+
+#endif
(-)a/arch/um/include/skas/mm_id.h (-1 / +1 lines)
Lines 7-13
 #define __MM_ID_H
 
 struct mm_id {
-	union {
+	struct {
 		int mm_fd;
 		int pid;
 	} u;
(-)a/arch/um/include/skas/skas.h (-1 / +111 lines)
Lines 6-23
 #ifndef __SKAS_H
 #define __SKAS_H
 
+#ifndef __KERNEL__
+#include <unistd.h>
+#include <sys/syscall.h>
+#endif
+#include "uml-config.h"
+
+#ifdef UML_CONFIG_X86_32
+#define __NR_new_mm             327
+#define __NR_switch_mm          328
+#define __NR_vcpu		329
+#else
+#define __NR_new_mm             288
+#define __NR_switch_mm          289
+#define __NR_vcpu		290
+#endif
+
+#define PTRACE_SWITCH_MM 34
+
+#ifndef __ASSEMBLY__
+
+#include <asm/user.h>
 #include "sysdep/ptrace.h"
 
+#define STUB_ADDR(x) (STUB_CODE + (unsigned long) (x) - \
+		      (unsigned long) &__syscall_stub_start)
+
 extern int userspace_pid[];
 extern int proc_mm, ptrace_faultinfo, ptrace_ldt;
 extern int skas_needs_stub;
 
+extern int have_switch_mm;
+extern int have_ptrace_switch_mm;
+extern int have_siginfo_segv;
+extern int have_vcpu;
+extern int self_mm_fd;
+
 extern int user_thread(unsigned long stack, int flags);
 extern void new_thread_handler(void);
 extern void handle_syscall(struct uml_pt_regs *regs);
-extern int new_mm(unsigned long stack);
+extern int make_new_mm(unsigned long stack);
 extern void get_skas_faultinfo(int pid, struct faultinfo * fi);
 extern long execute_syscall_skas(void *r);
 extern unsigned long current_stub_stack(void);
 
+#ifndef __KERNEL__
+#include <errno.h>
+#include <asm/ldt.h>
+#include "siginfo_segv.h"
+
+#ifdef UML_CONFIG_X86_32
+#define GDT_ENTRY_TLS_ENTRIES 3
+
+struct vcpu_arch {
+	struct user_desc tls_array[GDT_ENTRY_TLS_ENTRIES];
+};
+#else
+struct vcpu_arch { };
+#endif
+
+struct user_regs {
+	unsigned long regs[MAX_REG_NR];
+#ifdef UML_CONFIG_X86_32
+	struct user_fxsr_struct *fp_state;
+	struct user_fxsr_struct fpregs;
+#else
+	struct user_i387_struct *fp_state;
+	struct user_i387_struct fpregs;
+#endif
+};
+
+struct vcpu_user {
+	enum { VCPU_SYSCALL, VCPU_SIGNAL } event;
+	struct user_regs regs;
+	siginfo_t siginfo;
+	struct vcpu_arch arch;
+};
+
+static inline long new_mm(void)
+{
+	int ret = syscall(__NR_new_mm, 0, 0, 0, 0, 0, 0);
+
+	if (ret < 0)
+		return -errno;
+
+	return ret;
+}
+
+static inline long switch_mm(int mm_fd, struct user_regs *save_regs,
+			     struct user_regs *new_regs, unsigned long ip,
+			     unsigned long sp)
+{
+	int ret = syscall(__NR_switch_mm, mm_fd, save_regs, new_regs, ip, sp,
+			  0);
+
+	if (ret < 0)
+		return -errno;
+
+	return 0;
+}
+
+static inline long vcpu(long mm_fd, struct vcpu_user *vcpu)
+{
+	int ret = syscall(__NR_vcpu, mm_fd, vcpu, 0, 0, 0, 0);
+
+	if (ret < 0)
+		return -errno;
+
+	return ret;
+}
+
+static inline int get_thread_area(struct user_desc *u_info)
+{
+	int ret = syscall(__NR_get_thread_area, u_info, 0, 0, 0, 0, 0);
+
+	if (ret < 0)
+		return -errno;
+
+	return ret;
+}
+
+#endif
+
+#endif
+
 #endif
(-)a/arch/um/include/skas_ptrace.h (-13 / +4 lines)
Lines 1-5
 /* 
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  * Licensed under the GPL
  */
 
Lines 7-25
 #define __SKAS_PTRACE_H
 
 #define PTRACE_FAULTINFO 52
-#define PTRACE_SWITCH_MM 55
+#ifndef OLD_PTRACE_SWITCH_MM
+#define OLD_PTRACE_SWITCH_MM 55
+#endif
 
 #include "sysdep/skas_ptrace.h"
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
(-)a/arch/um/include/sysdep-i386/ptrace.h (-1 / +1 lines)
Lines 156-162 struct syscall_args {
 	} while (0)
 
 #define UPT_SET_SYSCALL_RETURN(r, res) \
-	REGS_SET_SYSCALL_RETURN((r)->regs, (res))
+	REGS_SET_SYSCALL_RETURN((r)->gp, (res))
 
 #define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp)
 
(-)a/arch/um/include/sysdep-i386/ptrace_user.h (+2 lines)
Lines 43-48
 
 #define FP_SIZE ((HOST_XFP_SIZE > HOST_FP_SIZE) ? HOST_XFP_SIZE : HOST_FP_SIZE)
 
+#define FP_SIZE ((HOST_XFP_SIZE > HOST_FP_SIZE) ? HOST_XFP_SIZE : HOST_FP_SIZE)
+
 #ifndef FRAME_SIZE
 #define FRAME_SIZE (17)
 #endif
Lines 1-7 Link Here
1
#ifndef _SYSDEP_TLS_H
1
#ifndef _SYSDEP_TLS_H
2
#define _SYSDEP_TLS_H
2
#define _SYSDEP_TLS_H
3
3
4
# ifndef __KERNEL__
4
#ifndef __KERNEL__
5
5
6
/* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
6
/* Change name to avoid conflicts with the original one from <asm/ldt.h>, which
7
 * may be named user_desc (but in 2.4 and in header matching its API was named
7
 * may be named user_desc (but in 2.4 and in header matching its API was named
Lines 19-31 typedef struct um_dup_user_desc { Link Here
19
	unsigned int  useable:1;
19
	unsigned int  useable:1;
20
} user_desc_t;
20
} user_desc_t;
21
21
22
# else /* __KERNEL__ */
22
#else /* __KERNEL__ */
23
23
24
#  include <asm/ldt.h>
24
#include <asm/host_ldt.h>
25
typedef struct user_desc user_desc_t;
25
typedef struct user_desc user_desc_t;
26
26
27
# endif /* __KERNEL__ */
27
# endif /* __KERNEL__ */
28
28
29
struct uml_tls_struct {
30
	user_desc_t tls;
31
	unsigned flushed:1;
32
	unsigned present:1;
33
};
34
29
#define GDT_ENTRY_TLS_MIN_I386 6
35
#define GDT_ENTRY_TLS_MIN_I386 6
30
#define GDT_ENTRY_TLS_MIN_X86_64 12
36
#define GDT_ENTRY_TLS_MIN_X86_64 12
31
37
(-)a/arch/um/include/sysdep-x86_64/ptrace.h (-5 / +3 lines)
Lines 225-240 struct syscall_args { Link Here
225
	})
225
	})
226
226
227
#define UPT_SET_SYSCALL_RETURN(r, res) \
227
#define UPT_SET_SYSCALL_RETURN(r, res) \
228
	REGS_SET_SYSCALL_RETURN((r)->regs, (res))
228
	REGS_SET_SYSCALL_RETURN((r)->gp, (res))
229
229
230
#define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp)
230
#define UPT_RESTART_SYSCALL(r) REGS_RESTART_SYSCALL((r)->gp)
231
231
232
#define UPT_SEGV_IS_FIXABLE(r) REGS_SEGV_IS_FIXABLE(&r->skas)
232
#define UPT_SEGV_IS_FIXABLE(r) REGS_SEGV_IS_FIXABLE(&(r)->skas)
233
233
234
#define UPT_FAULTINFO(r) (&(r)->faultinfo)
234
#define UPT_FAULTINFO(r) (&(r)->faultinfo)
235
235
236
static inline void arch_init_registers(int pid)
236
extern void arch_init_registers(int pid);
237
{
238
}
239
237
240
#endif
238
#endif
(-)a/arch/um/include/sysdep-x86_64/ptrace_user.h (+2 lines)
Lines 72-77 Link Here
72
72
73
#define FP_SIZE (HOST_FP_SIZE)
73
#define FP_SIZE (HOST_FP_SIZE)
74
74
75
#define FP_SIZE (HOST_FP_SIZE)
76
75
#endif
77
#endif
76
78
77
/*
79
/*
(-)a/arch/um/kernel/process.c (-4 / +13 lines)
Lines 111-122 void *_switch_to(void *prev, void *next, void *last) Link Here
111
111
112
}
112
}
113
113
114
void interrupt_end(void)
114
int interrupt_end(void)
115
{
115
{
116
	if (need_resched())
116
	if (need_resched())
117
		schedule();
117
		schedule();
118
	if (test_tsk_thread_flag(current, TIF_SIGPENDING))
118
	if (test_thread_flag(TIF_SIGPENDING))
119
		do_signal();
119
		do_signal();
120
	return current->mm->context.id.u.mm_fd;
120
}
121
}
121
122
122
void exit_thread(void)
123
void exit_thread(void)
Lines 152-158 void new_thread_handler(void) Link Here
152
	if (n == 1) {
153
	if (n == 1) {
153
		/* Handle any immediate reschedules or signals */
154
		/* Handle any immediate reschedules or signals */
154
		interrupt_end();
155
		interrupt_end();
155
		userspace(&current->thread.regs.regs);
156
		if (have_vcpu)
157
			vcpu_userspace(&current->thread.regs.regs,
158
				       current->mm->context.id.u.mm_fd);
159
		else
160
			userspace(&current->thread.regs.regs);
156
	}
161
	}
157
	else do_exit(0);
162
	else do_exit(0);
158
}
163
}
Lines 176-182 void fork_handler(void) Link Here
176
	/* Handle any immediate reschedules or signals */
181
	/* Handle any immediate reschedules or signals */
177
	interrupt_end();
182
	interrupt_end();
178
183
179
	userspace(&current->thread.regs.regs);
184
	if (have_vcpu)
185
		vcpu_userspace(&current->thread.regs.regs,
186
			       current->mm->context.id.u.mm_fd);
187
	else
188
		userspace(&current->thread.regs.regs);
180
}
189
}
181
190
182
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
191
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
(-)a/arch/um/kernel/ptrace.c (-1 / +34 lines)
Lines 192-198 long arch_ptrace(struct task_struct *child, long request, long addr, long data) Link Here
192
	}
192
	}
193
#endif
193
#endif
194
#ifdef CONFIG_PROC_MM
194
#ifdef CONFIG_PROC_MM
195
	case PTRACE_SWITCH_MM: {
195
	case OLD_PTRACE_SWITCH_MM: {
196
		struct mm_struct *old = child->mm;
196
		struct mm_struct *old = child->mm;
197
		struct mm_struct *new = proc_mm_get_mm(data);
197
		struct mm_struct *new = proc_mm_get_mm(data);
198
198
Lines 292-294 void syscall_trace(struct uml_pt_regs *regs, int entryexit) Link Here
292
		current->exit_code = 0;
292
		current->exit_code = 0;
293
	}
293
	}
294
}
294
}
295
296
int ptrace_to_pt_regs(struct pt_regs *to, struct user_regs __user *from)
297
{
298
	struct user_regs regs;
299
	int rem;
300
301
	rem = copy_from_user(&regs, from, sizeof(regs));
302
	if (rem)
303
		return -EFAULT;
304
305
	memcpy(&to->regs.gp, &regs.regs, sizeof(to->regs.gp));
306
307
	return put_fp_registers(userspace_pid[0],
308
				(unsigned long *) &regs.fpregs);
309
}
310
311
int pt_regs_to_ptrace(struct user_regs __user *to, struct pt_regs *from)
312
{
313
	struct user_regs regs;
314
	int err;
315
316
	err = get_fp_registers(userspace_pid[0],
317
			       (unsigned long *) &regs.fpregs);
318
	if (err)
319
		return err;
320
321
	memcpy(&regs.regs, &from->regs.gp, sizeof(regs.regs));
322
323
	if(copy_to_user(to, &regs, sizeof(regs)))
324
		return -EFAULT;
325
326
	return 0;
327
}
(-)a/arch/um/kernel/reboot.c (-1 / +1 lines)
Lines 12-18 void (*pm_power_off)(void); Link Here
12
12
13
static void kill_off_processes(void)
13
static void kill_off_processes(void)
14
{
14
{
15
	if (proc_mm)
15
	if (proc_mm || have_switch_mm)
16
		/*
16
		/*
17
		 * FIXME: need to loop over userspace_pids
17
		 * FIXME: need to loop over userspace_pids
18
		 */
18
		 */
(-)a/arch/um/kernel/signal.c (-7 / +9 lines)
Lines 85-92 static int handle_signal(struct pt_regs *regs, unsigned long signr, Link Here
85
	return err;
85
	return err;
86
}
86
}
87
87
88
static int kern_do_signal(struct pt_regs *regs)
88
extern int unvcpu(struct pt_regs *regs, siginfo_t *siginfo);
89
90
void do_signal(void)
89
{
91
{
92
	struct pt_regs *regs = &current->thread.regs;
90
	struct k_sigaction ka_copy;
93
	struct k_sigaction ka_copy;
91
	siginfo_t info;
94
	siginfo_t info;
92
	sigset_t *oldset;
95
	sigset_t *oldset;
Lines 98-103 static int kern_do_signal(struct pt_regs *regs) Link Here
98
		oldset = &current->blocked;
101
		oldset = &current->blocked;
99
102
100
	while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) {
103
	while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) {
104
		if (test_thread_flag(TIF_VCPU)) {
105
			PT_REGS_SET_SYSCALL_RETURN(regs, unvcpu(regs, &info));
106
			return;
107
		}
108
101
		handled_sig = 1;
109
		handled_sig = 1;
102
		/* Whee!  Actually deliver the signal.  */
110
		/* Whee!  Actually deliver the signal.  */
103
		if (!handle_signal(regs, sig, &ka_copy, &info, oldset)) {
111
		if (!handle_signal(regs, sig, &ka_copy, &info, oldset)) {
Lines 150-161 static int kern_do_signal(struct pt_regs *regs) Link Here
150
		clear_thread_flag(TIF_RESTORE_SIGMASK);
158
		clear_thread_flag(TIF_RESTORE_SIGMASK);
151
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
159
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
152
	}
160
	}
153
	return handled_sig;
154
}
155
156
int do_signal(void)
157
{
158
	return kern_do_signal(&current->thread.regs);
159
}
161
}
160
162
161
/*
163
/*
(-)a/arch/um/kernel/skas/clone.c (-1 / +1 lines)
Lines 3-10 Link Here
3
 * Licensed under the GPL
3
 * Licensed under the GPL
4
 */
4
 */
5
5
6
#include <signal.h>
7
#include <sched.h>
6
#include <sched.h>
7
#include <signal.h>
8
#include <asm/unistd.h>
8
#include <asm/unistd.h>
9
#include <sys/time.h>
9
#include <sys/time.h>
10
#include "as-layout.h"
10
#include "as-layout.h"
(-)a/arch/um/kernel/skas/mmu.c (-3 / +19 lines)
Lines 46-51 static int init_stub_pte(struct mm_struct *mm, unsigned long proc, Link Here
46
	return -ENOMEM;
46
	return -ENOMEM;
47
}
47
}
48
48
49
extern int copy_context_skas4(struct mm_id *id);
50
extern int get_new_mm(void);
51
49
int init_new_context(struct task_struct *task, struct mm_struct *mm)
52
int init_new_context(struct task_struct *task, struct mm_struct *mm)
50
{
53
{
51
 	struct mm_context *from_mm = NULL;
54
 	struct mm_context *from_mm = NULL;
Lines 64-76 int init_new_context(struct task_struct *task, struct mm_struct *mm) Link Here
64
		from_mm = &current->mm->context;
67
		from_mm = &current->mm->context;
65
68
66
	if (proc_mm) {
69
	if (proc_mm) {
67
		ret = new_mm(stack);
70
		ret = make_new_mm(stack);
68
		if (ret < 0) {
71
		if (ret < 0) {
69
			printk(KERN_ERR "init_new_context_skas - "
72
			printk(KERN_ERR "init_new_context_skas - "
70
			       "new_mm failed, errno = %d\n", ret);
73
			       "make_new_mm failed, errno = %d\n", ret);
71
			goto out_free;
74
			goto out_free;
72
		}
75
		}
73
		to_mm->id.u.mm_fd = ret;
76
		to_mm->id.u.mm_fd = ret;
77
	} else if (have_switch_mm) {
78
		to_mm->id.u.mm_fd = get_new_mm();
79
		if (to_mm->id.u.mm_fd < 0) {
80
			ret = to_mm->id.u.mm_fd;
81
			goto out_free;
82
		}
83
84
		ret = copy_context_skas4(&to_mm->id);
85
		if (ret < 0) {
86
			os_close_file(to_mm->id.u.mm_fd);
87
			to_mm->id.u.mm_fd = -1;
88
			goto out_free;
89
		}
74
	}
90
	}
75
	else {
91
	else {
76
		if (from_mm)
92
		if (from_mm)
Lines 167-173 void destroy_context(struct mm_struct *mm) Link Here
167
{
183
{
168
	struct mm_context *mmu = &mm->context;
184
	struct mm_context *mmu = &mm->context;
169
185
170
	if (proc_mm)
186
	if (proc_mm || have_switch_mm)
171
		os_close_file(mmu->id.u.mm_fd);
187
		os_close_file(mmu->id.u.mm_fd);
172
	else {
188
	else {
173
		/*
189
		/*
(-)a/arch/um/kernel/skas/process.c (-2 / +3 lines)
Lines 10-16 Link Here
10
#include "os.h"
10
#include "os.h"
11
#include "skas.h"
11
#include "skas.h"
12
12
13
int new_mm(unsigned long stack)
13
int make_new_mm(unsigned long stack)
14
{
14
{
15
	int fd, err;
15
	int fd, err;
16
16
Lines 55-61 int __init start_uml(void) Link Here
55
{
55
{
56
	stack_protections((unsigned long) &cpu0_irqstack);
56
	stack_protections((unsigned long) &cpu0_irqstack);
57
	set_sigstack(cpu0_irqstack, THREAD_SIZE);
57
	set_sigstack(cpu0_irqstack, THREAD_SIZE);
58
	if (proc_mm) {
58
59
	if (!have_vcpu && (proc_mm || have_switch_mm)) {
59
		userspace_pid[0] = start_userspace(0);
60
		userspace_pid[0] = start_userspace(0);
60
		if (userspace_pid[0] < 0) {
61
		if (userspace_pid[0] < 0) {
61
			printf("start_uml - start_userspace returned %d\n",
62
			printf("start_uml - start_userspace returned %d\n",
(-)a/arch/um/kernel/skas/syscall.c (+7 lines)
Lines 12-23 Link Here
12
extern int syscall_table_size;
12
extern int syscall_table_size;
13
#define NR_syscalls (syscall_table_size / sizeof(void *))
13
#define NR_syscalls (syscall_table_size / sizeof(void *))
14
14
15
extern int unvcpu(struct pt_regs *regs, siginfo_t *siginfo);
16
15
void handle_syscall(struct uml_pt_regs *r)
17
void handle_syscall(struct uml_pt_regs *r)
16
{
18
{
17
	struct pt_regs *regs = container_of(r, struct pt_regs, regs);
19
	struct pt_regs *regs = container_of(r, struct pt_regs, regs);
18
	long result;
20
	long result;
19
	int syscall;
21
	int syscall;
20
22
23
	if (test_thread_flag(TIF_VCPU)) {
24
		REGS_SET_SYSCALL_RETURN(r->gp, unvcpu(regs, NULL));
25
		return;
26
	}
27
21
	syscall_trace(r, 0);
28
	syscall_trace(r, 0);
22
29
23
	/*
30
	/*
(-)a/arch/um/kernel/syscall.c (-10 / +28 lines)
Lines 1-17 Link Here
1
/*
1
/*
2
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
2
 * Copyright (C) 2000 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3
 * Licensed under the GPL
3
 * Licensed under the GPL
4
 */
4
 */
5
5
6
#include "linux/file.h"
6
#include <linux/file.h>
7
#include "linux/fs.h"
7
#include <linux/fs.h>
8
#include "linux/mm.h"
8
#include <linux/mm.h>
9
#include "linux/sched.h"
9
#include <linux/sched.h>
10
#include "linux/utsname.h"
10
#include <linux/utsname.h>
11
#include "asm/current.h"
11
#include <asm/current.h>
12
#include "asm/mman.h"
12
#include <asm/mman.h>
13
#include "asm/uaccess.h"
13
#include <asm/uaccess.h>
14
#include "asm/unistd.h"
14
#include <asm/unistd.h>
15
15
16
long sys_fork(void)
16
long sys_fork(void)
17
{
17
{
Lines 148-150 int kernel_execve(const char *filename, char *const argv[], char *const envp[]) Link Here
148
148
149
	return ret;
149
	return ret;
150
}
150
}
151
152
extern long do_switch_mm(int fd, long __user *save, long __user *new,
153
			 unsigned long ip, unsigned long sp,
154
			 struct pt_regs *regs);
155
156
long sys_switch_mm(int fd, long __user *save, long __user *new,
157
		   unsigned long ip, unsigned long sp)
158
{
159
	return do_switch_mm(fd, save, new, ip, sp, &current->thread.regs);
160
}
161
162
extern long do_vcpu(int mm_fd, struct vcpu_user __user *new,
163
		    struct pt_regs *regs);
164
165
long sys_vcpu(int mm_fd, struct vcpu_user __user *new)
166
{
167
	return do_vcpu(mm_fd, new, &current->thread.regs);
168
}
(-)a/arch/um/kernel/um_arch.c (-1 / +3 lines)
Lines 284-290 int __init linux_main(int argc, char **argv) Link Here
284
284
285
	can_do_skas();
285
	can_do_skas();
286
286
287
	if (proc_mm && ptrace_faultinfo)
287
	if (have_switch_mm)
288
		mode = "SKAS4";
289
	else if (proc_mm && ptrace_faultinfo)
288
		mode = "SKAS3";
290
		mode = "SKAS3";
289
	else
291
	else
290
		mode = "SKAS0";
292
		mode = "SKAS0";
(-)a/arch/um/os-Linux/skas/mem.c (-77 / +122 lines)
Lines 22-28 Link Here
22
#include "sysdep/stub.h"
22
#include "sysdep/stub.h"
23
#include "uml-config.h"
23
#include "uml-config.h"
24
24
25
extern unsigned long batch_syscall_stub, __syscall_stub_start;
25
extern unsigned long batch_syscall_stub, switch_mm_stub, __syscall_stub_start;
26
26
27
extern void wait_stub_done(int pid);
27
extern void wait_stub_done(int pid);
28
28
Lines 41-74 static unsigned long syscall_regs[MAX_REG_NR]; Link Here
41
static int __init init_syscall_regs(void)
41
static int __init init_syscall_regs(void)
42
{
42
{
43
	get_safe_registers(syscall_regs);
43
	get_safe_registers(syscall_regs);
44
	syscall_regs[REGS_IP_INDEX] = STUB_CODE +
44
45
		((unsigned long) &batch_syscall_stub -
45
	syscall_regs[REGS_IP_INDEX] = STUB_ADDR(&batch_syscall_stub);
46
		 (unsigned long) &__syscall_stub_start);
47
	return 0;
46
	return 0;
48
}
47
}
49
48
50
__initcall(init_syscall_regs);
49
__initcall(init_syscall_regs);
51
50
52
extern int proc_mm;
51
static int syscall_stub_done(unsigned long stack)
52
{
53
	unsigned long *syscall, *data, offset;
54
	int ret, n;
55
56
	/*
57
	 * When the stub stops, we find the following values on the
58
	 * beginning of the stack:
59
	 * (long) return_value
60
	 * (long) offset to failed sycall data (0 if no error)
61
	 */
62
	ret = *((unsigned long *) stack);
63
	offset = *((unsigned long *) stack + 1);
64
	if (offset == 0)
65
		return 0;
66
67
	data = (unsigned long *)(stack + offset - STUB_DATA);
68
	printk(UM_KERN_ERR "syscall_stub_done : ret = %d, offset = %ld, "
69
	       "data = %p\n", ret, offset, data);
70
	syscall = (unsigned long *)((unsigned long)data + data[0]);
71
	printk(UM_KERN_ERR "syscall_stub_done : syscall %ld failed, "
72
	       "return value = 0x%x, expected return value = 0x%lx\n",
73
	       syscall[0], ret, syscall[7]);
74
	printk(UM_KERN_ERR "    syscall parameters: "
75
	       "0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
76
	       syscall[1], syscall[2], syscall[3],
77
	       syscall[4], syscall[5], syscall[6]);
78
	for (n = 1; n < data[0]/sizeof(long); n++) {
79
		if (n == 1)
80
			printk(UM_KERN_ERR "    additional syscall "
81
			       "data:");
82
		if (n % 4 == 1)
83
			printk("\n" UM_KERN_ERR "      ");
84
		printk(UM_KERN_CONT "  0x%lx", data[n]);
85
	}
86
	if (n > 1)
87
		printk("\n");
53
88
54
int single_count = 0;
89
	return ret;
55
int multi_count = 0;
90
}
56
int multi_op_count = 0;
57
91
58
static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr)
92
static long do_syscall_stub(struct mm_id *mm_idp, void **addr)
59
{
93
{
60
	int n, i;
94
	long ret;
61
	long ret, offset;
95
	int n, i, err, pid = mm_idp->u.pid;
62
	unsigned long * data;
63
	unsigned long * syscall;
64
	int err, pid = mm_idp->u.pid;
65
96
66
	if (proc_mm)
97
	if (proc_mm)
67
		/* FIXME: Need to look up userspace_pid by cpu */
98
		/* FIXME: Need to look up userspace_pid by cpu */
68
		pid = userspace_pid[0];
99
		pid = userspace_pid[0];
69
100
70
	multi_count++;
71
72
	n = ptrace_setregs(pid, syscall_regs);
101
	n = ptrace_setregs(pid, syscall_regs);
73
	if (n < 0) {
102
	if (n < 0) {
74
		printk(UM_KERN_ERR "Registers - \n");
103
		printk(UM_KERN_ERR "Registers - \n");
Lines 85-136 static inline long do_syscall_stub(struct mm_id * mm_idp, void **addr) Link Here
85
114
86
	wait_stub_done(pid);
115
	wait_stub_done(pid);
87
116
88
	/*
117
	ret = syscall_stub_done(mm_idp->stack);
89
	 * When the stub stops, we find the following values on the
90
	 * beginning of the stack:
91
	 * (long )return_value
92
	 * (long )offset to failed sycall-data (0, if no error)
93
	 */
94
	ret = *((unsigned long *) mm_idp->stack);
95
	offset = *((unsigned long *) mm_idp->stack + 1);
96
	if (offset) {
97
		data = (unsigned long *)(mm_idp->stack + offset - STUB_DATA);
98
		printk(UM_KERN_ERR "do_syscall_stub : ret = %ld, offset = %ld, "
99
		       "data = %p\n", ret, offset, data);
100
		syscall = (unsigned long *)((unsigned long)data + data[0]);
101
		printk(UM_KERN_ERR "do_syscall_stub: syscall %ld failed, "
102
		       "return value = 0x%lx, expected return value = 0x%lx\n",
103
		       syscall[0], ret, syscall[7]);
104
		printk(UM_KERN_ERR "    syscall parameters: "
105
		       "0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
106
		       syscall[1], syscall[2], syscall[3],
107
		       syscall[4], syscall[5], syscall[6]);
108
		for (n = 1; n < data[0]/sizeof(long); n++) {
109
			if (n == 1)
110
				printk(UM_KERN_ERR "    additional syscall "
111
				       "data:");
112
			if (n % 4 == 1)
113
				printk("\n" UM_KERN_ERR "      ");
114
			printk("  0x%lx", data[n]);
115
		}
116
		if (n > 1)
117
			printk("\n");
118
	}
119
	else ret = 0;
120
118
121
	*addr = check_init_stack(mm_idp, NULL);
119
	*addr = check_init_stack(mm_idp, NULL);
122
120
123
	return ret;
121
	return ret;
124
}
122
}
125
123
126
long run_syscall_stub(struct mm_id * mm_idp, int syscall,
124
static struct user_regs return_regs;
125
126
long do_syscall_stub_skas4(struct mm_id *mm_idp, void **addr, unsigned long ip,
127
			   unsigned long sp)
128
{
129
	long ret;
130
	unsigned long *ptr;
131
	int err;
132
	sigset_t sigs, old;
133
134
	ptr = (unsigned long *) (mm_idp->stack + UM_KERN_PAGE_SIZE -
135
				 sizeof(long));
136
	*ptr = (unsigned long) &return_regs;
137
	*(ptr - 1) = self_mm_fd;
138
139
	sigfillset(&sigs);
140
	sigprocmask(SIG_SETMASK, &sigs, &old);
141
	err = switch_mm(mm_idp->u.mm_fd, &return_regs, NULL, ip, sp);
142
	sigprocmask(SIG_SETMASK, &old, NULL);
143
144
	ret = syscall_stub_done(mm_idp->stack);
145
146
	*addr = check_init_stack(mm_idp, NULL);
147
148
	return ret;
149
}
150
151
static int flush_syscalls(struct mm_id *mm_idp, void **addr, int extra)
152
{
153
	unsigned long *stack = check_init_stack(mm_idp, *addr);
154
	int current, end;
155
156
	current = ((unsigned long) stack) & ~UM_KERN_PAGE_MASK;
157
	end = UM_KERN_PAGE_SIZE;
158
159
	if (have_switch_mm)
160
		end -= 2 * sizeof(long);
161
162
	if (current + (10 + extra) * sizeof(long) < end)
163
		return 0;
164
165
	if (have_switch_mm)
166
		return do_syscall_stub_skas4(mm_idp, addr,
167
					     STUB_ADDR(&switch_mm_stub), 0);
168
	else
169
		return do_syscall_stub(mm_idp, addr);
170
}
171
172
long run_syscall_stub(struct mm_id *mm_idp, int syscall,
127
		      unsigned long *args, long expected, void **addr,
173
		      unsigned long *args, long expected, void **addr,
128
		      int done)
174
		      int done)
129
{
175
{
130
	unsigned long *stack = check_init_stack(mm_idp, *addr);
176
	unsigned long *stack;
177
	int ret;
131
178
132
	if (done && *addr == NULL)
179
	ret = flush_syscalls(mm_idp, addr, 0);
133
		single_count++;
180
	if (ret)
181
		return ret;
182
183
	stack = check_init_stack(mm_idp, *addr);
134
184
135
	*stack += sizeof(long);
185
	*stack += sizeof(long);
136
	stack += *stack / sizeof(long);
186
	stack += *stack / sizeof(long);
Lines 144-188 long run_syscall_stub(struct mm_id * mm_idp, int syscall, Link Here
144
	*stack++ = args[5];
194
	*stack++ = args[5];
145
	*stack++ = expected;
195
	*stack++ = expected;
146
	*stack = 0;
196
	*stack = 0;
147
	multi_op_count++;
148
197
149
	if (!done && ((((unsigned long) stack) & ~UM_KERN_PAGE_MASK) <
198
	if (!done) {
150
		     UM_KERN_PAGE_SIZE - 10 * sizeof(long))) {
151
		*addr = stack;
199
		*addr = stack;
152
		return 0;
200
		return 0;
153
	}
201
	}
154
202
155
	return do_syscall_stub(mm_idp, addr);
203
	if (have_switch_mm)
204
		return do_syscall_stub_skas4(mm_idp, addr,
205
					     STUB_ADDR(&switch_mm_stub), 0);
206
	else
207
		return do_syscall_stub(mm_idp, addr);
208
209
	*addr = stack;
210
	return 0;
156
}
211
}
157
212
158
long syscall_stub_data(struct mm_id * mm_idp,
213
long syscall_stub_data(struct mm_id *mm_idp, unsigned long *data,
159
		       unsigned long *data, int data_count,
214
		       int data_count, void **addr, void **stub_addr)
160
		       void **addr, void **stub_addr)
161
{
215
{
162
	unsigned long *stack;
216
	unsigned long *stack;
163
	int ret = 0;
217
	int ret;
164
218
165
	/*
219
	ret = flush_syscalls(mm_idp, addr, data_count);
166
	 * If *addr still is uninitialized, it *must* contain NULL.
220
	if (ret)
167
	 * Thus in this case do_syscall_stub correctly won't be called.
221
		return ret;
168
	 */
169
	if ((((unsigned long) *addr) & ~UM_KERN_PAGE_MASK) >=
170
	   UM_KERN_PAGE_SIZE - (10 + data_count) * sizeof(long)) {
171
		ret = do_syscall_stub(mm_idp, addr);
172
		/* in case of error, don't overwrite data on stack */
173
		if (ret)
174
			return ret;
175
	}
176
222
177
	stack = check_init_stack(mm_idp, *addr);
223
	stack = check_init_stack(mm_idp, *addr);
178
	*addr = stack;
224
	*stack = data_count;
179
225
	*addr = stack++;
180
	*stack = data_count * sizeof(long);
181
226
182
	memcpy(stack + 1, data, data_count * sizeof(long));
227
	memcpy(stack, data, data_count);
183
228
184
	*stub_addr = (void *)(((unsigned long)(stack + 1) &
229
	*stub_addr = (void *)(((unsigned long) stack & ~UM_KERN_PAGE_MASK) +
185
			       ~UM_KERN_PAGE_MASK) + STUB_DATA);
230
			      STUB_DATA);
186
231
187
	return 0;
232
	return 0;
188
}
233
}
(-)a/arch/um/os-Linux/skas/process.c (-17 / +169 lines)
Lines 3-8 Link Here
3
 * Licensed under the GPL
3
 * Licensed under the GPL
4
 */
4
 */
5
5
6
/* Include this first, before anything else includes <signal.h> */
7
#include "siginfo_segv.h"
8
6
#include <stdlib.h>
9
#include <stdlib.h>
7
#include <unistd.h>
10
#include <unistd.h>
8
#include <sched.h>
11
#include <sched.h>
Lines 96-106 bad_wait: Link Here
96
99
97
extern unsigned long current_stub_stack(void);
100
extern unsigned long current_stub_stack(void);
98
101
102
#ifndef PTRACE_GETSIGINFO
103
#define PTRACE_GETSIGINFO	0x4202
104
#endif
105
99
void get_skas_faultinfo(int pid, struct faultinfo * fi)
106
void get_skas_faultinfo(int pid, struct faultinfo * fi)
100
{
107
{
108
	siginfo_t si;
101
	int err;
109
	int err;
102
110
103
	if (ptrace_faultinfo) {
111
	if (have_siginfo_segv) {
112
		err = ptrace(PTRACE_GETSIGINFO, pid, 0, &si);
113
		if (err)
114
			printk(UM_KERN_ERR "PTRACE_GETSIGINFO failed, "
115
			       "err = %d\n", errno);
116
117
		GET_FAULTINFO_FROM_SI(*fi, si);
118
	} else if (ptrace_faultinfo) {
104
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
119
		err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
105
		if (err) {
120
		if (err) {
106
			printk(UM_KERN_ERR "get_skas_faultinfo - "
121
			printk(UM_KERN_ERR "get_skas_faultinfo - "
Lines 113-120 void get_skas_faultinfo(int pid, struct faultinfo * fi) Link Here
113
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
128
			memset((char *)fi + sizeof(struct ptrace_faultinfo), 0,
114
			       sizeof(struct faultinfo) -
129
			       sizeof(struct faultinfo) -
115
			       sizeof(struct ptrace_faultinfo));
130
			       sizeof(struct ptrace_faultinfo));
116
	}
131
	} else {
117
	else {
118
		unsigned long fpregs[FP_SIZE];
132
		unsigned long fpregs[FP_SIZE];
119
133
120
		err = get_fp_registers(pid, fpregs);
134
		err = get_fp_registers(pid, fpregs);
Lines 248-259 static int userspace_tramp(void *stack) Link Here
248
			}
262
			}
249
		}
263
		}
250
	}
264
	}
251
	if (!ptrace_faultinfo && (stack != NULL)) {
265
	if (!ptrace_faultinfo) {
252
		struct sigaction sa;
266
		struct sigaction sa;
253
267
		unsigned long v = STUB_ADDR(stub_segv_handler);
254
		unsigned long v = STUB_CODE +
255
				  (unsigned long) stub_segv_handler -
256
				  (unsigned long) &__syscall_stub_start;
257
268
258
		set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
269
		set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
259
		sigemptyset(&sa.sa_mask);
270
		sigemptyset(&sa.sa_mask);
Lines 295-301 int start_userspace(unsigned long stub_stack) Link Here
295
	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);
306
	sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);
296
307
297
	flags = CLONE_FILES;
308
	flags = CLONE_FILES;
298
	if (proc_mm)
309
	if (proc_mm || have_switch_mm)
299
		flags |= CLONE_VM;
310
		flags |= CLONE_VM;
300
	else
311
	else
301
		flags |= SIGCHLD;
312
		flags |= SIGCHLD;
Lines 347-352 int start_userspace(unsigned long stub_stack) Link Here
347
	return err;
358
	return err;
348
}
359
}
349
360
361
#ifdef UML_CONFIG_X86_32
362
extern void init_vcpu_tls(struct user_desc *tls);
363
364
static void arch_init_vcpu(struct vcpu_arch *vcpu)
365
{
366
	init_vcpu_tls(vcpu->tls_array);
367
}
368
#else
369
static void arch_init_vcpu(struct vcpu_arch *vcpu)
370
{
371
}
372
#endif
373
374
extern unsigned long fp_regs[FP_SIZE];
375
376
void vcpu_userspace(struct uml_pt_regs *regs, int mm_fd)
377
{
378
	struct vcpu_user vcpu_state;
379
	int err;
380
381
	memcpy(&vcpu_state.regs.fpregs, fp_regs, sizeof(fp_regs));
382
	vcpu_state.regs.fp_state = &vcpu_state.regs.fpregs;
383
	while (1) {
384
		memcpy(&vcpu_state.regs.regs, &regs->gp,
385
		       sizeof(vcpu_state.regs.regs));
386
		arch_init_vcpu(&vcpu_state.arch);
387
388
		err = vcpu(mm_fd, &vcpu_state);
389
		if (err)
390
			panic("userspace - could not resume userspace process, "
391
			      "errno = %d\n", errno);
392
393
		regs->is_user = 1;
394
		memcpy(&regs->gp, &vcpu_state.regs.regs,
395
		       sizeof(vcpu_state.regs.regs));
396
397
		UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */
398
		if (vcpu_state.event == VCPU_SYSCALL) {
399
			UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->gp);
400
			handle_syscall(regs);
401
		}
402
		else if (vcpu_state.event == VCPU_SIGNAL){
403
			int sig = vcpu_state.siginfo.si_signo;
404
		  	switch(sig) {
405
			case SIGSEGV:
406
				GET_FAULTINFO_FROM_SI(regs->faultinfo,
407
						      vcpu_state.siginfo);
408
				(*sig_info[SIGSEGV])(SIGSEGV, regs);
409
				break;
410
			case SIGTRAP:
411
				relay_signal(SIGTRAP, regs);
412
				break;
413
			case SIGVTALRM:
414
				block_signals();
415
				(*sig_info[sig])(sig, regs);
416
				unblock_signals();
417
				break;
418
			case SIGIO:
419
			case SIGILL:
420
			case SIGBUS:
421
			case SIGFPE:
422
			case SIGWINCH:
423
				block_signals();
424
				(*sig_info[sig])(sig, regs);
425
				unblock_signals();
426
				break;
427
			default:
428
			        printk(UM_KERN_ERR "userspace - child stopped "
429
				       "with signal %d\n", sig);
430
			}
431
			/* Avoid -ERESTARTSYS handling in host */
432
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
433
				PT_SYSCALL_NR(regs->gp) = -1;
434
		}
435
436
		mm_fd = interrupt_end();
437
	}
438
}
439
350
void userspace(struct uml_pt_regs *regs)
440
void userspace(struct uml_pt_regs *regs)
351
{
441
{
352
	struct itimerval timer;
442
	struct itimerval timer;
Lines 446-453 void userspace(struct uml_pt_regs *regs) Link Here
446
				       "with signal %d\n", sig);
536
				       "with signal %d\n", sig);
447
				fatal_sigsegv();
537
				fatal_sigsegv();
448
			}
538
			}
449
			pid = userspace_pid[0];
539
540
			/*
541
			 * userspace_pid can change in in_interrupt since
542
			 * PTRACE_SWITCH_MM can cause a process to change
543
			 * address spaces
544
			 */
450
			interrupt_end();
545
			interrupt_end();
546
			pid = userspace_pid[0];
451
547
452
			/* Avoid -ERESTARTSYS handling in host */
548
			/* Avoid -ERESTARTSYS handling in host */
453
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
549
			if (PT_SYSCALL_NR_OFFSET != PT_SYSCALL_RET_OFFSET)
Lines 462-470 static int __init init_thread_regs(void) Link Here
462
{
558
{
463
	get_safe_registers(thread_regs);
559
	get_safe_registers(thread_regs);
464
	/* Set parent's instruction pointer to start of clone-stub */
560
	/* Set parent's instruction pointer to start of clone-stub */
465
	thread_regs[REGS_IP_INDEX] = STUB_CODE +
561
	thread_regs[REGS_IP_INDEX] = STUB_ADDR(stub_clone_handler);
466
				(unsigned long) stub_clone_handler -
467
				(unsigned long) &__syscall_stub_start;
468
	thread_regs[REGS_SP_INDEX] = STUB_DATA + UM_KERN_PAGE_SIZE -
562
	thread_regs[REGS_SP_INDEX] = STUB_DATA + UM_KERN_PAGE_SIZE -
469
		sizeof(void *);
563
		sizeof(void *);
470
#ifdef __SIGNAL_FRAMESIZE
564
#ifdef __SIGNAL_FRAMESIZE
Lines 554-559 int copy_context_skas0(unsigned long new_stack, int pid) Link Here
554
	return err;
648
	return err;
555
}
649
}
556
650
651
extern unsigned long switch_mm_stub;
652
extern long task_size;
653
654
static void unmap_new_as(void)
655
{
656
	void (*p)(void);
657
	void *addr;
658
	unsigned long stack = (unsigned long) &stack & ~(UM_KERN_PAGE_SIZE - 1);
659
	unsigned long long data_offset, code_offset;
660
	int fd = phys_mapping(to_phys((void *) stack), &data_offset);
661
662
	addr = mmap((void *) STUB_DATA, UM_KERN_PAGE_SIZE,
663
		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd,
664
		    data_offset);
665
	if (addr == MAP_FAILED)
666
		panic("Failed to remap stack");
667
668
	fd = phys_mapping(to_phys(&__syscall_stub_start), &code_offset);
669
	addr = mmap((void *) STUB_CODE, UM_KERN_PAGE_SIZE,
670
		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd,
671
		    code_offset);
672
	if (addr == MAP_FAILED)
673
		panic("Failed to remap code");
674
675
	p = (void (*)(void)) (STUB_ADDR(&switch_mm_stub));
676
	(*p)();
677
}
678
679
extern long do_syscall_stub_skas4(struct mm_id *mm_idp, void **addr,
680
				  unsigned long ip, unsigned long sp);
681
682
int copy_context_skas4(struct mm_id *id)
683
{
684
	void *data = NULL;
685
	int err;
686
687
	err = unmap(id, 0, STUB_START, 0, &data);
688
	if (err)
689
		return err;
690
691
	if (STUB_END < task_size) {
692
		err = unmap(id, STUB_END, task_size - STUB_END, 0, &data);
693
		if (err)
694
			return err;
695
	}
696
697
	return do_syscall_stub_skas4(id, &data, (unsigned long) unmap_new_as,
698
				     id->stack + UM_KERN_PAGE_SIZE / 2);
699
}
700
557
/*
701
/*
558
 * This is used only, if stub pages are needed, while proc_mm is
702
 * This is used only, if stub pages are needed, while proc_mm is
559
 * available. Opening /proc/mm creates a new mm_context, which lacks
703
 * available. Opening /proc/mm creates a new mm_context, which lacks
Lines 713-728 void reboot_skas(void) Link Here
713
void __switch_mm(struct mm_id *mm_idp)
857
void __switch_mm(struct mm_id *mm_idp)
714
{
858
{
715
	int err;
859
	int err;
716
717
	/* FIXME: need cpu pid in __switch_mm */
860
	/* FIXME: need cpu pid in __switch_mm */
861
862
	if (have_vcpu)
863
		return;
864
718
	if (proc_mm) {
865
	if (proc_mm) {
719
		err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
866
		err = ptrace(OLD_PTRACE_SWITCH_MM, userspace_pid[0], 0,
720
			     mm_idp->u.mm_fd);
867
			     mm_idp->u.mm_fd);
721
		if (err) {
868
		if (err) {
722
			printk(UM_KERN_ERR "__switch_mm - PTRACE_SWITCH_MM "
869
			printk(UM_KERN_ERR "__switch_mm - PTRACE_SWITCH_MM "
723
			       "failed, errno = %d\n", errno);
870
			       "failed, errno = %d\n", errno);
724
			fatal_sigsegv();
871
			fatal_sigsegv();
725
		}
872
		}
726
	}
873
	} else if (have_ptrace_switch_mm) {
727
	else userspace_pid[0] = mm_idp->u.pid;
874
		err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
875
			     mm_idp->u.mm_fd);
876
		if (err)
877
			panic("__switch_mm - PTRACE_SWITCH_MM "
878
			      "failed, errno = %d\n", errno);
879
	} else userspace_pid[0] = mm_idp->u.pid;
728
}
880
}
(-)a/arch/um/os-Linux/start_up.c (-43 / +472 lines)
Lines 3-8 Link Here
3
 * Licensed under the GPL
3
 * Licensed under the GPL
4
 */
4
 */
5
5
6
/* Include this first, before anything else includes <signal.h> */
7
#include "siginfo_segv.h"
8
6
#include <stdio.h>
9
#include <stdio.h>
7
#include <stdlib.h>
10
#include <stdlib.h>
8
#include <stdarg.h>
11
#include <stdarg.h>
Lines 23-29 Link Here
23
#include "mem_user.h"
26
#include "mem_user.h"
24
#include "ptrace_user.h"
27
#include "ptrace_user.h"
25
#include "registers.h"
28
#include "registers.h"
29
#include "skas.h"
26
#include "skas_ptrace.h"
30
#include "skas_ptrace.h"
31
#include "sysdep/sigcontext.h"
32
#include "user.h"
27
33
28
static int ptrace_child(void)
34
static int ptrace_child(void)
29
{
35
{
Lines 142-155 static int stop_ptraced_child(int pid, int exitcode, int mustexit) Link Here
142
}
148
}
143
149
144
/* Changed only during early boot */
150
/* Changed only during early boot */
145
int ptrace_faultinfo = 1;
151
int ptrace_faultinfo;
146
int ptrace_ldt = 1;
152
static int disable_ptrace_faultinfo;
147
int proc_mm = 1;
153
148
int skas_needs_stub = 0;
154
int ptrace_ldt;
155
static int disable_ptrace_ldt;
156
157
int proc_mm;
158
static int disable_proc_mm;
159
160
int have_switch_mm;
161
static int disable_switch_mm;
162
163
int have_siginfo_segv;
164
static int disable_siginfo_segv;
165
166
int have_ptrace_switch_mm;
167
static int disable_ptrace_switch_mm;
168
169
int have_vcpu;
170
static int disable_vcpu;
171
172
int skas_needs_stub;
149
173
150
static int __init skas0_cmd_param(char *str, int* add)
174
static int __init skas0_cmd_param(char *str, int* add)
151
{
175
{
152
	ptrace_faultinfo = proc_mm = 0;
176
	disable_ptrace_faultinfo = 1;
177
	disable_ptrace_ldt = 1;
178
	disable_proc_mm = 1;
179
180
	disable_switch_mm = 1;
181
	disable_siginfo_segv = 1;
182
	disable_ptrace_switch_mm = 1;
183
	disable_vcpu = 1;
184
153
	return 0;
185
	return 0;
154
}
186
}
155
187
Lines 159-173 static int __init mode_skas0_cmd_param(char *str, int* add) Link Here
159
	__attribute__((alias("skas0_cmd_param")));
191
	__attribute__((alias("skas0_cmd_param")));
160
192
161
__uml_setup("skas0", skas0_cmd_param,
193
__uml_setup("skas0", skas0_cmd_param,
162
		"skas0\n"
194
"skas0\n"
163
		"    Disables SKAS3 usage, so that SKAS0 is used, unless \n"
195
"    Disables SKAS3 and SKAS4 usage, so that SKAS0 is used\n\n");
164
	        "    you specify mode=tt.\n\n");
165
196
166
__uml_setup("mode=skas0", mode_skas0_cmd_param,
197
__uml_setup("mode=skas0", mode_skas0_cmd_param,
167
		"mode=skas0\n"
198
"mode=skas0\n"
168
		"    Disables SKAS3 usage, so that SKAS0 is used, unless you \n"
199
"    Disables SKAS3 and SKAS4 usage, so that SKAS0 is used.\n\n");
169
		"    specify mode=tt. Note that this was recently added - on \n"
170
		"    older kernels you must use simply \"skas0\".\n\n");
171
200
172
/* Changed only during early boot */
201
/* Changed only during early boot */
173
static int force_sysemu_disabled = 0;
202
static int force_sysemu_disabled = 0;
Lines 362-368 void __init os_early_checks(void) Link Here
362
391
363
static int __init noprocmm_cmd_param(char *str, int* add)
392
static int __init noprocmm_cmd_param(char *str, int* add)
364
{
393
{
365
	proc_mm = 0;
394
	disable_proc_mm = 1;
366
	return 0;
395
	return 0;
367
}
396
}
368
397
Lines 374-380 __uml_setup("noprocmm", noprocmm_cmd_param, Link Here
374
403
375
static int __init noptracefaultinfo_cmd_param(char *str, int* add)
404
static int __init noptracefaultinfo_cmd_param(char *str, int* add)
376
{
405
{
377
	ptrace_faultinfo = 0;
406
	disable_ptrace_faultinfo = 1;
378
	return 0;
407
	return 0;
379
}
408
}
380
409
Lines 386-392 __uml_setup("noptracefaultinfo", noptracefaultinfo_cmd_param, Link Here
386
415
387
static int __init noptraceldt_cmd_param(char *str, int* add)
416
static int __init noptraceldt_cmd_param(char *str, int* add)
388
{
417
{
389
	ptrace_ldt = 0;
418
	disable_ptrace_ldt = 1;
390
	return 0;
419
	return 0;
391
}
420
}
392
421
Lines 396-402 __uml_setup("noptraceldt", noptraceldt_cmd_param, Link Here
396
"    To support PTRACE_LDT, the host needs to be patched using\n"
425
"    To support PTRACE_LDT, the host needs to be patched using\n"
397
"    the current skas3 patch.\n\n");
426
"    the current skas3 patch.\n\n");
398
427
399
static inline void check_skas3_ptrace_faultinfo(void)
428
static inline void __init check_skas3_ptrace_faultinfo(void)
400
{
429
{
401
	struct ptrace_faultinfo fi;
430
	struct ptrace_faultinfo fi;
402
	int pid, n;
431
	int pid, n;
Lines 406-428 static inline void check_skas3_ptrace_faultinfo(void) Link Here
406
435
407
	n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
436
	n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
408
	if (n < 0) {
437
	if (n < 0) {
409
		ptrace_faultinfo = 0;
410
		if (errno == EIO)
438
		if (errno == EIO)
411
			non_fatal("not found\n");
439
			non_fatal("not found\n");
412
		else
440
		else
413
			perror("not found");
441
			perror("not found");
414
	}
442
	} else if (disable_ptrace_faultinfo)
443
		non_fatal("found but disabled on command line\n");
415
	else {
444
	else {
416
		if (!ptrace_faultinfo)
445
		ptrace_faultinfo = 1;
417
			non_fatal("found but disabled on command line\n");
446
		non_fatal("found\n");
418
		else
419
			non_fatal("found\n");
420
	}
447
	}
421
448
422
	stop_ptraced_child(pid, 1, 1);
449
	stop_ptraced_child(pid, 1, 1);
423
}
450
}
424
451
425
static inline void check_skas3_ptrace_ldt(void)
452
static inline void __init check_skas3_ptrace_ldt(void)
426
{
453
{
427
#ifdef PTRACE_LDT
454
#ifdef PTRACE_LDT
428
	int pid, n;
455
	int pid, n;
Lines 442-479 static inline void check_skas3_ptrace_ldt(void) Link Here
442
		else {
469
		else {
443
			perror("not found");
470
			perror("not found");
444
		}
471
		}
445
		ptrace_ldt = 0;
472
	} else if (disable_ptrace_ldt)
446
	}
473
		non_fatal("found, but use is disabled\n");
447
	else {
474
	else {
448
		if (ptrace_ldt)
475
		ptrace_ldt = 1;
449
			non_fatal("found\n");
476
		non_fatal("found\n");
450
		else
451
			non_fatal("found, but use is disabled\n");
452
	}
477
	}
453
478
454
	stop_ptraced_child(pid, 1, 1);
479
	stop_ptraced_child(pid, 1, 1);
455
#else
456
	/* PTRACE_LDT might be disabled via cmdline option.
457
	 * We want to override this, else we might use the stub
458
	 * without real need
459
	 */
460
	ptrace_ldt = 1;
461
#endif
480
#endif
462
}
481
}
463
482
464
static inline void check_skas3_proc_mm(void)
483
static inline void __init check_skas3_proc_mm(void)
465
{
484
{
466
	non_fatal("  - /proc/mm...");
485
	non_fatal("  - /proc/mm...");
467
	if (access("/proc/mm", W_OK) < 0) {
486
	if (access("/proc/mm", W_OK) < 0)
468
		proc_mm = 0;
469
		perror("not found");
487
		perror("not found");
470
	}
488
	else if (disable_proc_mm)
471
	else if (!proc_mm)
472
		non_fatal("found but disabled on command line\n");
489
		non_fatal("found but disabled on command line\n");
473
	else non_fatal("found\n");
490
	else {
491
		proc_mm = 1;
492
		non_fatal("found\n");
493
	}
474
}
494
}
475
495
476
void can_do_skas(void)
496
static void __init can_do_skas3(void)
477
{
497
{
478
	non_fatal("Checking for the skas3 patch in the host:\n");
498
	non_fatal("Checking for the skas3 patch in the host:\n");
479
499
Lines 481-488 void can_do_skas(void) Link Here
481
	check_skas3_ptrace_faultinfo();
501
	check_skas3_ptrace_faultinfo();
482
	check_skas3_ptrace_ldt();
502
	check_skas3_ptrace_ldt();
483
503
484
	if (!proc_mm || !ptrace_faultinfo || !ptrace_ldt)
504
	if (!proc_mm || (!ptrace_faultinfo && !have_siginfo_segv) ||
505
	    !ptrace_ldt)
506
		skas_needs_stub = 1;
507
}
508
509
static void *fault_address;
510
511
static __init int check_fault_info(struct faultinfo *fi)
512
{
513
	return (FAULT_ADDRESS(*fi) == (unsigned long) fault_address) &&
514
		FAULT_WRITE(*fi) && SEGV_IS_FIXABLE(fi);
515
}
516
517
static jmp_buf siginfo_buf;
518
519
static void __init segv_handler(int sig, siginfo_t *si, void *foo)
520
{
521
	struct faultinfo fi;
522
	int n;
523
524
	GET_FAULTINFO_FROM_SI(fi, *si);
525
	n = check_fault_info(&fi) ? 1 : 2;
526
	longjmp(siginfo_buf, n);
527
}
528
529
static int __init fault(void)
530
{
531
	struct sigaction sa, old;
532
	int err, n;
533
534
	/*
535
	 * The cast is needed because the CPP manipulations of
536
	 * siginfo_t resulted in sa_sigaction having an old_siginfo_t
537
	 * parameter.
538
	 */
539
	sa.sa_sigaction = (void (*)(int, old_siginfo_t *, void *)) segv_handler;
540
	sigemptyset(&sa.sa_mask);
541
	sa.sa_flags = SA_SIGINFO | SA_NODEFER;
542
543
	err = sigaction(SIGSEGV, &sa, &old);
544
	if (err)
545
		fatal_perror("sigaction");
546
547
	/*
548
	 * Provide a guaranteed invalid address by mapping a page into
549
	 * a hole in the address space and then unmapping it.
550
	 */
551
	fault_address = mmap(NULL, UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
552
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
553
	if (fault_address == MAP_FAILED)
554
		fatal_perror("mmap failed");
555
556
	if (munmap(fault_address, UM_KERN_PAGE_SIZE) < 0)
557
		fatal_perror("munmap failed");
558
559
	n = setjmp(siginfo_buf);
560
	if (n == 0)
561
		*((unsigned long *) fault_address) = 0;
562
563
	err = sigaction(SIGSEGV, &old, NULL);
564
565
	return n;
566
}
567
568
static int __init nogetsiginfo_cmd_param(char *str, int *add)
569
{
570
	disable_siginfo_segv = 1;
571
	return 0;
572
}
573
574
__uml_setup("nogetsiginfo", nogetsiginfo_cmd_param,
575
"nogetsiginfo\n"
576
"    Turns off usage of PTRACE_GETSIGINFO to read page fault information\n"
577
"    from a child process, even if the host supports it.\n\n");
578
579
#ifndef PTRACE_GETSIGINFO
580
#define PTRACE_GETSIGINFO	0x4202
581
#endif
582
583
static int __init check_siginfo(void)
584
{
585
	siginfo_t si;
586
	struct faultinfo fi;
587
	int ok, pid, err, status;
588
589
	non_fatal("\tFull CPU fault information in siginfo_t ... ");
590
	ok = fault();
591
	if (ok)
592
		non_fatal("OK\n");
593
	else {
594
		non_fatal("Failed\n");
595
		return 0;
596
	}
597
598
	non_fatal("\tFull CPU fault information in PTRACE_GETSIGINFO ... ");
599
600
	pid = fork();
601
	if (pid < 0)
602
		fatal_perror("fork failed");
603
	else if (pid == 0) {
604
		ptrace(PTRACE_TRACEME, 0, 0, 0);
605
		fault();
606
		exit(1);
607
	}
608
609
	while (1) {
610
		err = waitpid(pid, &status, WUNTRACED);
611
		if (err < 0)
612
			fatal_perror("wait failed");
613
614
		if (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGSEGV))
615
			break;
616
	}
617
618
	err = ptrace(PTRACE_GETSIGINFO, pid, 0, &si);
619
	if (err < 0)
620
		fatal_perror("PTRACE_GETSIGINFO failed");
621
622
	ptrace(PTRACE_KILL, pid, 0, 0);
623
624
	GET_FAULTINFO_FROM_SI(fi, si);
625
	ok = check_fault_info(&fi);
626
	if (ok)
627
		non_fatal("OK\n");
628
	else
629
		non_fatal("Failed\n");
630
631
	if (disable_siginfo_segv)
632
		non_fatal("Extended PTRACE_GETSIGINFO disabled on command "
633
			  "line\n");
634
	else
635
		have_siginfo_segv = 1;
636
637
	return ok;
638
}
639
640
static struct user_regs return_regs;
641
int self_mm_fd;
642
643
static int switch_mm_works;
644
645
static __init void after_switch(void)
646
{
647
	/*
648
	 * If we are really in a new address space, setting this to
649
	 * zero won't affect the value of 1 already set in the old
650
	 * address space.
651
	 */
652
	switch_mm_works = 0;
653
654
	switch_mm(self_mm_fd, NULL, &return_regs, 0, 0);
655
}
656
657
static int __init check_switch_mm(void)
658
{
659
	char *mm_stack;
660
	int err, there = -1;
661
662
	non_fatal("\t/proc/self/mm ... ");
663
	self_mm_fd = open("/proc/self/mm", O_RDONLY);
664
	if (self_mm_fd < 0)
665
		goto bad;
666
	non_fatal("OK\n");
667
668
	mm_stack = mmap(NULL, UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
669
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
670
	if (mm_stack == MAP_FAILED)
671
		goto bad;
672
673
	non_fatal("\tnew_mm ... ");
674
	there = new_mm();
675
	if (there < 0)
676
		goto bad_unmap;
677
	non_fatal("OK\n");
678
679
	switch_mm_works = 1;
680
681
	non_fatal("\tswitching over ... ");
682
	err = switch_mm(there, &return_regs, NULL, (unsigned long) after_switch,
683
			((unsigned long) &mm_stack[UM_KERN_PAGE_SIZE]) -
684
			sizeof(void *));
685
	if (err < 0)
686
		goto bad_close;
687
	non_fatal("switched back ... ");
688
	if (!switch_mm_works)
689
		goto bad_close;
690
	else
691
		non_fatal("OK\n");
692
693
	munmap(mm_stack, UM_KERN_PAGE_SIZE);
694
	close(there);
695
696
	if (disable_switch_mm)
697
		non_fatal("switch_mm support disabled on command line\n");
698
	else
699
		have_switch_mm = 1;
700
701
	return 1;
702
 bad_close:
703
	if (there > 0)
704
		close(there);
705
 bad_unmap:
706
	munmap(mm_stack, UM_KERN_PAGE_SIZE);
707
 bad:
708
	non_fatal("Failed - \n");
709
	perror("");
710
	return 0;
711
}
712
713
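For context, this is how the two host features probed by check_switch_mm() are meant to be combined by a UML-style monitor. Everything in the sketch depends on a host kernel carrying this patch: /proc/<pid>/mm, __NR_new_mm, __NR_switch_mm and struct user_regs are all introduced here, the helper and variable names are illustrative rather than part of the patch, and, like the probe above, it assumes the program's text and globals are reachable at the same addresses in the mm handed back by the new_mm syscall:

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/ptrace.h>			/* patched header: struct user_regs */

#define MONITOR_STACK_SIZE 4096

static int orig_mm_fd;			/* handle on the original mm, taken before switching */
static struct user_regs saved_regs;	/* the host saves the caller's registers here */

static long host_switch_mm(int fd, struct user_regs *save,
			   struct user_regs *load,
			   unsigned long ip, unsigned long sp)
{
	return syscall(__NR_switch_mm, fd, save, load, ip, sp);
}

static void entry_in_new_mm(void)
{
	/*
	 * Now running on the new stack inside the other address space.
	 * The way back is to switch to the mm behind orig_mm_fd and have
	 * the host reload the register file it saved on the way in, so no
	 * explicit ip/sp are needed for the return trip.
	 */
	host_switch_mm(orig_mm_fd, NULL, &saved_regs, 0, 0);
}

int main(void)
{
	void *stack;
	int new_fd;

	orig_mm_fd = open("/proc/self/mm", O_RDONLY);	/* pins the current mm */
	new_fd = syscall(__NR_new_mm);			/* fd naming a fresh mm */
	stack = mmap(NULL, MONITOR_STACK_SIZE, PROT_READ | PROT_WRITE,
		     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (orig_mm_fd < 0 || new_fd < 0 || stack == MAP_FAILED)
		return 1;

	/*
	 * Save our registers, then resume at entry_in_new_mm() on the new
	 * stack in the new address space; once it switches back, execution
	 * continues here as if host_switch_mm() had simply returned.
	 */
	host_switch_mm(new_fd, &saved_regs, NULL,
		       (unsigned long) entry_in_new_mm,
		       (unsigned long) stack + MONITOR_STACK_SIZE - sizeof(void *));
	return 0;
}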
static int ptrace_switch_mm_works;
714
715
static int __init after_ptrace_switch(void)
716
{
717
	ptrace_switch_mm_works = 1;
718
	exit(0);
719
}
720
721
static int __init check_ptrace_switch_mm(void)
722
{
723
	void *stack;
724
	unsigned long regs[MAX_REG_NR];
725
	int pid, here, err, status;
726
727
	non_fatal("\tPTRACE_SWITCH_MM ... ");
728
	pid = fork();
729
	if (pid == 0){
730
		ptrace(PTRACE_TRACEME, 0, 0, 0);
731
		kill(getpid(), SIGSTOP);
732
733
		exit(0);
734
	}
735
	else if (pid < 0)
736
		goto bad;
737
738
	stack = mmap(NULL, UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
739
		     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
740
	if (stack == MAP_FAILED)
741
		goto bad;
742
743
	here = open("/proc/self/mm", O_RDONLY);
744
	if (here < 0)
745
		goto bad_unmap;
746
747
	err = waitpid(pid, &status, WUNTRACED);
748
	if (err < 0)
749
		goto bad_close;
750
	else if (err != pid) {
751
		non_fatal("waitpid returned %d, expected %d\n", err, pid);
752
		goto bad_close;
753
	} else if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
754
		non_fatal("waitpid returned status 0x%d\n", status);
755
		goto bad_close;
756
	}
757
758
	err = ptrace(PTRACE_GETREGS, pid, 0, regs);
759
	if (err < 0)
760
		goto bad_close;
761
762
	regs[REGS_IP_INDEX] = (unsigned long) after_ptrace_switch;
763
	regs[REGS_SP_INDEX] = (unsigned long) stack + UM_KERN_PAGE_SIZE -
764
		sizeof(void *);
765
766
	if (ptrace(PTRACE_SETREGS, pid, 0, regs) < 0)
767
		goto bad_close;
768
769
	if (ptrace(PTRACE_SWITCH_MM, pid, NULL, here) < 0)
770
		goto bad_close;
771
772
	if (ptrace(PTRACE_CONT, pid, NULL, 0) < 0)
773
		goto bad_close;
774
775
	err = waitpid(pid, &status, WUNTRACED);
776
	if (err < 0)
777
		goto bad_close;
778
	else if(err != pid) {
779
		non_fatal("waitpid returned %d, expected %d\n", err, pid);
780
		goto bad_close;
781
	} else if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
782
		non_fatal("waitpid returned status 0x%d\n", status);
783
		goto bad_close;
784
	}
785
786
	if (!ptrace_switch_mm_works)
787
		goto bad_close;
788
	else
789
		non_fatal("OK\n");
790
791
	if (disable_ptrace_switch_mm)
792
		non_fatal("PTRACE_SWITCH_MM support disabled on command "
793
			  "line\n");
794
	else
795
		have_ptrace_switch_mm = 1;
796
797
	close(here);
798
	munmap(stack, UM_KERN_PAGE_SIZE);
799
800
	return 1;
801
802
 bad_close:
803
	close(here);
804
 bad_unmap:
805
	munmap(stack, UM_KERN_PAGE_SIZE);
806
 bad:
807
	non_fatal("Failed - \n");
808
	perror("");
809
	return 0;
810
}
811
812
#ifdef UML_CONFIG_X86_32
813
extern int host_gdt_entry_tls_min;
814
extern void host_tls_support(void);
815
816
static int __init init_vcpu_arch(struct vcpu_arch *vcpu)
{
817
	struct user_desc *tls = vcpu->tls_array;
818
	int i, err;
819
820
	host_tls_support();
821
	memset(tls, 0, sizeof(vcpu->tls_array));
822
	for (i = 0; i < ARRAY_SIZE(vcpu->tls_array); i++) {
823
		tls[i].entry_number = host_gdt_entry_tls_min + i;
824
		err = get_thread_area(&tls[i]);
825
		if (err) {
826
			perror("get_thread_area");
827
			return err;
828
		}
829
	}
830
	return 0;
831
}
832
#else
833
static int init_vcpu_arch(struct vcpu_arch *vcpu)
{
834
	return 0;
835
}
836
#endif
837
838
static struct vcpu_user vcpu_data;
839
840
static __init int check_vcpu(void)
841
{
842
	void *stack;
843
	int err;
844
845
	non_fatal("\tvcpu ... ");
846
847
	stack = mmap(NULL, UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
848
		     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
849
	if (stack == MAP_FAILED)
850
		goto bad;
851
852
	get_safe_registers(vcpu_data.regs.regs);
853
 	vcpu_data.regs.regs[REGS_IP_INDEX] = (unsigned long) ptrace_child;
854
	vcpu_data.regs.regs[REGS_SP_INDEX] = (unsigned long) stack +
855
		UM_KERN_PAGE_SIZE - sizeof(void *);
856
857
	if (init_vcpu_arch(&vcpu_data.arch))
858
		goto bad;
859
860
	err = vcpu(-1, &vcpu_data);
861
	munmap(stack, UM_KERN_PAGE_SIZE);
862
	if (err) {
863
		non_fatal("vcpu failed with errno %d\n", err);
864
		goto bad;
865
	}
866
867
	if (vcpu_data.event != VCPU_SYSCALL) {
868
		non_fatal("vcpu returned with event = %d\n", vcpu_data.event);
869
		goto bad;
870
	}
871
872
	non_fatal("OK\n");
873
874
	if (disable_vcpu)
875
		non_fatal("vcpu support disabled on command line\n");
876
	else
877
		have_vcpu = 1;
878
879
	return 1;
880
881
 bad:
882
	non_fatal("Failed\n");
883
	return 0;
884
}
885
886
static int __init can_do_skas4(void)
887
{
888
	int ret;
889
890
	non_fatal("Checking for SKAS4 support in the host:\n");
891
892
	ret = check_switch_mm() && check_ptrace_switch_mm() && check_siginfo()
893
		&& check_vcpu();
894
	if (ret)
485
		skas_needs_stub = 1;
895
		skas_needs_stub = 1;
896
897
	return ret;
898
}
899
900
void __init can_do_skas(void)
901
{
902
	if (!can_do_skas4())
903
		can_do_skas3();
904
}
905
906
int get_new_mm(void)
907
{
908
	int err;
909
910
	err = new_mm();
911
	if (err < 0)
912
		err = -errno;
913
914
	return err;
486
}
915
}
487
916
488
int __init parse_iomem(char *str, int *add)
917
int __init parse_iomem(char *str, int *add)
(-)a/arch/um/os-Linux/sys-i386/registers.c (-3 / +30 lines)
Lines 4-13 Link Here
4
 * Licensed under the GPL
4
 * Licensed under the GPL
5
 */
5
 */
6
6
7
#include <stdio.h>
8
#include <stdlib.h>
7
#include <errno.h>
9
#include <errno.h>
10
#include <asm/ldt.h>
11
#include <sys/syscall.h>
12
#include <unistd.h>
8
#include "kern_constants.h"
13
#include "kern_constants.h"
9
#include "longjmp.h"
14
#include "longjmp.h"
10
#include "user.h"
15
#include "user.h"
16
#include "skas.h"
11
#include "sysdep/ptrace_user.h"
17
#include "sysdep/ptrace_user.h"
12
18
13
int save_fp_registers(int pid, unsigned long *fp_regs)
19
int save_fp_registers(int pid, unsigned long *fp_regs)
Lines 72-83 int put_fp_registers(int pid, unsigned long *regs) Link Here
72
		return restore_fp_registers(pid, regs);
78
		return restore_fp_registers(pid, regs);
73
}
79
}
74
80
81
extern int host_gdt_entry_tls_min;
82
83
#define GDT_ENTRY_TLS_ENTRIES 3
84
#define GDT_ENTRY_TLS_MIN 6
85
#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
86
87
struct user_desc tls[GDT_ENTRY_TLS_ENTRIES];
88
89
unsigned long fp_regs[FP_SIZE];
90
75
void arch_init_registers(int pid)
91
void arch_init_registers(int pid)
76
{
92
{
77
	unsigned long fpx_regs[HOST_XFP_SIZE];
93
	struct user_desc *entry;
78
	int err;
94
	int err, i;
79
95
80
	err = ptrace(PTRACE_GETFPXREGS, pid, 0, fpx_regs);
96
	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) {
97
		entry = &tls[i];
98
		entry->entry_number = i + GDT_ENTRY_TLS_MIN;
99
		err = get_thread_area(entry);
100
		if (err) {
101
			perror("get_thread_area");
102
			exit(1);
103
		}
104
	}
105
106
	err = ptrace(PTRACE_GETFPXREGS, pid, 0, fp_regs);
81
	if (!err)
107
	if (!err)
82
		return;
108
		return;
83
109
Lines 87-89 void arch_init_registers(int pid) Link Here
87
113
88
	have_fpx_regs = 0;
114
	have_fpx_regs = 0;
89
}
115
}
116
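The loop added to arch_init_registers() above snapshots the host's three GDT TLS slots with get_thread_area() so the TLS code can reuse them later (the tls[] array is picked up by arch/um/sys-i386/tls.c further down). A stand-alone 32-bit x86 illustration of that host call; glibc has no wrapper, so the raw syscall is used, and the slot numbers 6..8 correspond to the GDT_ENTRY_TLS_MIN..MAX values hard-coded in the hunk:

#include <asm/ldt.h>		/* struct user_desc */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc desc;
	int i;

	for (i = 6; i <= 8; i++) {	/* GDT_ENTRY_TLS_MIN .. MAX on i386 */
		memset(&desc, 0, sizeof(desc));
		desc.entry_number = i;
		if (syscall(SYS_get_thread_area, &desc) < 0) {
			perror("get_thread_area");
			return 1;
		}
		printf("GDT slot %d: base 0x%lx, limit 0x%x%s\n",
		       desc.entry_number, (unsigned long) desc.base_addr,
		       desc.limit, desc.base_addr ? "" : " (empty)");
	}
	return 0;
}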
(-)a/arch/um/os-Linux/sys-x86_64/registers.c (-1 / +14 lines)
Lines 1-5 Link Here
1
/*
1
/*
2
 * Copyright (C) 2006 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
2
 * Copyright (C) 2006 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3
 * Licensed under the GPL
3
 * Licensed under the GPL
4
 */
4
 */
5
5
Lines 10-15 Link Here
10
#include "kern_constants.h"
10
#include "kern_constants.h"
11
#include "longjmp.h"
11
#include "longjmp.h"
12
#include "user.h"
12
#include "user.h"
13
#include "sysdep/ptrace_user.h"
13
14
14
int save_fp_registers(int pid, unsigned long *fp_regs)
15
int save_fp_registers(int pid, unsigned long *fp_regs)
15
{
16
{
Lines 50-52 int put_fp_registers(int pid, unsigned long *regs) Link Here
50
{
51
{
51
	return restore_fp_registers(pid, regs);
52
	return restore_fp_registers(pid, regs);
52
}
53
}
54
55
unsigned long fp_regs[FP_SIZE];
56
57
void arch_init_registers(int pid)
58
{
59
	int err;
60
61
	err = ptrace(PTRACE_GETFPREGS, pid, 0, fp_regs);
62
	if(err)
63
		panic("arch_init_registers : PTRACE_GETFPREGS failed, "
64
		      "errno = %d", errno);
65
}
(-)a/arch/um/sys-i386/ldt.c (-1 / +1 lines)
Lines 437-443 long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm) Link Here
437
		/*
437
		/*
438
		 * We have a valid from_mm, so we now have to copy the LDT of
438
		 * We have a valid from_mm, so we now have to copy the LDT of
439
		 * from_mm to new_mm, because using proc_mm a new mm with
439
		 * from_mm to new_mm, because using proc_mm a new mm with
440
		 * an empty/default LDT was created in new_mm()
440
		 * an empty/default LDT was created in make_new_mm()
441
		 */
441
		 */
442
		copy = ((struct proc_mm_op) { .op 	= MM_COPY_SEGMENTS,
442
		copy = ((struct proc_mm_op) { .op 	= MM_COPY_SEGMENTS,
443
					      .u 	=
443
					      .u 	=
(-)a/arch/um/sys-i386/signal.c (-26 / +10 lines)
Lines 164-169 static int convert_fxsr_from_user(struct user_fxsr_struct *fxsave, Link Here
164
164
165
extern int have_fpx_regs;
165
extern int have_fpx_regs;
166
166
167
extern unsigned long fp_regs[FP_SIZE];
168
167
static int copy_sc_from_user(struct pt_regs *regs,
169
static int copy_sc_from_user(struct pt_regs *regs,
168
			     struct sigcontext __user *from)
170
			     struct sigcontext __user *from)
169
{
171
{
Lines 177-200 static int copy_sc_from_user(struct pt_regs *regs, Link Here
177
	pid = userspace_pid[current_thread_info()->cpu];
179
	pid = userspace_pid[current_thread_info()->cpu];
178
	copy_sc(&regs->regs, &sc);
180
	copy_sc(&regs->regs, &sc);
179
	if (have_fpx_regs) {
181
	if (have_fpx_regs) {
180
		struct user_fxsr_struct fpx;
182
		struct user_fxsr_struct *fpx =
181
183
			(struct user_fxsr_struct *) &fp_regs;
182
		err = copy_from_user(&fpx, &sc.fpstate->_fxsr_env[0],
183
				     sizeof(struct user_fxsr_struct));
184
		if (err)
185
			return 1;
186
184
187
		err = convert_fxsr_from_user(&fpx, sc.fpstate);
185
		err = convert_fxsr_from_user(fpx, sc.fpstate);
188
		if (err)
186
		if (err)
189
			return 1;
187
			return 1;
190
191
		err = restore_fpx_registers(pid, (unsigned long *) &fpx);
192
		if (err < 0) {
193
			printk(KERN_ERR "copy_sc_from_user - "
194
			       "restore_fpx_registers failed, errno = %d\n",
195
			       -err);
196
			return 1;
197
		}
198
	}
188
	}
199
	else {
189
	else {
200
		struct user_i387_struct fp;
190
		struct user_i387_struct fp;
Lines 250-274 static int copy_sc_to_user(struct sigcontext __user *to, Link Here
250
240
251
	pid = userspace_pid[current_thread_info()->cpu];
241
	pid = userspace_pid[current_thread_info()->cpu];
252
	if (have_fpx_regs) {
242
	if (have_fpx_regs) {
253
		struct user_fxsr_struct fpx;
243
		struct user_fxsr_struct *fpx =
254
244
			(struct user_fxsr_struct *) &fp_regs;
255
		err = save_fpx_registers(pid, (unsigned long *) &fpx);
256
		if (err < 0){
257
			printk(KERN_ERR "copy_sc_to_user - save_fpx_registers "
258
			       "failed, errno = %d\n", err);
259
			return 1;
260
		}
261
245
262
		err = convert_fxsr_to_user(to_fp, &fpx);
246
		err = convert_fxsr_to_user(to_fp, fpx);
263
		if (err)
247
		if (err)
264
			return 1;
248
			return 1;
265
249
266
		err |= __put_user(fpx.swd, &to_fp->status);
250
		err |= __put_user(fpx->swd, &to_fp->status);
267
		err |= __put_user(X86_FXSR_MAGIC, &to_fp->magic);
251
		err |= __put_user(X86_FXSR_MAGIC, &to_fp->magic);
268
		if (err)
252
		if (err)
269
			return 1;
253
			return 1;
270
254
271
		if (copy_to_user(&to_fp->_fxsr_env[0], &fpx,
255
		if (copy_to_user(&to_fp->_fxsr_env[0], fpx,
272
				 sizeof(struct user_fxsr_struct)))
256
				 sizeof(struct user_fxsr_struct)))
273
			return 1;
257
			return 1;
274
	}
258
	}
(-)a/arch/um/sys-i386/stub.S (-39 / +47 lines)
Lines 1-52 Link Here
1
#include "uml-config.h"
1
#include "uml-config.h"
2
#include "as-layout.h"
2
#include "as-layout.h"
3
#include "skas/skas.h"
4
5
#define PROCESS_LIST \
6
	/* load pointer to first operation */ \
7
	mov	$(STUB_DATA + 8), %esp; \
8
1: \
9
	/* load length of additional data */ \
10
	mov	0x0(%esp), %eax; \
11
	/* if(length == 0) : end of list */ \
12
	/* write possible 0 to header */ \
13
	mov	%eax, STUB_DATA + 4; \
14
	cmpl	$0, %eax; \
15
	jz	2f; \
16
	/* save current pointer */ \
17
	mov	%esp, STUB_DATA + 4; \
18
	/* skip additional data */ \
19
	add	%eax, %esp; \
20
	/* load syscall-# */ \
21
	pop	%eax; \
22
	/* load syscall params */ \
23
	pop	%ebx; \
24
	pop	%ecx; \
25
	pop	%edx; \
26
	pop	%esi; \
27
 	pop	%edi; \
28
	pop	%ebp; \
29
	/* execute syscall */ \
30
	int	$0x80; \
31
	/* check return value */ \
32
	pop	%ebx; \
33
	cmp	%ebx, %eax; \
34
	je	1b; \
35
2: \
36
	/* save return value */ \
37
	mov	%eax, STUB_DATA;
3
38
4
	.globl syscall_stub
39
	.globl syscall_stub
5
.section .__syscall_stub, "x"
40
.section .__syscall_stub, "x"
6
41
7
	.globl batch_syscall_stub
42
	.globl batch_syscall_stub
8
batch_syscall_stub:
43
batch_syscall_stub:
9
	/* load pointer to first operation */
44
	PROCESS_LIST
10
	mov	$(STUB_DATA+8), %esp
45
	/* stop */
11
46
	int3
12
again:
13
	/* load length of additional data */
14
	mov	0x0(%esp), %eax
15
16
	/* if(length == 0) : end of list */
17
	/* write possible 0 to header */
18
	mov	%eax, STUB_DATA+4
19
	cmpl	$0, %eax
20
	jz	done
21
22
	/* save current pointer */
23
	mov	%esp, STUB_DATA+4
24
25
	/* skip additional data */
26
	add	%eax, %esp
27
28
	/* load syscall-# */
29
	pop	%eax
30
47
31
	/* load syscall params */
48
 	.globl switch_mm_stub
32
	pop	%ebx
49
switch_mm_stub:
33
	pop	%ecx
50
	PROCESS_LIST
34
	pop	%edx
35
	pop	%esi
36
 	pop	%edi
37
	pop	%ebp
38
51
39
	/* execute syscall */
52
	mov	$__NR_switch_mm, %eax
53
	mov	STUB_DATA + UM_KERN_PAGE_SIZE - 8, %ebx
54
	xor	%ecx, %ecx
55
	mov	STUB_DATA + UM_KERN_PAGE_SIZE - 4, %edx
56
	xor	%esi, %esi
57
	xor	%edi, %edi
40
	int	$0x80
58
	int	$0x80
41
59
42
	/* check return value */
43
	pop	%ebx
44
	cmp	%ebx, %eax
45
	je	again
46
47
done:
48
	/* save return value */
49
	mov	%eax, STUB_DATA
50
51
	/* stop */
52
	int3
60
	int3
(-)a/arch/um/sys-i386/tls.c (-12 / +32 lines)
Lines 6-15 Link Here
6
#include "linux/percpu.h"
6
#include "linux/percpu.h"
7
#include "linux/sched.h"
7
#include "linux/sched.h"
8
#include "asm/uaccess.h"
8
#include "asm/uaccess.h"
9
#include <asm/unistd.h>
10
#include <asm/segment.h>
11
#include "kern.h"
9
#include "os.h"
12
#include "os.h"
10
#include "skas.h"
13
#include "skas.h"
11
#include "sysdep/tls.h"
14
#include "sysdep/tls.h"
12
15
16
void copy_tls(struct user_desc *to)
17
{
18
	memcpy(to, current->thread.arch.tls_array,
19
	       sizeof(current->thread.arch.tls_array));
20
}
21
13
/*
22
/*
14
 * If needed we can detect when it's uninitialized.
23
 * If needed we can detect when it's uninitialized.
15
 *
24
 *
Lines 18-28 Link Here
18
static int host_supports_tls = -1;
27
static int host_supports_tls = -1;
19
int host_gdt_entry_tls_min;
28
int host_gdt_entry_tls_min;
20
29
21
int do_set_thread_area(struct user_desc *info)
30
static int do_set_thread_area(struct user_desc *info)
22
{
31
{
23
	int ret;
32
	int ret;
24
	u32 cpu;
33
	u32 cpu;
25
34
35
	if(have_vcpu)
36
		return 0;
37
26
	cpu = get_cpu();
38
	cpu = get_cpu();
27
	ret = os_set_thread_area(info, userspace_pid[cpu]);
39
	ret = os_set_thread_area(info, userspace_pid[cpu]);
28
	put_cpu();
40
	put_cpu();
Lines 300-305 int sys_set_thread_area(struct user_desc __user *user_desc) Link Here
300
	ret = do_set_thread_area(&info);
312
	ret = do_set_thread_area(&info);
301
	if (ret)
313
	if (ret)
302
		return ret;
314
		return ret;
315
303
	return set_tls_entry(current, &info, idx, 1);
316
	return set_tls_entry(current, &info, idx, 1);
304
}
317
}
305
318
Lines 366-396 out: Link Here
366
	return ret;
379
	return ret;
367
}
380
}
368
381
382
extern struct user_desc tls[GDT_ENTRY_TLS_ENTRIES];
383
369
/*
384
/*
370
 * This code is really i386-only, but it detects and logs x86_64 GDT indexes
385
 * This code is really i386-only, but it detects and logs x86_64 GDT indexes
371
 * if a 32-bit UML is running on a 64-bit host.
386
 * if a 32-bit UML is running on a 64-bit host.
372
 */
387
 */
373
static int __init __setup_host_supports_tls(void)
388
void __init host_tls_support(void)
374
{
389
{
375
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
390
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
376
	if (host_supports_tls) {
391
	if (host_supports_tls) {
377
		printk(KERN_INFO "Host TLS support detected\n");
392
		printf("Host TLS support detected\n");
378
		printk(KERN_INFO "Detected host type: ");
393
		printf("Detected host type: ");
379
		switch (host_gdt_entry_tls_min) {
394
		switch (host_gdt_entry_tls_min) {
380
		case GDT_ENTRY_TLS_MIN_I386:
395
		case GDT_ENTRY_TLS_MIN_I386:
381
			printk(KERN_CONT "i386");
396
			printf("i386\n");
382
			break;
397
			break;
383
		case GDT_ENTRY_TLS_MIN_X86_64:
398
		case GDT_ENTRY_TLS_MIN_X86_64:
384
			printk(KERN_CONT "x86_64");
399
			printf("x86_64\n");
385
			break;
400
			break;
386
		}
401
		}
387
		printk(KERN_CONT " (GDT indexes %d to %d)\n",
402
		printf(" (GDT indexes %d to %d)\n", host_gdt_entry_tls_min,
388
		       host_gdt_entry_tls_min,
389
		       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
403
		       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
390
	} else
404
	} else
391
		printk(KERN_ERR "  Host TLS support NOT detected! "
405
		printf("Host TLS support NOT detected! "
392
				"TLS support inside UML will not work\n");
406
		       "TLS support inside UML will not work\n");
393
	return 0;
394
}
407
}
395
408
396
__initcall(__setup_host_supports_tls);
409
void init_vcpu_tls(struct user_desc *to)
410
{
411
	struct uml_tls_struct *tls = current->thread.arch.tls_array;
412
	int i;
413
414
	for (i = 0; i < ARRAY_SIZE(current->thread.arch.tls_array); i++)
415
		to[i] = tls[i].tls;
416
}
(-)a/arch/um/sys-x86_64/signal.c (-14 / +24 lines)
Lines 42-47 void copy_sc(struct uml_pt_regs *regs, void *from) Link Here
42
#undef GETREG
42
#undef GETREG
43
}
43
}
44
44
45
static unsigned long fp_regs[HOST_FP_SIZE];
46
45
static int copy_sc_from_user(struct pt_regs *regs,
47
static int copy_sc_from_user(struct pt_regs *regs,
46
			     struct sigcontext __user *from,
48
			     struct sigcontext __user *from,
47
			     struct _fpstate __user *fpp)
49
			     struct _fpstate __user *fpp)
Lines 81-93 static int copy_sc_from_user(struct pt_regs *regs, Link Here
81
	if (err)
83
	if (err)
82
		return 1;
84
		return 1;
83
85
84
	err = restore_fp_registers(userspace_pid[current_thread_info()->cpu],
86
	if (have_vcpu)
85
				   (unsigned long *) &fp);
87
		memcpy(fp_regs, &fp, sizeof(fp_regs));
86
	if (err < 0) {
88
	else {
87
		printk(KERN_ERR "copy_sc_from_user - "
89
		err = restore_fp_registers(userspace_pid[current_thread_info()->cpu],
88
		       "restore_fp_registers failed, errno = %d\n",
90
					   (unsigned long *) &fp);
89
		       -err);
91
		if (err < 0) {
90
		return 1;
92
			printk(KERN_ERR "copy_sc_from_user - "
93
			       "restore_fp_registers failed, errno = %d\n",
94
			       -err);
95
			return 1;
96
		}
91
	}
97
	}
92
98
93
	return 0;
99
	return 0;
Lines 143-156 static int copy_sc_to_user(struct sigcontext __user *to, Link Here
143
	if (err)
149
	if (err)
144
		return 1;
150
		return 1;
145
151
146
	err = save_fp_registers(userspace_pid[current_thread_info()->cpu],
152
	if (have_vcpu)
147
				(unsigned long *) &fp);
153
		memcpy(&fp, fp_regs, sizeof(fp));
148
	if (err < 0) {
154
	else {
149
		printk(KERN_ERR "copy_sc_from_user - restore_fp_registers "
155
		err = save_fp_registers(userspace_pid[current_thread_info()->cpu],
150
		       "failed, errno = %d\n", -err);
156
					(unsigned long *) &fp);
151
		return 1;
157
		if (err < 0) {
158
			printk(KERN_ERR "copy_sc_from_user - "
159
			       "restore_fp_registers failed, errno = %d\n",
160
			       -err);
161
			return 1;
162
		}
152
	}
163
	}
153
154
	if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
164
	if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
155
		return 1;
165
		return 1;
156
166
(-)a/arch/um/sys-x86_64/stub.S (-56 / +57 lines)
Lines 1-67 Link Here
1
#include "uml-config.h"
1
#include "uml-config.h"
2
#include "as-layout.h"
2
#include "as-layout.h"
3
#include "skas/skas.h"
3
4
4
	.globl syscall_stub
5
#define PROCESS_LIST \
5
.section .__syscall_stub, "x"
6
	mov	$(STUB_DATA >> 32), %rbx; \
6
syscall_stub:
7
	sal	$32, %rbx; \
7
	syscall
8
	mov	$(STUB_DATA & 0xffffffff), %rax; \
8
	/* We don't have 64-bit constants, so this constructs the address
9
	or	%rax, %rbx; \
9
	 * we need.
10
	/* load pointer to first operation */ \
10
	 */
11
	mov	%rbx, %rsp; \
11
	movq	$(STUB_DATA >> 32), %rbx
12
	add	$0x10, %rsp; \
12
	salq	$32, %rbx
13
1: \
13
	movq	$(STUB_DATA & 0xffffffff), %rcx
14
	/* load length of additional data */ \
14
	or	%rcx, %rbx
15
	mov	0x0(%rsp), %rax; \
15
	movq	%rax, (%rbx)
16
	/* if(length == 0) : end of list */ \
16
	int3
17
	/* write possible 0 to header */ \
18
	mov	%rax, 8(%rbx); \
19
	cmp	$0, %rax; \
20
	jz	2f; \
21
	/* save current pointer */ \
22
	mov	%rsp, 8(%rbx); \
23
	/* skip additional data */ \
24
	add	%rax, %rsp; \
25
	/* load syscall-# */ \
26
	pop	%rax; \
27
	/* load syscall params */ \
28
	pop	%rdi; \
29
	pop	%rsi; \
30
	pop	%rdx; \
31
	pop	%r10; \
32
 	pop	%r8; \
33
	pop	%r9; \
34
	/* execute syscall */ \
35
	syscall; \
36
	/* check return value */ \
37
	pop	%rcx; \
38
	cmp	%rcx, %rax; \
39
	je	1b; \
40
2: \
41
	/* save return value */ \
42
	mov	%rax, (%rbx); \
17
43
44
.section .__syscall_stub, "x"
18
	.globl batch_syscall_stub
45
	.globl batch_syscall_stub
19
batch_syscall_stub:
46
batch_syscall_stub:
20
	mov	$(STUB_DATA >> 32), %rbx
47
	PROCESS_LIST
21
	sal	$32, %rbx
48
	/* stop */
22
	mov	$(STUB_DATA & 0xffffffff), %rax
49
	int3
23
	or	%rax, %rbx
24
	/* load pointer to first operation */
25
	mov	%rbx, %rsp
26
	add	$0x10, %rsp
27
again:
28
	/* load length of additional data */
29
	mov	0x0(%rsp), %rax
30
31
	/* if(length == 0) : end of list */
32
	/* write possible 0 to header */
33
	mov	%rax, 8(%rbx)
34
	cmp	$0, %rax
35
	jz	done
36
37
	/* save current pointer */
38
	mov	%rsp, 8(%rbx)
39
40
	/* skip additional data */
41
	add	%rax, %rsp
42
43
	/* load syscall-# */
44
	pop	%rax
45
50
46
	/* load syscall params */
51
 	.globl switch_mm_stub
47
	pop	%rdi
52
switch_mm_stub:
48
	pop	%rsi
53
	PROCESS_LIST
49
	pop	%rdx
50
	pop	%r10
51
 	pop	%r8
52
	pop	%r9
53
54
54
	/* execute syscall */
55
	mov	$__NR_switch_mm, %rax
56
	mov	$(STUB_DATA >> 32), %rdi
57
	sal	$32, %rdi
58
	mov	$(STUB_DATA & 0xffffffff + 4096 - 8), %rsi
59
	add	%rdi, %rsi
60
	mov	(%rsi), %rdx
61
	sub	$8, %rsi
62
	mov	(%rsi), %rdi
63
	xor	%rsi, %rsi
64
	xor	%r10, %r10
65
	xor	%r8, %r8
55
	syscall
66
	syscall
56
67
57
	/* check return value */
58
	pop	%rcx
59
	cmp	%rcx, %rax
60
	je	again
61
62
done:
63
	/* save return value */
64
	mov	%rax, (%rbx)
65
66
	/* stop */
67
	int3
68
	int3
(-)a/arch/um/sys-x86_64/syscall_table.c (+2 lines)
Lines 39-44 Link Here
39
#define stub_rt_sigsuspend sys_rt_sigsuspend
39
#define stub_rt_sigsuspend sys_rt_sigsuspend
40
#define stub_sigaltstack sys_sigaltstack
40
#define stub_sigaltstack sys_sigaltstack
41
#define stub_rt_sigreturn sys_rt_sigreturn
41
#define stub_rt_sigreturn sys_rt_sigreturn
42
#define stub_switch_mm sys_switch_mm
43
#define stub_vcpu sys_vcpu
42
44
43
#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ;
45
#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ;
44
#undef _ASM_X86_64_UNISTD_H_
46
#undef _ASM_X86_64_UNISTD_H_
(-)a/arch/um/sys-x86_64/syscalls.c (-51 / +68 lines)
Lines 28-88 asmlinkage long sys_uname64(struct new_utsname __user * name) Link Here
28
28
29
long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
29
long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
30
{
30
{
31
	unsigned long *ptr = addr, tmp;
31
	long ret = 0;
32
	long ret;
32
33
	int pid = task->mm->context.id.u.pid;
33
	if (have_vcpu) {
34
		unsigned long *regs = task->thread.regs.regs.gp;
35
		switch (code) {
36
		case ARCH_SET_FS:
37
			task->thread.arch.fs = (unsigned long) addr;
38
			regs[HOST_FS_BASE] = (unsigned long) addr;
39
			break;
40
		case ARCH_SET_GS:
41
			regs[HOST_GS_BASE] = (unsigned long) addr;
42
			break;
43
		case ARCH_GET_FS:
44
			ret = put_user(regs[HOST_FS_BASE], addr);
45
			break;
46
		case ARCH_GET_GS:
47
			ret = put_user(regs[HOST_GS_BASE], addr);
48
			break;
49
		}
50
	} else {
51
		unsigned long *ptr = addr, tmp;
52
		int pid = userspace_pid[0];
34
53
35
	/*
36
	 * With ARCH_SET_FS (and ARCH_SET_GS is treated similarly to
37
	 * be safe), we need to call arch_prctl on the host because
38
	 * setting %fs may result in something else happening (like a
39
	 * GDT or thread.fs being set instead).  So, we let the host
40
	 * fiddle the registers and thread struct and restore the
41
	 * registers afterwards.
42
	 *
43
	 * So, the saved registers are stored to the process (this
44
	 * needed because a stub may have been the last thing to run),
45
	 * arch_prctl is run on the host, then the registers are read
46
	 * back.
47
	 */
48
	switch (code) {
49
	case ARCH_SET_FS:
50
	case ARCH_SET_GS:
51
		ret = restore_registers(pid, &current->thread.regs.regs);
52
		if (ret)
53
			return ret;
54
		break;
55
	case ARCH_GET_FS:
56
	case ARCH_GET_GS:
57
		/*
54
		/*
58
		 * With these two, we read to a local pointer and
55
		 * With ARCH_SET_FS (and ARCH_SET_GS is treated similarly to
59
		 * put_user it to the userspace pointer that we were
56
		 * be safe), we need to call arch_prctl on the host because
60
		 * given.  If addr isn't valid (because it hasn't been
57
		 * setting %fs may result in something else happening (like a
61
		 * faulted in or is just bogus), we want put_user to
58
		 * GDT or thread.fs being set instead).  So, we let the host
62
		 * fault it in (or return -EFAULT) instead of having
59
		 * fiddle the registers and thread struct and restore the
63
		 * the host return -EFAULT.
60
		 * registers afterwards.
61
		 *
62
		 * So, the saved registers are stored to the process (this
63
		 * needed because a stub may have been the last thing to run),
64
		 * arch_prctl is run on the host, then the registers are read
65
		 * back.
64
		 */
66
		 */
65
		ptr = &tmp;
67
		switch (code) {
66
	}
68
		case ARCH_SET_FS:
67
69
		case ARCH_SET_GS:
68
	ret = os_arch_prctl(pid, code, ptr);
70
			restore_registers(pid, &current->thread.regs.regs);
69
	if (ret)
71
			break;
70
		return ret;
72
		case ARCH_GET_FS:
73
		case ARCH_GET_GS:
74
			/*
75
			 * With these two, we read to a local pointer and
76
			 * put_user it to the userspace pointer that we were
77
			 * given.  If addr isn't valid (because it hasn't been
78
			 * faulted in or is just bogus), we want put_user to
79
			 * fault it in (or return -EFAULT) instead of having
80
			 * the host return -EFAULT.
81
			 */
82
			ptr = &tmp;
83
		}
71
84
72
	switch (code) {
85
		ret = os_arch_prctl(pid, code, ptr);
73
	case ARCH_SET_FS:
86
		if (ret)
74
		current->thread.arch.fs = (unsigned long) ptr;
87
			return ret;
75
		ret = save_registers(pid, &current->thread.regs.regs);
88
		switch (code) {
76
		break;
89
		case ARCH_SET_FS:
77
	case ARCH_SET_GS:
90
			current->thread.arch.fs = (unsigned long) ptr;
78
		ret = save_registers(pid, &current->thread.regs.regs);
91
			save_registers(pid, &current->thread.regs.regs);
79
		break;
92
			break;
80
	case ARCH_GET_FS:
93
		case ARCH_SET_GS:
81
		ret = put_user(tmp, addr);
94
			save_registers(pid, &current->thread.regs.regs);
82
		break;
95
			break;
83
	case ARCH_GET_GS:
96
		case ARCH_GET_FS:
84
		ret = put_user(tmp, addr);
97
			ret = put_user(tmp, addr);
85
		break;
98
			break;
99
		case ARCH_GET_GS:
100
			ret = put_user(tmp, addr);
101
			break;
102
		}
86
	}
103
	}
87
104
88
	return ret;
105
	return ret;
(-)a/arch/x86/ia32/ia32_signal.c (+2 lines)
Lines 68-73 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) Link Here
68
				  &to->_sifields._pad[0]);
68
				  &to->_sifields._pad[0]);
69
		switch (from->si_code >> 16) {
69
		switch (from->si_code >> 16) {
70
		case __SI_FAULT >> 16:
70
		case __SI_FAULT >> 16:
71
			err |= __put_user(from->si_trapno, &to->si_trapno);
72
			err |= __put_user(from->si_error, &to->si_error);
71
			break;
73
			break;
72
		case __SI_CHLD >> 16:
74
		case __SI_CHLD >> 16:
73
			err |= __put_user(from->si_utime, &to->si_utime);
75
			err |= __put_user(from->si_utime, &to->si_utime);
(-)a/arch/x86/ia32/ia32entry.S (+3 lines)
Lines 373-378 quiet_ni_syscall: Link Here
373
	PTREGSCALL stub32_vfork, sys_vfork, %rdi
373
	PTREGSCALL stub32_vfork, sys_vfork, %rdi
374
	PTREGSCALL stub32_iopl, sys_iopl, %rsi
374
	PTREGSCALL stub32_iopl, sys_iopl, %rsi
375
	PTREGSCALL stub32_rt_sigsuspend, sys_rt_sigsuspend, %rdx
375
	PTREGSCALL stub32_rt_sigsuspend, sys_rt_sigsuspend, %rdx
376
	PTREGSCALL stub32_switch_mm, sys_switch_mm, %r9
376
377
377
ENTRY(ia32_ptregs_common)
378
ENTRY(ia32_ptregs_common)
378
	popq %r11
379
	popq %r11
Lines 727-730 ia32_sys_call_table: Link Here
727
	.quad sys32_fallocate
728
	.quad sys32_fallocate
728
	.quad compat_sys_timerfd_settime	/* 325 */
729
	.quad compat_sys_timerfd_settime	/* 325 */
729
	.quad compat_sys_timerfd_gettime
730
	.quad compat_sys_timerfd_gettime
731
	.quad sys_new_mm
732
	.quad stub32_switch_mm
730
ia32_syscall_end:
733
ia32_syscall_end:
(-)a/arch/x86/kernel/entry_32.S (-1 / +1 lines)
Lines 371-377 ENTRY(system_call) Link Here
371
	GET_THREAD_INFO(%ebp)
371
	GET_THREAD_INFO(%ebp)
372
					# system call tracing in operation / emulation
372
					# system call tracing in operation / emulation
373
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
373
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
374
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
374
	testl $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_VCPU),TI_flags(%ebp)
375
	jnz syscall_trace_entry
375
	jnz syscall_trace_entry
376
	cmpl $(nr_syscalls), %eax
376
	cmpl $(nr_syscalls), %eax
377
	jae syscall_badsys
377
	jae syscall_badsys
(-)a/arch/x86/kernel/entry_64.S (-1 / +25 lines)
Lines 244-250 ENTRY(system_call_after_swapgs) Link Here
244
	movq  %rcx,RIP-ARGOFFSET(%rsp)
244
	movq  %rcx,RIP-ARGOFFSET(%rsp)
245
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
245
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
246
	GET_THREAD_INFO(%rcx)
246
	GET_THREAD_INFO(%rcx)
247
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
247
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP|_TIF_VCPU),threadinfo_flags(%rcx)
248
	jnz tracesys
248
	jnz tracesys
249
	cmpq $__NR_syscall_max,%rax
249
	cmpq $__NR_syscall_max,%rax
250
	ja badsys
250
	ja badsys
Lines 323-328 tracesys: Link Here
323
	FIXUP_TOP_OF_STACK %rdi
323
	FIXUP_TOP_OF_STACK %rdi
324
	movq %rsp,%rdi
324
	movq %rsp,%rdi
325
	call syscall_trace_enter
325
	call syscall_trace_enter
326
	testl %eax, %eax
327
	jz 2f
328
	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
329
	RESTORE_REST
330
	jmp int_ret_from_sys_call
331
2:
326
	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
332
	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
327
	RESTORE_REST
333
	RESTORE_REST
328
	cmpq $__NR_syscall_max,%rax
334
	cmpq $__NR_syscall_max,%rax
Lines 425-430 END(\label) Link Here
425
	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
431
	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
426
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
432
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
427
	PTREGSCALL stub_iopl, sys_iopl, %rsi
433
	PTREGSCALL stub_iopl, sys_iopl, %rsi
434
	PTREGSCALL stub_switch_mm, sys_switch_mm, %r9
428
435
429
ENTRY(ptregscall_common)
436
ENTRY(ptregscall_common)
430
	popq %r11
437
	popq %r11
Lines 481-486 ENTRY(stub_rt_sigreturn) Link Here
481
END(stub_rt_sigreturn)
488
END(stub_rt_sigreturn)
482
489
483
/*
490
/*
491
 * vcpu is special too
492
 */
493
ENTRY(stub_vcpu)
494
	CFI_STARTPROC
495
	addq $8, %rsp
496
	CFI_ADJUST_CFA_OFFSET	-8
497
	SAVE_REST
498
	movq %rsp,%rdx
499
	FIXUP_TOP_OF_STACK %r11
500
	call sys_vcpu
501
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
502
	RESTORE_REST
503
	jmp int_ret_from_sys_call
504
	CFI_ENDPROC
505
END(stub_vcpu)
506
507
/*
484
 * initial frame state for interrupts and exceptions
508
 * initial frame state for interrupts and exceptions
485
 */
509
 */
486
	.macro _frame ref
510
	.macro _frame ref
(-)a/arch/x86/kernel/ptrace.c (-6 / +194 lines)
Lines 307-314 static int set_flags(struct task_struct *task, unsigned long value) Link Here
307
	return 0;
307
	return 0;
308
}
308
}
309
309
310
static int putreg(struct task_struct *child,
310
int putreg(struct task_struct *child, unsigned long offset, unsigned long value)
311
		  unsigned long offset, unsigned long value)
312
{
311
{
313
	switch (offset) {
312
	switch (offset) {
314
	case offsetof(struct user_regs_struct, cs):
313
	case offsetof(struct user_regs_struct, cs):
Lines 360-366 static int putreg(struct task_struct *child, Link Here
360
	return 0;
359
	return 0;
361
}
360
}
362
361
363
static unsigned long getreg(struct task_struct *task, unsigned long offset)
362
unsigned long getreg(struct task_struct *task, unsigned long offset)
364
{
363
{
365
	switch (offset) {
364
	switch (offset) {
366
	case offsetof(struct user_regs_struct, cs):
365
	case offsetof(struct user_regs_struct, cs):
Lines 1036-1042 long arch_ptrace(struct task_struct *child, long request, long addr, long data) Link Here
1036
				       value);				\
1035
				       value);				\
1037
		break
1036
		break
1038
1037
1039
static int putreg32(struct task_struct *child, unsigned regno, u32 value)
1038
int putreg32(struct task_struct *child, unsigned regno, u32 value)
1040
{
1039
{
1041
	struct pt_regs *regs = task_pt_regs(child);
1040
	struct pt_regs *regs = task_pt_regs(child);
1042
1041
Lines 1101-1107 static int putreg32(struct task_struct *child, unsigned regno, u32 value) Link Here
1101
				       offsetof(struct user_regs_struct, rs)); \
1100
				       offsetof(struct user_regs_struct, rs)); \
1102
		break
1101
		break
1103
1102
1104
static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
1103
int getreg32(struct task_struct *child, unsigned regno, u32 *val)
1105
{
1104
{
1106
	struct pt_regs *regs = task_pt_regs(child);
1105
	struct pt_regs *regs = task_pt_regs(child);
1107
1106
Lines 1254-1259 asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) Link Here
1254
	case PTRACE_SETOPTIONS:
1253
	case PTRACE_SETOPTIONS:
1255
	case PTRACE_SET_THREAD_AREA:
1254
	case PTRACE_SET_THREAD_AREA:
1256
	case PTRACE_GET_THREAD_AREA:
1255
	case PTRACE_GET_THREAD_AREA:
1256
	case PTRACE_SWITCH_MM:
1257
#ifdef X86_BTS
1257
#ifdef X86_BTS
1258
	case PTRACE_BTS_CONFIG:
1258
	case PTRACE_BTS_CONFIG:
1259
	case PTRACE_BTS_STATUS:
1259
	case PTRACE_BTS_STATUS:
Lines 1453-1458 void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code) Link Here
1453
	force_sig_info(SIGTRAP, &info, tsk);
1453
	force_sig_info(SIGTRAP, &info, tsk);
1454
}
1454
}
1455
1455
1456
extern int unvcpu(struct pt_regs *regs, siginfo_t *siginfo);
1457
1456
/* notification of system call entry/exit
1458
/* notification of system call entry/exit
1457
 * - triggered by current->work.syscall_trace
1459
 * - triggered by current->work.syscall_trace
1458
 */
1460
 */
Lines 1489-1494 int do_syscall_trace(struct pt_regs *regs, int entryexit) Link Here
1489
			goto out;
1491
			goto out;
1490
	}
1492
	}
1491
1493
1494
	if (test_thread_flag(TIF_VCPU)) {
1495
		if (entryexit)
1496
			return 0;
1497
1498
		regs->ax = unvcpu(regs, NULL);
1499
		return 1;
1500
	}
1501
1492
	if (!(current->ptrace & PT_PTRACED))
1502
	if (!(current->ptrace & PT_PTRACED))
1493
		goto out;
1503
		goto out;
1494
1504
Lines 1533-1538 out: Link Here
1533
	return 1;
1543
	return 1;
1534
}
1544
}
1535
1545
1546
int ptrace_to_pt_regs(struct pt_regs *regs, struct user_regs __user *ptrace)
1547
{
1548
	struct user_fxsr_struct *fp;
1549
	int i, err;
1550
1551
	if (!access_ok(VERIFY_READ, ptrace, sizeof(*ptrace)))
1552
		return -EFAULT;
1553
1554
	for (i = 0; i < FRAME_SIZE; i++) {
1555
		unsigned long n;
1556
1557
		if (__get_user(n, &ptrace->regs[i]))
1558
			return -EFAULT;
1559
		err = putreg(current, i * 4, n);
1560
		if (err)
1561
			return err;
1562
	}
1563
1564
	if (__get_user(fp, &ptrace->fp_state))
1565
		return -EFAULT;
1566
1567
	if (fp == NULL) {
1568
		clear_used_math();
1569
		return 0;
1570
	}
1571
1572
	set_used_math();
1573
1574
	return xfpregs_set(current, NULL, 0, sizeof(*fp), NULL, fp);
1575
}
1576
1577
int pt_regs_to_ptrace(struct user_regs __user *ptrace, struct pt_regs *regs)
1578
{
1579
	int i;
1580
1581
	if (!access_ok(VERIFY_WRITE, ptrace, sizeof(*ptrace)))
1582
		return -EFAULT;
1583
1584
	for (i = 0; i < FRAME_SIZE; i++) {
1585
		unsigned long n = getreg(current, i * 4);
1586
		if (__put_user(n, &ptrace->regs[i]))
1587
			return -EFAULT;
1588
	}
1589
1590
	if (!used_math()) {
1591
		if (__put_user(NULL, &ptrace->fp_state))
1592
			return -EFAULT;
1593
		return 0;
1594
	}
1595
1596
	if (__put_user(&ptrace->fpregs, &ptrace->fp_state))
1597
		return -EFAULT;
1598
1599
	clear_used_math();
1600
1601
	return xfpregs_get(current, NULL, 0, sizeof(ptrace->fpregs), NULL,
1602
			  &ptrace->fpregs);
1603
}
1536
#else  /* CONFIG_X86_64 */
1604
#else  /* CONFIG_X86_64 */
1537
1605
1538
static void syscall_trace(struct pt_regs *regs)
1606
static void syscall_trace(struct pt_regs *regs)
Lines 1558-1568 static void syscall_trace(struct pt_regs *regs) Link Here
1558
	}
1626
	}
1559
}
1627
}
1560
1628
1561
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
1629
extern int unvcpu(struct pt_regs *regs, siginfo_t *siginfo);
1630
1631
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
1562
{
1632
{
1563
	/* do the secure computing check first */
1633
	/* do the secure computing check first */
1564
	secure_computing(regs->orig_ax);
1634
	secure_computing(regs->orig_ax);
1565
1635
1636
	if (test_thread_flag(TIF_VCPU)) {
1637
		regs->ax = unvcpu(regs, NULL);
1638
		return 1;
1639
	}
1640
1566
	if (test_thread_flag(TIF_SYSCALL_TRACE)
1641
	if (test_thread_flag(TIF_SYSCALL_TRACE)
1567
	    && (current->ptrace & PT_PTRACED))
1642
	    && (current->ptrace & PT_PTRACED))
1568
		syscall_trace(regs);
1643
		syscall_trace(regs);
Lines 1580-1585 asmlinkage void syscall_trace_enter(struct pt_regs *regs) Link Here
1580
					    regs->dx, regs->r10);
1655
					    regs->dx, regs->r10);
1581
		}
1656
		}
1582
	}
1657
	}
1658
1659
	return 0;
1583
}
1660
}
1584
1661
1585
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1662
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
Lines 1593-1596 asmlinkage void syscall_trace_leave(struct pt_regs *regs) Link Here
1593
		syscall_trace(regs);
1670
		syscall_trace(regs);
1594
}
1671
}
1595
1672
1673
int ptrace_to_pt_regs(struct pt_regs *regs, struct user_regs *ptrace)
1674
{
1675
	struct user_i387_struct *fp;
1676
	int i, err;
1677
1678
#ifdef CONFIG_IA32_EMULATION
1679
	if (test_thread_flag(TIF_IA32)) {
1680
		for (i = 0; i < MAX_REG32_NR; i++) {
1681
			err = putreg32(current, i * 4, ptrace->u.regs32[i]);
1682
			if (err)
1683
				return err;
1684
		}
1685
1686
		return 0;
1687
	}
1688
#endif
1689
	for (i = 0; i < MAX_REG_NR; i++){
1690
		if(i * 8 == offsetof(struct user_regs_struct, fs))
1691
			continue;
1692
1693
		err = putreg(current, i * 8, ptrace->u.regs64.regs[i]);
1694
		if (err)
1695
			return err;
1696
	}
1697
1698
	if (__get_user(fp, &ptrace->u.regs64.fp_state))
1699
		return -EFAULT;
1700
1701
	if (fp == NULL) {
1702
		clear_used_math();
1703
		return 0;
1704
	}
1705
1706
	set_used_math();
1707
1708
	return xfpregs_set(current, NULL, 0, sizeof(*fp), NULL, fp);
1709
}
1710
1711
extern int getreg32(struct task_struct *child, unsigned regno, u32 *val);
1712
1713
int pt_regs_to_ptrace(struct user_regs __user *ptrace, struct pt_regs *regs)
1714
{
1715
	int i, err;
1716
1717
#ifdef CONFIG_IA32_EMULATION
1718
	if (test_thread_flag(TIF_IA32)) {
1719
		if (!access_ok(VERIFY_WRITE, &ptrace->u.regs32,
1720
			       sizeof(&ptrace->u.regs32)))
1721
			return -EFAULT;
1722
1723
		for (i = 0; i < ARRAY_SIZE(ptrace->u.regs32); i++) {
1724
			u32 n;
1725
1726
			err = getreg32(current, i * 4, &n);
1727
			if (err)
1728
				return err;
1729
1730
			err = __put_user(n, &ptrace->u.regs32[i]);
1731
			if (err)
1732
				return err;
1733
		}
1734
1735
		return 0;
1736
	}
1737
#endif
1738
	if (!access_ok(VERIFY_WRITE, &ptrace->u.regs64,
1739
		       sizeof(ptrace->u.regs64)))
1740
		return -EFAULT;
1741
1742
	for (i = 0; i < ARRAY_SIZE(ptrace->u.regs64.regs); i++) {
1743
		unsigned long n = getreg(current, i * 8);
1744
		err = __put_user(n, &ptrace->u.regs64.regs[i]);
1745
		if (err)
1746
			return err;
1747
	}
1748
1749
	if (!used_math()) {
1750
		if (__put_user(NULL, &ptrace->u.regs64.fp_state))
1751
			return -EFAULT;
1752
		return 0;
1753
	}
1754
1755
	if (__put_user(&ptrace->u.regs64.fpregs, &ptrace->u.regs64.fp_state))
1756
		return -EFAULT;
1757
1758
	clear_used_math();
1759
1760
	return xfpregs_get(current, NULL, 0, sizeof(ptrace->u.regs64.fpregs),
1761
			   NULL, &ptrace->u.regs64.fpregs);
1762
}
1763
1764
#define RIP_INDEX (128 / sizeof(long))
1765
#define RSP_INDEX (152 / sizeof(long))
1766
1767
unsigned long ptrace_ip(struct user_regs *regs)
1768
{
1769
#ifdef CONFIG_IA32_EMULATION
1770
	if (test_thread_flag(TIF_IA32))
1771
		return ptrace_ip32(regs->u.regs32);
1772
#endif
1773
	return regs->u.regs64.regs[RIP_INDEX];
1774
}
1775
1776
unsigned long ptrace_sp(struct user_regs *regs)
1777
{
1778
#ifdef CONFIG_IA32_EMULATION
1779
	if (test_thread_flag(TIF_IA32))
1780
		return ptrace_sp32(regs->u.regs32);
1781
#endif
1782
	return regs->u.regs64.regs[RSP_INDEX];
1783
}
1596
#endif	/* CONFIG_X86_32 */
1784
#endif	/* CONFIG_X86_32 */
(-)a/arch/x86/kernel/signal_32.c (+7 lines)
Lines 573-578 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, Link Here
573
	return ret;
573
	return ret;
574
}
574
}
575
575
576
extern int unvcpu(struct pt_regs *regs, siginfo_t *siginfo);
577
576
/*
578
/*
577
 * Note that 'init' is a special process: it doesn't get signals it doesn't
579
 * Note that 'init' is a special process: it doesn't get signals it doesn't
578
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
580
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
Lines 603-608 static void do_signal(struct pt_regs *regs) Link Here
603
605
604
	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
606
	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
605
	if (signr > 0) {
607
	if (signr > 0) {
608
		if (test_thread_flag(TIF_VCPU)) {
609
			regs->ax = unvcpu(regs, &info);
610
			return;
611
		}
612
606
		/* Re-enable any watchpoints before delivering the
613
		/* Re-enable any watchpoints before delivering the
607
		 * signal to user space. The processor register will
614
		 * signal to user space. The processor register will
608
		 * have been cleared if the watchpoint triggered
615
		 * have been cleared if the watchpoint triggered
(-)a/arch/x86/kernel/signal_64.c (+7 lines)
Lines 407-412 handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, Link Here
407
	return ret;
407
	return ret;
408
}
408
}
409
409
410
extern int unvcpu(struct pt_regs *regs, siginfo_t *siginfo);
411
410
/*
412
/*
411
 * Note that 'init' is a special process: it doesn't get signals it doesn't
413
 * Note that 'init' is a special process: it doesn't get signals it doesn't
412
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
414
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
Lines 435-440 static void do_signal(struct pt_regs *regs) Link Here
435
437
436
	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
438
	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
437
	if (signr > 0) {
439
	if (signr > 0) {
440
		if (test_thread_flag(TIF_VCPU)) {
441
			regs->ax = unvcpu(regs, &info);
442
			return;
443
		}
444
438
		/* Re-enable any watchpoints before delivering the
445
		/* Re-enable any watchpoints before delivering the
439
		 * signal to user space. The processor register will
446
		 * signal to user space. The processor register will
440
		 * have been cleared if the watchpoint triggered
447
		 * have been cleared if the watchpoint triggered
(-)a/arch/x86/kernel/sys_i386_32.c (+26 lines)
Lines 21-26 Link Here
21
21
22
#include <asm/uaccess.h>
22
#include <asm/uaccess.h>
23
#include <asm/unistd.h>
23
#include <asm/unistd.h>
24
#include <asm/user.h>
24
25
25
/*
26
/*
26
 * sys_pipe() is the normal C calling standard for creating
27
 * sys_pipe() is the normal C calling standard for creating
Lines 261-263 int kernel_execve(const char *filename, char *const argv[], char *const envp[]) Link Here
261
	: "0" (__NR_execve),"ri" (filename),"c" (argv), "d" (envp) : "memory");
262
	: "0" (__NR_execve),"ri" (filename),"c" (argv), "d" (envp) : "memory");
262
	return __res;
263
	return __res;
263
}
264
}
265
266
extern long do_switch_mm(int fd, struct user_regs __user *save,
267
			 struct user_regs __user *new, unsigned long ip,
268
			 unsigned long sp, struct pt_regs *regs);
269
270
asmlinkage long sys_switch_mm(struct pt_regs regs)
271
{
272
	return do_switch_mm(regs.bx, (struct user_regs __user *) regs.cx,
273
			    (struct user_regs __user *) regs.dx, regs.si,
274
			    regs.di, &regs);
275
}
276
277
extern long do_vcpu(int mm_fd, struct vcpu_user __user *new,
278
		    struct pt_regs *regs);
279
280
asmlinkage long sys_vcpu(struct pt_regs regs)
281
{
282
	int err;
283
284
	err = do_vcpu(regs.bx, (struct vcpu_user __user *) regs.cx, &regs);
285
	if (err)
286
		return err;
287
288
	return regs.ax;
289
}
(-)a/arch/x86/kernel/sys_x86_64.c (+26 lines)
Lines 251-253 asmlinkage long sys_uname(struct new_utsname __user * name) Link Here
251
		err |= copy_to_user(&name->machine, "i686", 5); 		
251
		err |= copy_to_user(&name->machine, "i686", 5); 		
252
	return err ? -EFAULT : 0;
252
	return err ? -EFAULT : 0;
253
}
253
}
254
255
extern long do_switch_mm(int fd, struct user_regs __user *save,
256
			 struct user_regs __user *new, unsigned long ip,
257
			 unsigned long sp, struct pt_regs *regs);
258
259
asmlinkage long sys_switch_mm(int fd, struct user_regs __user *save,
260
			      struct user_regs __user *new, unsigned long ip,
261
			      unsigned long sp, struct pt_regs *regs)
262
{
263
	return do_switch_mm(fd, save, new, ip, sp, regs);
264
}
265
266
extern long do_vcpu(int mm_fd, struct vcpu_user __user *new,
267
		    struct pt_regs *regs);
268
269
asmlinkage long sys_vcpu(int mm_fd, struct vcpu_user __user *new,
270
			 struct pt_regs *regs)
271
{
272
	int err;
273
274
	err = do_vcpu(mm_fd, new, regs);
275
	if (err)
276
		return err;
277
278
	return regs->ax;
279
}
(-)a/arch/x86/kernel/syscall_table_32.S (+3 lines)
Lines 326-328 ENTRY(sys_call_table) Link Here
326
	.long sys_fallocate
326
	.long sys_fallocate
327
	.long sys_timerfd_settime	/* 325 */
327
	.long sys_timerfd_settime	/* 325 */
328
	.long sys_timerfd_gettime
328
	.long sys_timerfd_gettime
329
	.long sys_new_mm
330
	.long sys_switch_mm
331
	.long sys_vcpu
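Reading the table positions above, the three new entries land directly after sys_timerfd_gettime, so on 32-bit x86 they would get syscall numbers 327, 328 and 329. The defines below are an inference from the hunk (the patch's own unistd_32.h, not shown in this excerpt, is what would really provide them):

/* Inferred from the table above; use the patched <asm/unistd.h> when available. */
#ifndef __NR_new_mm
#define __NR_new_mm	327
#define __NR_switch_mm	328
#define __NR_vcpu	329
#endif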
(-)a/arch/x86/mm/fault.c (+2 lines)
Lines 173-178 static void force_sig_info_fault(int si_signo, int si_code, Link Here
173
	info.si_errno = 0;
173
	info.si_errno = 0;
174
	info.si_code = si_code;
174
	info.si_code = si_code;
175
	info.si_addr = (void __user *)address;
175
	info.si_addr = (void __user *)address;
176
	info.si_trapno = tsk->thread.trap_no;
177
	info.si_error = tsk->thread.error_code;
176
	force_sig_info(si_signo, &info, tsk);
178
	force_sig_info(si_signo, &info, tsk);
177
}
179
}
178
180
(-)a/fs/proc/base.c (+32 lines)
Lines 2279-2284 static int proc_pid_io_accounting(struct task_struct *task, char *buffer) Link Here
2279
}
2279
}
2280
#endif
2280
#endif
2281
2281
2282
static int proc_pid_mm_open(struct inode *inode, struct file *file)
2283
{
2284
	struct task_struct *task = pid_task(proc_pid(inode), PIDTYPE_PID);
2285
	struct mm_struct *mm;
2286
2287
	if (task == NULL)
2288
		return -ENOENT;
2289
2290
	mm = get_task_mm(task);
2291
	if (mm == NULL)
2292
		return -EINVAL;
2293
2294
	file->private_data = mm;
2295
	return 0;
2296
}
2297
2298
static int proc_pid_mm_release(struct inode *inode, struct file *file)
2299
{
2300
	struct mm_struct *mm = file->private_data;
2301
2302
	if(mm != NULL)
2303
		mmput(mm);
2304
2305
	return 0;
2306
}
2307
2308
const struct file_operations proc_pid_mm_operations = {
2309
	.open		= proc_pid_mm_open,
2310
	.release	= proc_pid_mm_release,
2311
};
2312
2282
/*
2313
/*
2283
 * Thread groups
2314
 * Thread groups
2284
 */
2315
 */
Lines 2350-2355 static const struct pid_entry tgid_base_stuff[] = { Link Here
2350
#ifdef CONFIG_TASK_IO_ACCOUNTING
2381
#ifdef CONFIG_TASK_IO_ACCOUNTING
2351
	INF("io",	S_IRUGO, pid_io_accounting),
2382
	INF("io",	S_IRUGO, pid_io_accounting),
2352
#endif
2383
#endif
2384
	REG("mm", S_IRUSR | S_IWUSR, pid_mm),
2353
};
2385
};
2354
2386
2355
static int proc_tgid_base_readdir(struct file * filp,
2387
static int proc_tgid_base_readdir(struct file * filp,
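The new /proc/<pid>/mm entry added above has no read or write methods: opening it simply takes a reference on the task's mm_struct, and the resulting descriptor is what switch_mm(), vcpu() and PTRACE_SWITCH_MM accept as an address-space handle. Probing for the feature from userspace is therefore just an open(), which is all the UML check earlier in this patch does; a stand-alone version:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/self/mm", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/self/mm (host kernel probably lacks this patch)");
		return 1;
	}
	printf("host supports /proc/<pid>/mm, fd %d\n", fd);
	close(fd);
	return 0;
}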
(-)a/include/asm-generic/siginfo.h (+6 lines)
Lines 82-87 typedef struct siginfo { Link Here
82
#ifdef __ARCH_SI_TRAPNO
82
#ifdef __ARCH_SI_TRAPNO
83
			int _trapno;	/* TRAP # which caused the signal */
83
			int _trapno;	/* TRAP # which caused the signal */
84
#endif
84
#endif
85
#ifdef __ARCH_SI_ERROR
86
			int _error;	/* CPU error code */
87
#endif
85
		} _sigfault;
88
		} _sigfault;
86
89
87
		/* SIGPOLL */
90
		/* SIGPOLL */
Lines 112-117 typedef struct siginfo { Link Here
112
#ifdef __ARCH_SI_TRAPNO
115
#ifdef __ARCH_SI_TRAPNO
113
#define si_trapno	_sifields._sigfault._trapno
116
#define si_trapno	_sifields._sigfault._trapno
114
#endif
117
#endif
118
#ifdef __ARCH_SI_ERROR
119
#define si_error	_sifields._sigfault._error
120
#endif
115
#define si_band		_sifields._sigpoll._band
121
#define si_band		_sifields._sigpoll._band
116
#define si_fd		_sifields._sigpoll._fd
122
#define si_fd		_sifields._sigpoll._fd
117
123
(-)a/include/asm-um/desc.h (+6 lines)
Lines 1-6 Link Here
1
#ifndef __UM_DESC_H
1
#ifndef __UM_DESC_H
2
#define __UM_DESC_H
2
#define __UM_DESC_H
3
3
4
#ifdef CONFIG_64BIT
5
#define LM(info) ((info)->lm == 0)
6
#else
7
#define LM(info) (1)
8
#endif
4
/* Taken from asm-i386/desc.h, it's the only thing we need. The rest wouldn't
9
/* Taken from asm-i386/desc.h, it's the only thing we need. The rest wouldn't
5
 * compile, and has never been used. */
10
 * compile, and has never been used. */
6
#define LDT_empty(info) (\
11
#define LDT_empty(info) (\
Lines 11-16 Link Here
11
	(info)->seg_32bit	== 0	&& \
16
	(info)->seg_32bit	== 0	&& \
12
	(info)->limit_in_pages	== 0	&& \
17
	(info)->limit_in_pages	== 0	&& \
13
	(info)->seg_not_present	== 1	&& \
18
	(info)->seg_not_present	== 1	&& \
19
	LM(info)			&& \
14
	(info)->useable		== 0	)
20
	(info)->useable		== 0	)
15
21
16
#endif
22
#endif
(-)a/include/asm-um/host_ldt-i386.h (-11 / +2 lines)
Lines 1-7 Link Here
1
#ifndef __ASM_HOST_LDT_I386_H
1
#ifndef __ASM_HOST_LDT_I386_H
2
#define __ASM_HOST_LDT_I386_H
2
#define __ASM_HOST_LDT_I386_H
3
3
4
#include "asm/arch/ldt.h"
4
#include <asm/desc.h>
5
#include <asm/arch/ldt.h>
5
6
6
/*
7
/*
7
 * macros stolen from include/asm-i386/desc.h
8
 * macros stolen from include/asm-i386/desc.h
Lines 21-34 Link Here
21
	((info)->useable << 20) | \
22
	((info)->useable << 20) | \
22
	0x7000)
23
	0x7000)
23
24
24
#define LDT_empty(info) (\
25
	(info)->base_addr	== 0	&& \
26
	(info)->limit		== 0	&& \
27
	(info)->contents	== 0	&& \
28
	(info)->read_exec_only	== 1	&& \
29
	(info)->seg_32bit	== 0	&& \
30
	(info)->limit_in_pages	== 0	&& \
31
	(info)->seg_not_present	== 1	&& \
32
	(info)->useable		== 0	)
33
34
#endif
25
#endif
(-)a/include/asm-um/host_ldt-x86_64.h (-12 / +2 lines)
Lines 1-7 Link Here
1
#ifndef __ASM_HOST_LDT_X86_64_H
1
#ifndef __ASM_HOST_LDT_X86_64_H
2
#define __ASM_HOST_LDT_X86_64_H
2
#define __ASM_HOST_LDT_X86_64_H
3
3
4
#include "asm/arch/ldt.h"
4
#include <asm/desc.h>
5
#include <asm/arch/ldt.h>
5
6
6
/*
7
/*
7
 * macros stolen from include/asm-x86_64/desc.h
8
 * macros stolen from include/asm-x86_64/desc.h
Lines 24-38 Link Here
24
	/* ((info)->lm << 21) | */ \
25
	/* ((info)->lm << 21) | */ \
25
	0x7000)
26
	0x7000)
26
27
27
#define LDT_empty(info) (\
28
	(info)->base_addr	== 0	&& \
29
	(info)->limit		== 0	&& \
30
	(info)->contents	== 0	&& \
31
	(info)->read_exec_only	== 1	&& \
32
	(info)->seg_32bit	== 0	&& \
33
	(info)->limit_in_pages	== 0	&& \
34
	(info)->seg_not_present	== 1	&& \
35
	(info)->useable		== 0	&& \
36
	(info)->lm              == 0)
37
38
#endif
28
#endif
(-)a/include/asm-um/processor-i386.h (-13 / +11 lines)
Lines 1-25 Link Here
1
/*
1
/*
2
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
2
 * Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3
 * Licensed under the GPL
3
 * Licensed under the GPL
4
 */
4
 */
5
5
6
#ifndef __UM_PROCESSOR_I386_H
6
#ifndef __UM_PROCESSOR_I386_H
7
#define __UM_PROCESSOR_I386_H
7
#define __UM_PROCESSOR_I386_H
8
8
9
#include "linux/string.h"
9
#include <linux/kernel.h>
10
#include "asm/host_ldt.h"
10
#include <linux/string.h>
11
#include "asm/segment.h"
11
#include <asm/host_ldt.h>
12
12
#include <asm/segment.h>
13
extern int host_has_cmov;
14
15
/* include faultinfo structure */
16
#include "sysdep/faultinfo.h"
13
#include "sysdep/faultinfo.h"
14
#include "sysdep/tls.h"
17
15
18
struct uml_tls_struct {
16
extern int host_has_cmov;
19
	struct user_desc tls;
20
	unsigned flushed:1;
21
	unsigned present:1;
22
};
23
17
24
struct arch_thread {
18
struct arch_thread {
25
	struct uml_tls_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
19
	struct uml_tls_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
Lines 38-45 struct arch_thread { Link Here
38
32
39
static inline void arch_flush_thread(struct arch_thread *thread)
33
static inline void arch_flush_thread(struct arch_thread *thread)
40
{
34
{
35
	int i;
36
41
	/* Clear any TLS still hanging */
37
	/* Clear any TLS still hanging */
42
	memset(&thread->tls_array, 0, sizeof(thread->tls_array));
38
	memset(&thread->tls_array, 0, sizeof(thread->tls_array));
39
	for (i = 0; i < ARRAY_SIZE(thread->tls_array); i++)
40
		thread->tls_array[i].tls.entry_number = GDT_ENTRY_TLS_MIN + i;
43
}
41
}
44
42
45
static inline void arch_copy_thread(struct arch_thread *from,
43
static inline void arch_copy_thread(struct arch_thread *from,
(-)a/include/asm-um/ptrace-generic.h (+9 lines)
Lines 34-39 struct pt_regs { Link Here
34
34
35
#define instruction_pointer(regs) PT_REGS_IP(regs)
35
#define instruction_pointer(regs) PT_REGS_IP(regs)
36
36
37
struct user_regs {
38
	unsigned long regs[MAX_REG_NR];
39
	void *ptr;
40
	unsigned long fpregs[FP_SIZE];
41
};
42
43
extern int ptrace_to_pt_regs(struct pt_regs *to, struct user_regs __user *from);
44
extern int pt_regs_to_ptrace(struct user_regs __user *to, struct pt_regs *from);
45
37
struct task_struct;
46
struct task_struct;
38
47
39
extern long subarch_ptrace(struct task_struct *child, long request, long addr,
48
extern long subarch_ptrace(struct task_struct *child, long request, long addr,
(-)a/include/asm-um/ptrace-i386.h (-2 / +11 lines)
Lines 8-15 Link Here
8
8
9
#define HOST_AUDIT_ARCH AUDIT_ARCH_I386
9
#define HOST_AUDIT_ARCH AUDIT_ARCH_I386
10
10
11
#include "linux/compiler.h"
11
#include "user_constants.h"
12
#include "asm/ptrace-generic.h"
12
#define FP_SIZE ((HOST_XFP_SIZE > HOST_FP_SIZE) ? HOST_XFP_SIZE : HOST_FP_SIZE)
13
14
#include <linux/compiler.h>
15
#include <asm/ptrace-generic.h>
13
#include <asm/user.h>
16
#include <asm/user.h>
14
#include "sysdep/ptrace.h"
17
#include "sysdep/ptrace.h"
15
18
Lines 40-45 Link Here
40
43
41
#define user_mode(r) UPT_IS_USER(&(r)->regs)
44
#define user_mode(r) UPT_IS_USER(&(r)->regs)
42
45
46
#define pt_regs_ip(r) (r).regs.gp[EIP]
47
#define pt_regs_sp(r) (r).regs.gp[UESP]
48
49
#define ptrace_ip(r) (r)->regs[EIP]
50
#define ptrace_sp(r) (r)->regs[UESP]
51
43
/*
52
/*
44
 * Forward declaration to avoid including sysdep/tls.h, which causes a
53
 * Forward declaration to avoid including sysdep/tls.h, which causes a
45
 * circular include, and compilation failures.
54
 * circular include, and compilation failures.
(-)a/include/asm-um/ptrace-x86_64.h (+9 lines)
Lines 7-12 Link Here
7
#ifndef __UM_PTRACE_X86_64_H
7
#ifndef __UM_PTRACE_X86_64_H
8
#define __UM_PTRACE_X86_64_H
8
#define __UM_PTRACE_X86_64_H
9
9
10
#include "user_constants.h"
11
#define FP_SIZE (HOST_FP_SIZE)
12
10
#include "linux/compiler.h"
13
#include "linux/compiler.h"
11
#include "asm/errno.h"
14
#include "asm/errno.h"
12
#include "asm/host_ldt.h"
15
#include "asm/host_ldt.h"
Lines 62-67 Link Here
62
65
63
#define PT_FIX_EXEC_STACK(sp) do ; while(0)
66
#define PT_FIX_EXEC_STACK(sp) do ; while(0)
64
67
68
#define pt_regs_ip(r) (r).regs.gp[RIP / sizeof(long)]
69
#define pt_regs_sp(r) (r).regs.gp[RSP / sizeof(long)]
70
71
#define ptrace_ip(r) (r)->regs[RIP / sizeof(long)]
72
#define ptrace_sp(r) (r)->regs[RSP / sizeof(long)]
73
65
#define profile_pc(regs) PT_REGS_IP(regs)
74
#define profile_pc(regs) PT_REGS_IP(regs)
66
75
67
static inline int ptrace_get_thread_area(struct task_struct *child, int idx,
76
static inline int ptrace_get_thread_area(struct task_struct *child, int idx,
(-)a/include/asm-um/thread_info.h (+2 lines)
Lines 83-88 static inline struct thread_info *current_thread_info(void) Link Here
83
#define TIF_MEMDIE	 	5
83
#define TIF_MEMDIE	 	5
84
#define TIF_SYSCALL_AUDIT	6
84
#define TIF_SYSCALL_AUDIT	6
85
#define TIF_RESTORE_SIGMASK	7
85
#define TIF_RESTORE_SIGMASK	7
86
#define TIF_VCPU		8
86
87
87
#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
88
#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
88
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
89
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
Lines 91-95 static inline struct thread_info *current_thread_info(void) Link Here
91
#define _TIF_MEMDIE		(1 << TIF_MEMDIE)
92
#define _TIF_MEMDIE		(1 << TIF_MEMDIE)
92
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
93
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
93
#define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
94
#define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
95
#define _TIF_VCPU		(1 << TIF_VCPU)
94
96
95
#endif
97
#endif
(-)a/include/asm-x86/Kbuild (+1 lines)
Lines 21-25 unifdef-y += posix_types_64.h Link Here
21
unifdef-y += ptrace.h
21
unifdef-y += ptrace.h
22
unifdef-y += unistd_32.h
22
unifdef-y += unistd_32.h
23
unifdef-y += unistd_64.h
23
unifdef-y += unistd_64.h
24
unifdef-y += user.h
24
unifdef-y += vm86.h
25
unifdef-y += vm86.h
25
unifdef-y += vsyscall.h
26
unifdef-y += vsyscall.h
(-)a/include/asm-x86/ia32.h (+2 lines)
Lines 119-124 typedef struct compat_siginfo{ Link Here
119
		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
119
		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
120
		struct {
120
		struct {
121
			unsigned int _addr;	/* faulting insn/memory ref. */
121
			unsigned int _addr;	/* faulting insn/memory ref. */
122
			int _trapno;	/* TRAP # which caused the signal */
123
			int _error;	/* CPU error code */
122
		} _sigfault;
124
		} _sigfault;
123
125
124
		/* SIGPOLL */
126
		/* SIGPOLL */
(-)a/include/asm-x86/ptrace.h (-1 / +59 lines)
Lines 3-9 Link Here
3
3
4
#include <linux/compiler.h>	/* For __user */
4
#include <linux/compiler.h>	/* For __user */
5
#include <asm/ptrace-abi.h>
5
#include <asm/ptrace-abi.h>
6
6
#include <asm/user.h>
7
7
8
#ifndef __ASSEMBLY__
8
#ifndef __ASSEMBLY__
9
9
Lines 55-60 struct pt_regs { Link Here
55
	int  ss;
55
	int  ss;
56
};
56
};
57
57
58
#define pt_regs_ip(r) (r).ip
59
#define pt_regs_sp(r) (r).sp
60
61
struct user_regs {
62
	unsigned long regs[FRAME_SIZE];
63
	struct user_fxsr_struct *fp_state;
64
	struct user_fxsr_struct fpregs;
65
};
66
67
#define ptrace_ip(r) (r)->regs.ip
68
#define ptrace_sp(r) (r)->regs.sp
69
70
struct pt_regs;
71
extern int ptrace_to_pt_regs(struct pt_regs *regs,
72
			     struct user_regs __user *ptrace);
73
extern int pt_regs_to_ptrace(struct user_regs __user *ptrace,
74
			     struct pt_regs *regs);
75
58
#include <asm/vm86.h>
76
#include <asm/vm86.h>
59
#include <asm/segment.h>
77
#include <asm/segment.h>
60
78
Lines 227-232 extern int do_get_thread_area(struct task_struct *p, int idx, Link Here
227
extern int do_set_thread_area(struct task_struct *p, int idx,
245
extern int do_set_thread_area(struct task_struct *p, int idx,
228
			      struct user_desc __user *info, int can_allocate);
246
			      struct user_desc __user *info, int can_allocate);
229
247
248
#ifdef CONFIG_X86_64
249
#ifdef CONFIG_IA32_EMULATION
250
#define MAX_REG32_NR 17
251
252
#define EIP 12
253
#define UESP 15
254
255
#define ptrace_ip32(regs) (unsigned long) (regs)[EIP]
256
#define ptrace_sp32(regs) (unsigned long) (regs)[UESP]
257
258
#endif
259
260
#define MAX_REG_NR (sizeof(struct user_regs_struct) / sizeof(long))
261
262
struct user_regs {
263
	union {
264
		struct  {
265
			unsigned long regs[MAX_REG_NR];
266
			struct user_i387_struct *fp_state;
267
			struct user_i387_struct fpregs;
268
		} regs64;
269
#ifdef CONFIG_IA32_EMULATION
270
		u32 regs32[MAX_REG32_NR];
271
#endif
272
	} u;
273
};
274
275
#define pt_regs_ip(regs) (regs).ip
276
#define pt_regs_sp(regs) (regs).sp
277
278
extern unsigned long ptrace_ip(struct user_regs *regs);
279
extern unsigned long ptrace_sp(struct user_regs *regs);
280
281
extern int ptrace_to_pt_regs(struct pt_regs *regs,
282
			     struct user_regs __user *ptrace);
283
extern int pt_regs_to_ptrace(struct user_regs __user *ptrace,
284
			     struct pt_regs *regs);
285
#else
286
#endif
287
230
#endif /* __KERNEL__ */
288
#endif /* __KERNEL__ */
231
289
232
#endif /* !__ASSEMBLY__ */
290
#endif /* !__ASSEMBLY__ */
(-)a/include/asm-x86/siginfo.h (+3 lines)
Lines 5-10 Link Here
5
# define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
5
# define __ARCH_SI_PREAMBLE_SIZE	(4 * sizeof(int))
6
#endif
6
#endif
7
7
8
#define __ARCH_SI_TRAPNO
9
#define __ARCH_SI_ERROR
10
8
#include <asm-generic/siginfo.h>
11
#include <asm-generic/siginfo.h>
9
12
10
#endif
13
#endif
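The two defines above make the x86 SIGSEGV siginfo carry the CPU trap number and error code. A minimal sketch of a handler that reads them is below; it assumes a C library whose siginfo_t was regenerated from these headers so that si_trapno and si_error exist (with a stock glibc on x86 neither member is defined), and the null-pointer store is only there to force a fault.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void segv_handler(int sig, siginfo_t *si, void *ctx)
{
	/* si_trapno / si_error are the fields enabled by __ARCH_SI_TRAPNO
	 * and __ARCH_SI_ERROR above. */
	fprintf(stderr, "SIGSEGV at %p, trapno %d, error %d\n",
		si->si_addr, si->si_trapno, si->si_error);
	exit(1);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = segv_handler,
				.sa_flags = SA_SIGINFO };

	sigaction(SIGSEGV, &sa, NULL);
	*(volatile int *) 0 = 0;	/* force a fault */
	return 0;
}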
(-)a/include/asm-x86/thread_info_32.h (+2 lines)
Lines 142-147 static inline struct thread_info *current_thread_info(void) Link Here
142
#define TIF_DEBUGCTLMSR		22	/* uses thread_struct.debugctlmsr */
142
#define TIF_DEBUGCTLMSR		22	/* uses thread_struct.debugctlmsr */
143
#define TIF_DS_AREA_MSR 	23      /* uses thread_struct.ds_area_msr */
143
#define TIF_DS_AREA_MSR 	23      /* uses thread_struct.ds_area_msr */
144
#define TIF_BTS_TRACE_TS        24      /* record scheduling event timestamps */
144
#define TIF_BTS_TRACE_TS        24      /* record scheduling event timestamps */
145
#define TIF_VCPU		25
145
146
146
#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
147
#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
147
#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
148
#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
Lines 161-166 static inline struct thread_info *current_thread_info(void) Link Here
161
#define _TIF_DEBUGCTLMSR	(1<<TIF_DEBUGCTLMSR)
162
#define _TIF_DEBUGCTLMSR	(1<<TIF_DEBUGCTLMSR)
162
#define _TIF_DS_AREA_MSR	(1<<TIF_DS_AREA_MSR)
163
#define _TIF_DS_AREA_MSR	(1<<TIF_DS_AREA_MSR)
163
#define _TIF_BTS_TRACE_TS	(1<<TIF_BTS_TRACE_TS)
164
#define _TIF_BTS_TRACE_TS	(1<<TIF_BTS_TRACE_TS)
165
#define _TIF_VCPU		(1<<TIF_VCPU)
164
166
165
/* work to do on interrupt/exception return */
167
/* work to do on interrupt/exception return */
166
#define _TIF_WORK_MASK \
168
#define _TIF_WORK_MASK \
(-)a/include/asm-x86/thread_info_64.h (+2 lines)
Lines 125-130 static inline struct thread_info *stack_thread_info(void) Link Here
125
#define TIF_DEBUGCTLMSR		25	/* uses thread_struct.debugctlmsr */
125
#define TIF_DEBUGCTLMSR		25	/* uses thread_struct.debugctlmsr */
126
#define TIF_DS_AREA_MSR		26      /* uses thread_struct.ds_area_msr */
126
#define TIF_DS_AREA_MSR		26      /* uses thread_struct.ds_area_msr */
127
#define TIF_BTS_TRACE_TS	27      /* record scheduling event timestamps */
127
#define TIF_BTS_TRACE_TS	27      /* record scheduling event timestamps */
128
#define TIF_VCPU		28
128
129
129
#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
130
#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
130
#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
131
#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
Lines 146-151 static inline struct thread_info *stack_thread_info(void) Link Here
146
#define _TIF_DEBUGCTLMSR	(1<<TIF_DEBUGCTLMSR)
147
#define _TIF_DEBUGCTLMSR	(1<<TIF_DEBUGCTLMSR)
147
#define _TIF_DS_AREA_MSR	(1<<TIF_DS_AREA_MSR)
148
#define _TIF_DS_AREA_MSR	(1<<TIF_DS_AREA_MSR)
148
#define _TIF_BTS_TRACE_TS	(1<<TIF_BTS_TRACE_TS)
149
#define _TIF_BTS_TRACE_TS	(1<<TIF_BTS_TRACE_TS)
150
#define _TIF_VCPU		(1<<TIF_VCPU)
149
151
150
/* work to do on interrupt/exception return */
152
/* work to do on interrupt/exception return */
151
#define _TIF_WORK_MASK \
153
#define _TIF_WORK_MASK \
(-)a/include/asm-x86/unistd_32.h (+3 lines)
Lines 332-337 Link Here
332
#define __NR_fallocate		324
332
#define __NR_fallocate		324
333
#define __NR_timerfd_settime	325
333
#define __NR_timerfd_settime	325
334
#define __NR_timerfd_gettime	326
334
#define __NR_timerfd_gettime	326
335
#define __NR_new_mm		327
336
#define __NR_switch_mm		328
337
#define __NR_vcpu		329
335
338
336
#ifdef __KERNEL__
339
#ifdef __KERNEL__
337
340
(-)a/include/asm-x86/unistd_64.h (+6 lines)
Lines 639-644 __SYSCALL(__NR_fallocate, sys_fallocate) Link Here
639
__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
639
__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
640
#define __NR_timerfd_gettime			287
640
#define __NR_timerfd_gettime			287
641
__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
641
__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
642
#define __NR_new_mm				288
643
__SYSCALL(__NR_new_mm, sys_new_mm)
644
#define __NR_switch_mm				289
645
__SYSCALL(__NR_switch_mm, stub_switch_mm)
646
#define __NR_vcpu				290
647
__SYSCALL(__NR_vcpu, stub_vcpu)
642
648
643
649
644
#ifndef __NO_STUBS
650
#ifndef __NO_STUBS
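The two unistd hunks above reserve syscall numbers for new_mm, switch_mm and vcpu on i386 (327-329) and x86_64 (288-290). A minimal sketch of reaching new_mm by number from userspace follows; the calling convention is an assumption read off this series (new_mm() takes no arguments and returns a file descriptor naming a copy of the caller's address space).

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_new_mm
#define __NR_new_mm	288	/* x86_64 number from this patch */
#endif

int main(void)
{
	long fd = syscall(__NR_new_mm);	/* fd naming a fresh copy of this mm */

	if (fd < 0) {
		perror("new_mm");
		return 1;
	}
	printf("new address space is fd %ld\n", fd);
	return 0;
}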
(-)a/include/linux/init_task.h (+1 lines)
Lines 193-198 extern struct group_info init_groups; Link Here
193
		[PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),		\
193
		[PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),		\
194
	},								\
194
	},								\
195
	.dirties = INIT_PROP_LOCAL_SINGLE(dirties),			\
195
	.dirties = INIT_PROP_LOCAL_SINGLE(dirties),			\
196
	.vcpu		= NULL,						\
196
	INIT_IDS							\
197
	INIT_IDS							\
197
	INIT_TRACE_IRQFLAGS						\
198
	INIT_TRACE_IRQFLAGS						\
198
	INIT_LOCKDEP							\
199
	INIT_LOCKDEP							\
(-)a/include/linux/ptrace.h (+2 lines)
Lines 21-26 Link Here
21
21
22
#define PTRACE_SYSCALL		  24
22
#define PTRACE_SYSCALL		  24
23
23
24
#define PTRACE_SWITCH_MM	  34
25
24
/* 0x4200-0x4300 are reserved for architecture-independent additions.  */
26
/* 0x4200-0x4300 are reserved for architecture-independent additions.  */
25
#define PTRACE_SETOPTIONS	0x4200
27
#define PTRACE_SETOPTIONS	0x4200
26
#define PTRACE_GETEVENTMSG	0x4201
28
#define PTRACE_GETEVENTMSG	0x4201
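PTRACE_SWITCH_MM lets a tracer point a stopped child at the address space behind a file descriptor. A minimal sketch is below; per the kernel/ptrace.c hunk later in this patch, the request is routed to do_switch(), so the descriptor is looked up in the tracer's own file table, and the long-to-pointer cast follows the usual ptrace() calling convention.

#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_SWITCH_MM
#define PTRACE_SWITCH_MM 34	/* value added above */
#endif

/* Switch a ptrace-stopped child onto the mm named by mm_fd; mm_fd is a
 * descriptor in the tracer's file table (from new_mm() or /proc). */
static long switch_child_mm(pid_t child, int mm_fd)
{
	return ptrace(PTRACE_SWITCH_MM, child, 0L, (void *) (long) mm_fd);
}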
(-)a/include/linux/sched.h (+21 lines)
Lines 65-70 struct sched_param { Link Here
65
#include <asm/page.h>
65
#include <asm/page.h>
66
#include <asm/ptrace.h>
66
#include <asm/ptrace.h>
67
#include <asm/cputime.h>
67
#include <asm/cputime.h>
68
#include <asm/ldt.h>
68
69
69
#include <linux/smp.h>
70
#include <linux/smp.h>
70
#include <linux/sem.h>
71
#include <linux/sem.h>
Lines 991-996 struct sched_rt_entity { Link Here
991
#endif
992
#endif
992
};
993
};
993
994
995
struct vcpu_user {
996
	enum { VCPU_SYSCALL, VCPU_SIGNAL } event;
997
	struct user_regs regs;
998
	siginfo_t siginfo;
999
#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)
1000
	struct user_desc tls_array[GDT_ENTRY_TLS_ENTRIES];
1001
#endif
1002
};
1003
1004
struct vcpu {
1005
	struct vcpu_user user;
1006
	struct mm_struct *mm;
1007
	struct vcpu_user __user *state;
1008
#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)
1009
	struct user_desc tls[GDT_ENTRY_TLS_ENTRIES];
1010
#endif
1011
};
1012
994
struct task_struct {
1013
struct task_struct {
995
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1014
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
996
	void *stack;
1015
	void *stack;
Lines 1103-1108 struct task_struct { Link Here
1103
  	cputime_t it_prof_expires, it_virt_expires;
1122
  	cputime_t it_prof_expires, it_virt_expires;
1104
	unsigned long long it_sched_expires;
1123
	unsigned long long it_sched_expires;
1105
	struct list_head cpu_timers[3];
1124
	struct list_head cpu_timers[3];
1125
	struct vcpu *vcpu;
1106
1126
1107
/* process credentials */
1127
/* process credentials */
1108
	uid_t uid,euid,suid,fsuid;
1128
	uid_t uid,euid,suid,fsuid;
Lines 1750-1755 static inline int sas_ss_flags(unsigned long sp) Link Here
1750
 * Routines for handling mm_structs
1770
 * Routines for handling mm_structs
1751
 */
1771
 */
1752
extern struct mm_struct * mm_alloc(void);
1772
extern struct mm_struct * mm_alloc(void);
1773
extern struct mm_struct *dup_mm(struct task_struct *tsk);
1753
1774
1754
/* mmdrop drops the mm and the page tables */
1775
/* mmdrop drops the mm and the page tables */
1755
extern void __mmdrop(struct mm_struct *);
1776
extern void __mmdrop(struct mm_struct *);
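struct vcpu_user above is the block a host process hands to, and gets back from, the vcpu call: the guest register file, the event that ended guest execution, and the fault information when that event was a signal. A minimal sketch of dispatching on it follows; only part of the structure is mirrored here (the register file and TLS entries are elided), so it is illustrative rather than ABI-accurate.

#include <signal.h>
#include <stdio.h>

/* Partial userspace mirror of struct vcpu_user; regs and tls_array elided. */
struct vcpu_user_view {
	enum { VCPU_SYSCALL, VCPU_SIGNAL } event;
	siginfo_t siginfo;
};

static void report_vcpu_exit(const struct vcpu_user_view *state)
{
	if (state->event == VCPU_SYSCALL)
		printf("guest stopped at a system call\n");
	else
		printf("guest took signal %d at %p\n",
		       state->siginfo.si_signo, state->siginfo.si_addr);
}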
(-)a/include/linux/signalfd.h (-1 / +3 lines)
Lines 26-31 struct signalfd_siginfo { Link Here
26
	__u64 ssi_utime;
26
	__u64 ssi_utime;
27
	__u64 ssi_stime;
27
	__u64 ssi_stime;
28
	__u64 ssi_addr;
28
	__u64 ssi_addr;
29
	__u32 ssi_trap_no;
30
	__u32 ssi_error_code;
29
31
30
	/*
32
	/*
31
	 * Pad structure to 128 bytes. Remember to update the
33
	 * Pad structure to 128 bytes. Remember to update the
Lines 36-42 struct signalfd_siginfo { Link Here
36
	 * comes out of a read(2) and we really don't want to have
38
	 * comes out of a read(2) and we really don't want to have
37
	 * a compat on read(2).
39
	 * a compat on read(2).
38
	 */
40
	 */
39
	__u8 __pad[48];
41
	__u8 __pad[40];
40
};
42
};
41
43
42
44
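The signalfd hunk above exposes the same fault details through signalfd(2), shrinking the pad so the structure stays 128 bytes. A minimal sketch of consuming them is below; it assumes a <sys/signalfd.h> regenerated from the patched header, since a stock libc's struct signalfd_siginfo has neither ssi_trap_no nor ssi_error_code and still uses the old layout.

#include <stdio.h>
#include <sys/signalfd.h>

/* Print the fault details carried by the two new fields; they are only
 * meaningful for fault signals such as SIGSEGV or SIGBUS. */
static void print_fault_info(const struct signalfd_siginfo *ssi)
{
	printf("signal %u at 0x%llx, trapno %u, error %u\n",
	       ssi->ssi_signo, (unsigned long long) ssi->ssi_addr,
	       ssi->ssi_trap_no, ssi->ssi_error_code);
}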
(-)a/kernel/Makefile (-1 / +1 lines)
Lines 9-15 obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ Link Here
9
	    rcupdate.o extable.o params.o posix-timers.o \
9
	    rcupdate.o extable.o params.o posix-timers.o \
10
	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
10
	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
11
	    hrtimer.o rwsem.o nsproxy.o srcu.o \
11
	    hrtimer.o rwsem.o nsproxy.o srcu.o \
12
	    notifier.o ksysfs.o pm_qos_params.o
12
	    notifier.o ksysfs.o pm_qos_params.o vcpu.o
13
13
14
obj-$(CONFIG_SYSCTL) += sysctl_check.o
14
obj-$(CONFIG_SYSCTL) += sysctl_check.o
15
obj-$(CONFIG_STACKTRACE) += stacktrace.o
15
obj-$(CONFIG_STACKTRACE) += stacktrace.o
(-)a/kernel/exit.c (+5 lines)
Lines 175-180 repeat: Link Here
175
175
176
	write_unlock_irq(&tasklist_lock);
176
	write_unlock_irq(&tasklist_lock);
177
	release_thread(p);
177
	release_thread(p);
178
179
	if (p->vcpu && p->vcpu->mm)
180
		mmput(p->vcpu->mm);
181
	kfree(p->vcpu);
182
178
	call_rcu(&p->rcu, delayed_put_task_struct);
183
	call_rcu(&p->rcu, delayed_put_task_struct);
179
184
180
	p = leader;
185
	p = leader;
(-)a/kernel/fork.c (-1 / +3 lines)
Lines 498-504 void mm_release(struct task_struct *tsk, struct mm_struct *mm) Link Here
498
 * Allocate a new mm structure and copy contents from the
498
 * Allocate a new mm structure and copy contents from the
499
 * mm structure of the passed in task structure.
499
 * mm structure of the passed in task structure.
500
 */
500
 */
501
static struct mm_struct *dup_mm(struct task_struct *tsk)
501
struct mm_struct *dup_mm(struct task_struct *tsk)
502
{
502
{
503
	struct mm_struct *mm, *oldmm = current->mm;
503
	struct mm_struct *mm, *oldmm = current->mm;
504
	int err;
504
	int err;
Lines 1086-1091 static struct task_struct *copy_process(unsigned long clone_flags, Link Here
1086
	clear_tsk_thread_flag(p, TIF_SIGPENDING);
1086
	clear_tsk_thread_flag(p, TIF_SIGPENDING);
1087
	init_sigpending(&p->pending);
1087
	init_sigpending(&p->pending);
1088
1088
1089
	p->vcpu = NULL;
1090
1089
	p->utime = cputime_zero;
1091
	p->utime = cputime_zero;
1090
	p->stime = cputime_zero;
1092
	p->stime = cputime_zero;
1091
	p->gtime = cputime_zero;
1093
	p->gtime = cputime_zero;
(-)a/kernel/ptrace.c (+6 lines)
Lines 420-425 static int ptrace_resume(struct task_struct *child, long request, long data) Link Here
420
	return 0;
420
	return 0;
421
}
421
}
422
422
423
extern int do_switch(struct task_struct *task, int fd);
424
423
int ptrace_request(struct task_struct *child, long request,
425
int ptrace_request(struct task_struct *child, long request,
424
		   long addr, long data)
426
		   long addr, long data)
425
{
427
{
Lines 471-476 int ptrace_request(struct task_struct *child, long request, Link Here
471
			return 0;
473
			return 0;
472
		return ptrace_resume(child, request, SIGKILL);
474
		return ptrace_resume(child, request, SIGKILL);
473
475
476
	case PTRACE_SWITCH_MM:
477
		ret = do_switch(child, data);
478
		break;
479
474
	default:
480
	default:
475
		break;
481
		break;
476
	}
482
	}
(-)a/kernel/signal.c (-1 / +7 lines)
Lines 1785-1790 relock: Link Here
1785
		if (!signr)
1785
		if (!signr)
1786
			break; /* will return 0 */
1786
			break; /* will return 0 */
1787
1787
1788
		if (test_thread_flag(TIF_VCPU))
1789
			break;
1790
1788
		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1791
		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1789
			ptrace_signal_deliver(regs, cookie);
1792
			ptrace_signal_deliver(regs, cookie);
1790
1793
Lines 2106-2112 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) Link Here
2106
	 */
2109
	 */
2107
	err = __put_user(from->si_signo, &to->si_signo);
2110
	err = __put_user(from->si_signo, &to->si_signo);
2108
	err |= __put_user(from->si_errno, &to->si_errno);
2111
	err |= __put_user(from->si_errno, &to->si_errno);
2109
	err |= __put_user((short)from->si_code, &to->si_code);
2112
	err |= __put_user(from->si_code, &to->si_code);
2110
	switch (from->si_code & __SI_MASK) {
2113
	switch (from->si_code & __SI_MASK) {
2111
	case __SI_KILL:
2114
	case __SI_KILL:
2112
		err |= __put_user(from->si_pid, &to->si_pid);
2115
		err |= __put_user(from->si_pid, &to->si_pid);
Lines 2126-2131 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) Link Here
2126
#ifdef __ARCH_SI_TRAPNO
2129
#ifdef __ARCH_SI_TRAPNO
2127
		err |= __put_user(from->si_trapno, &to->si_trapno);
2130
		err |= __put_user(from->si_trapno, &to->si_trapno);
2128
#endif
2131
#endif
2132
#ifdef __ARCH_SI_ERROR
2133
		err |= __put_user(from->si_error, &to->si_error);
2134
#endif
2129
		break;
2135
		break;
2130
	case __SI_CHLD:
2136
	case __SI_CHLD:
2131
		err |= __put_user(from->si_pid, &to->si_pid);
2137
		err |= __put_user(from->si_pid, &to->si_pid);
(-)a/kernel/vcpu.c (+129 lines)
Line 0 Link Here
1
/*
2
 * Copyright (C) 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3
 * Licensed under the GPL
4
 */
5
6
#include <linux/sched.h>
7
#include <asm/uaccess.h>
8
#include <asm/desc.h>
9
10
extern asmlinkage int sys_get_thread_area(struct user_desc __user *u_info);
11
extern asmlinkage int sys_set_thread_area(struct user_desc __user *u_info);
12
extern int do_switch(struct task_struct *task, int fd);
13
14
long do_vcpu(int mm_fd, struct vcpu_user __user *new, struct pt_regs *regs)
15
{
16
	mm_segment_t fs;
17
	struct vcpu *vcpu;
18
	int err;
19
20
	if (current->vcpu == NULL) {
21
		current->vcpu = kmalloc(sizeof(struct vcpu), GFP_KERNEL);
22
		if (current->vcpu == NULL)
23
			return -ENOMEM;
24
	}
25
26
	vcpu = current->vcpu;
27
	vcpu->mm = NULL;
28
	vcpu->state = new;
29
30
	fs = get_fs();
31
	set_fs(KERNEL_DS);
32
	err = pt_regs_to_ptrace(&vcpu->user.regs, regs);
33
	set_fs(fs);
34
	if (err)
35
		return err;
36
37
	err = ptrace_to_pt_regs(regs, &new->regs);
38
	if (err)
39
		return err;
40
41
#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)
42
	{ int i;
43
44
	memcpy(vcpu->tls, current->thread.tls_array, sizeof(vcpu->tls));
45
	for (i = 0; i < ARRAY_SIZE(new->tls_array); i++){
46
		fs = get_fs();
47
		set_fs(KERNEL_DS);
48
		vcpu->tls[i].entry_number = GDT_ENTRY_TLS_MIN + i;
49
		err = sys_get_thread_area(&vcpu->tls[i]);
50
		set_fs(fs);
51
		if (err)
52
			return err;
53
54
		err = sys_set_thread_area(&new->tls_array[i]);
55
		if (err)
56
			return err;
57
	}
58
	}
59
#endif
60
61
	if (mm_fd != -1) {
62
		vcpu->mm = current->mm;
63
		atomic_inc(&vcpu->mm->mm_users);
64
65
		err = do_switch(current, mm_fd);
66
		if (err)
67
			return err;
68
	}
69
70
#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)
71
	loadsegment(gs, current->thread.gs);
72
#endif
73
	set_thread_flag(TIF_VCPU);
74
75
	return 0;
76
}
77
78
extern void do_switch_mm_struct(struct task_struct *task,
79
				struct mm_struct *new);
80
81
int unvcpu(struct pt_regs *regs, siginfo_t *siginfo)
82
{
83
	mm_segment_t fs;
84
	struct vcpu *vcpu;
85
	int err, event;
86
87
	clear_thread_flag(TIF_VCPU);
88
89
	vcpu = current->vcpu;
90
	if (vcpu->mm != NULL) {
91
		do_switch_mm_struct(current, vcpu->mm);
92
		mmput(vcpu->mm);
93
		vcpu->mm = NULL;
94
	}
95
96
	err = pt_regs_to_ptrace(&vcpu->state->regs, regs);
97
	if (err)
98
		return err;
99
100
	err = -EFAULT;
101
	if ((siginfo != NULL) &&
102
	    (copy_to_user(&vcpu->state->siginfo, siginfo,
103
			  sizeof(siginfo_t)) != 0))
104
		return err;
105
106
	event = (siginfo != NULL) ? VCPU_SIGNAL : VCPU_SYSCALL;
107
	if (copy_to_user(&vcpu->state->event, &event, sizeof(event)) != 0)
108
		return err;
109
110
#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)
111
	{ int i;
112
	for (i = 0; i < ARRAY_SIZE(vcpu->state->tls_array); i++){
113
		fs = get_fs();
114
		set_fs(KERNEL_DS);
115
		err = sys_set_thread_area(&vcpu->tls[i]);
116
		set_fs(fs);
117
		if (err)
118
			return err;
119
	}
120
	}
121
#endif
122
123
	fs = get_fs();
124
	set_fs(KERNEL_DS);
125
	err = ptrace_to_pt_regs(regs, &vcpu->user.regs);
126
	set_fs(fs);
127
128
	return err;
129
}
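kernel/vcpu.c above implements the round trip: do_vcpu() saves the caller's registers (and, on 32-bit x86, its TLS slots), loads the guest state from the vcpu_user block, optionally switches to the mm named by mm_fd, and sets TIF_VCPU, so returning to userspace resumes the guest context; when that context makes a system call or takes a signal, unvcpu() restores the host mm, TLS and registers and the original vcpu call returns with event and siginfo filled in. A minimal sketch of the host-side loop this enables follows, assuming the call is exposed as syscall(__NR_vcpu, mm_fd, &state) in line with the do_vcpu() prototype (the arch entry stub lives in another part of this series) and reusing the vcpu_user mirror sketched with the sched.h hunk.

#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_vcpu
#define __NR_vcpu 290			/* x86_64 number from this series */
#endif

struct vcpu_user_view;			/* see the sched.h sketch above */

static void run_guest(int mm_fd, struct vcpu_user_view *state)
{
	for (;;) {
		/* Enter guest context; returns on a guest syscall or signal. */
		if (syscall(__NR_vcpu, mm_fd, state) < 0)
			break;

		/* Here the host would decode state->event, emulate the
		 * system call or handle the fault, update the guest
		 * registers in *state, and loop back in. */
	}
}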
(-)a/mm/Makefile (-2 / +2 lines)
Lines 4-11 Link Here
4
4
5
mmu-y			:= nommu.o
5
mmu-y			:= nommu.o
6
mmu-$(CONFIG_MMU)	:= fremap.o highmem.o madvise.o memory.o mincore.o \
6
mmu-$(CONFIG_MMU)	:= fremap.o highmem.o madvise.o memory.o mincore.o \
7
			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
7
			   mlock.o mmap.o mmfs.o mprotect.o mremap.o msync.o \
8
			   vmalloc.o
8
			   rmap.o vmalloc.o
9
9
10
obj-y			:= bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
10
obj-y			:= bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
11
			   page_alloc.o page-writeback.o pdflush.o \
11
			   page_alloc.o page-writeback.o pdflush.o \
(-)a/mm/mmfs.c (+215 lines)
Line 0 Link Here
1
#define __FRAME_OFFSETS
2
#include <linux/file.h>
3
#include <linux/fs.h>
4
#include <linux/mount.h>
5
#include <linux/sched.h>
6
#include <asm/mmu_context.h>
7
#include <asm/ptrace.h>
8
#include <asm/uaccess.h>
9
#include <asm/user.h>
10
11
static int release_mm(struct inode *inode, struct file *file)
12
{
13
	struct mm_struct *mm = file->private_data;
14
15
	mmput(mm);
16
	return 0;
17
}
18
19
#define MM_MAGIC 0xE0AAC500
20
21
static int mm_get_sb(struct file_system_type *fs_type,
22
		     int flags, const char *dev_name, void *data,
23
		     struct vfsmount *mnt)
24
{
25
	return get_sb_pseudo(fs_type, "mm:", NULL, MM_MAGIC, mnt);
26
}
27
28
static struct vfsmount *mm_mnt;
29
30
static struct file_system_type mm_fs_type = {
31
	.name		= "mm",
32
	.get_sb		= mm_get_sb,
33
	.kill_sb	= kill_anon_super,
34
};
35
36
static int __init init_mm_fs(void)
37
{
38
	int err;
39
40
	err = register_filesystem(&mm_fs_type);
41
	if (err)
42
		return err;
43
44
	mm_mnt = kern_mount(&mm_fs_type);
45
	if (IS_ERR(mm_mnt)) {
46
		err = PTR_ERR(mm_mnt);
47
		unregister_filesystem(&mm_fs_type);
48
	}
49
50
	return err;
51
}
52
53
static void __exit exit_mm_fs(void)
54
{
55
	unregister_filesystem(&mm_fs_type);
56
	mntput(mm_mnt);
57
}
58
59
fs_initcall(init_mm_fs);
60
module_exit(exit_mm_fs);
61
62
static int mm_delete_dentry(struct dentry *dentry)
63
{
64
	/*
65
	 * At creation time, we pretended this dentry was hashed
66
	 * (by clearing DCACHE_UNHASHED bit in d_flags)
67
	 * At delete time, we restore the truth : not hashed.
68
	 * (so that dput() can proceed correctly)
69
	 */
70
	dentry->d_flags |= DCACHE_UNHASHED;
71
	return 0;
72
}
73
74
/*
75
 * mm_dname() is called from d_path().
76
 */
77
static char *mm_dname(struct dentry *dentry, char *buffer, int buflen)
78
{
79
	return dynamic_dname(dentry, buffer, buflen, "mm:[%lu]",
80
			     dentry->d_inode->i_ino);
81
}
82
83
static struct dentry_operations mm_dentry_operations = {
84
	.d_delete	= mm_delete_dentry,
85
	.d_dname	= mm_dname,
86
};
87
88
static struct file_operations mm_fops = {
89
	.release	= release_mm,
90
};
91
92
asmlinkage long sys_new_mm(void)
93
{
94
	struct file *file;
95
	struct mm_struct *mm;
96
	struct inode *inode;
97
	struct dentry *dentry;
98
	struct qstr name = { .name = "" };
99
	int err, fd;
100
101
	mm = dup_mm(current);
102
	if (mm == NULL)
103
		return -ENOMEM;
104
105
	fd = get_unused_fd();
106
	if (fd < 0) {
107
		err = fd;
108
		goto out_free;
109
	}
110
111
	err = -ENOMEM;
112
	dentry = d_alloc(mm_mnt->mnt_sb->s_root, &name);
113
	if (dentry == NULL)
114
		goto out_put;
115
116
	dentry->d_op = &mm_dentry_operations;
117
	dentry->d_flags &= ~DCACHE_UNHASHED;
118
119
	inode = new_inode(mm_mnt->mnt_sb);
120
	if (inode == NULL)
121
		goto out_dput;
122
123
	inode->i_mode = S_IRUSR;
124
	inode->i_uid = current->fsuid;
125
	inode->i_gid = current->fsgid;
126
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
127
128
	d_instantiate(dentry, inode);
129
130
	file = alloc_file(mm_mnt, dentry, FMODE_READ, &mm_fops);
131
	if (file == NULL)
132
		goto out_dput;
133
134
	file->f_flags = O_RDONLY;
135
	file->private_data = mm;
136
137
	fd_install(fd, file);
138
139
	return fd;
140
141
 out_dput:
142
	dput(dentry);
143
 out_put:
144
	put_unused_fd(fd);
145
 out_free:
146
	mmput(mm);
147
	return err;
148
}
149
150
void do_switch_mm_struct(struct task_struct *task, struct mm_struct *new)
151
{
152
	struct mm_struct *old = task->mm;
153
154
	task_lock(task);
155
156
	atomic_inc(&new->mm_users);
157
	task->mm = new;
158
	task->active_mm = new;
159
160
	if (task == current)
161
		switch_mm(old, task->mm, task);
162
163
	task_unlock(task);
164
165
	mmput(old);
166
}
167
168
extern const struct file_operations proc_pid_mm_operations;
169
170
int do_switch(struct task_struct *task, int fd)
171
{
172
	struct file *file = fget(fd);
173
	int err;
174
175
	if (!file)
176
		return -EBADF;
177
178
	err = -EINVAL;
179
	if ((file->f_op != &mm_fops) && (file->f_op != &proc_pid_mm_operations))
180
		goto out;
181
182
	do_switch_mm_struct(task, file->private_data);
183
184
	err = 0;
185
186
 out:
187
	fput(file);
188
	return err;
189
}
190
191
long do_switch_mm(int fd, struct user_regs __user *save,
192
		  struct user_regs __user *new, unsigned long ip,
193
		  unsigned long sp, struct pt_regs *regs)
194
{
195
	int ret;
196
197
	if (current->mm == NULL)
198
		return -EINVAL;
199
200
	if ((save != NULL) && pt_regs_to_ptrace(save, regs))
201
		return -EFAULT;
202
203
	ret = do_switch(current, fd);
204
	if (ret)
205
		return ret;
206
207
	if (new != NULL)
208
		ret = ptrace_to_pt_regs(regs, new);
209
	else {
210
		pt_regs_ip(*regs) = ip;
211
		pt_regs_sp(*regs) = sp;
212
	}
213
214
	return ret;
215
}
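mm/mmfs.c above is the piece that lets an address space be named by a file descriptor: sys_new_mm() duplicates the caller's mm and wraps it in an inode on the mm pseudo-filesystem (releasing the descriptor drops the reference), while do_switch()/do_switch_mm() move a task onto the mm behind such a descriptor, also accepting the /proc/<pid>/mm files added elsewhere in this series. A minimal sketch of creating a second address space and jumping into it follows; it assumes the x86_64 entry stub passes its arguments straight through to do_switch_mm() (fd, optional save/new register blocks, and an ip/sp pair used when no new block is given), and guest_entry is a hypothetical function mapped in both address spaces.

#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_new_mm
#define __NR_new_mm	288
#endif
#ifndef __NR_switch_mm
#define __NR_switch_mm	289
#endif

extern void guest_entry(void);		/* hypothetical entry point */

static long enter_copy(void *stack_top)
{
	long fd = syscall(__NR_new_mm);

	if (fd < 0)
		return -1;

	/* NULL save/new blocks: execution simply resumes at ip/sp in the
	 * address space named by fd, so on success control does not come
	 * back to this caller. */
	return syscall(__NR_switch_mm, (int) fd, NULL, NULL,
		       (unsigned long) guest_entry, (unsigned long) stack_top);
}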
