Bug 377831: I-pipe interrupt pipeline patch for x86

(-)a/arch/x86/Kconfig (+4 lines)
Lines 480-485 config SCHED_OMIT_FRAME_POINTER

 
 menuconfig PARAVIRT_GUEST
 	bool "Paravirtualized guest support"
+	depends on !IPIPE
 	---help---
 	  Say Y here to get to see options related to running Linux under
 	  various hypervisors.  This option alone does not add any kernel code.

Lines 531-536 source "arch/x86/lguest/Kconfig"

 
 config PARAVIRT
 	bool "Enable paravirtualization code"
+	depends on !IPIPE
 	---help---
 	  This changes the kernel so it can modify itself when it is run
 	  under a hypervisor, potentially improving performance significantly

Lines 750-755 config SCHED_MC

 
 source "kernel/Kconfig.preempt"
 
+source "kernel/ipipe/Kconfig"
+
 config X86_UP_APIC
 	bool "Local APIC support on uniprocessors"
 	depends on X86_32 && !SMP && !X86_32_NON_STANDARD
(-)a/arch/x86/include/asm/apic.h (+6 lines)
Lines 404-410 static inline u32 safe_apic_wait_icr_idle(void)

 }
 
 
+#ifdef CONFIG_IPIPE
+#define ack_APIC_irq() do { } while(0)
+static inline void __ack_APIC_irq(void)
+#else /* !CONFIG_IPIPE */
+#define __ack_APIC_irq() ack_APIC_irq()
 static inline void ack_APIC_irq(void)
+#endif /* CONFIG_IPIPE */
 {
 #ifdef CONFIG_X86_LOCAL_APIC
 	/*
(-)a/arch/x86/include/asm/apicdef.h (+4 lines)
Lines 143-148

 # define MAX_LOCAL_APIC 32768
 #endif
 
+#ifndef __ASSEMBLY__
 /*
  * All x86-64 systems are xAPIC compatible.
  * In the following, "apicid" is a physical APIC ID.

Lines 418-421 struct local_apic

 #else
  #define BAD_APICID 0xFFFFu
 #endif
+
+#endif /* !__ASSEMBLY__ */
+
 #endif /* _ASM_X86_APICDEF_H */
(-)a/arch/x86/include/asm/entry_arch.h (+2 lines)
Lines 22-27 BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,

 		 smp_invalidate_interrupt)
 BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
 		 smp_invalidate_interrupt)
+#ifndef CONFIG_IPIPE
 BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
 		 smp_invalidate_interrupt)
 BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,

Lines 32-37 BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,

 		 smp_invalidate_interrupt)
 BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
 		 smp_invalidate_interrupt)
+#endif /* !CONFIG_IPIPE */
 #endif
 
 BUILD_INTERRUPT(generic_interrupt, GENERIC_INTERRUPT_VECTOR)
(-)a/arch/x86/include/asm/hw_irq.h (+8 lines)
Lines 35-40 extern void spurious_interrupt(void);

 extern void thermal_interrupt(void);
 extern void reschedule_interrupt(void);
 extern void mce_self_interrupt(void);
+#ifdef CONFIG_IPIPE
+void ipipe_ipi0(void);
+void ipipe_ipi1(void);
+void ipipe_ipi2(void);
+void ipipe_ipi3(void);
+void ipipe_ipiX(void);
+#endif
 
 extern void invalidate_interrupt(void);
 extern void invalidate_interrupt0(void);

Lines 115-120 extern void smp_invalidate_interrupt(struct pt_regs *);

 #else
 extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
 #endif
+extern asmlinkage void smp_reboot_interrupt(void);
 #endif
 
 extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
(-)a/arch/x86/include/asm/i387.h (+3 lines)
Lines 289-299 static inline void __clear_fpu(struct task_struct *tsk)

 static inline void kernel_fpu_begin(void)
 {
 	struct thread_info *me = current_thread_info();
+	unsigned long flags;
 	preempt_disable();
+	local_irq_save_hw_cond(flags);
 	if (me->status & TS_USEDFPU)
 		__save_init_fpu(me->task);
 	else
 		clts();
+	local_irq_restore_hw_cond(flags);
 }
 
 static inline void kernel_fpu_end(void)
(-)a/arch/x86/include/asm/i8259.h (-1 / +1 lines)
Lines 24-30 extern unsigned int cached_irq_mask;

 #define SLAVE_ICW4_DEFAULT	0x01
 #define PIC_ICW4_AEOI		2
 
-extern spinlock_t i8259A_lock;
+extern ipipe_spinlock_t i8259A_lock;
 
 extern void init_8259A(int auto_eoi);
 extern void enable_8259A_irq(unsigned int irq);
(-)a/arch/x86/include/asm/ipi.h (+5 lines)
Lines 68-73 __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)

 	 * to the APIC.
 	 */
 	unsigned int cfg;
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
 
 	/*
 	 * Wait for idle.

Lines 83-88 __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)

 	 * Send the IPI. The write to APIC_ICR fires this off.
 	 */
 	native_apic_mem_write(APIC_ICR, cfg);
+
+	local_irq_restore_hw(flags);
 }
 
 /*
(-)a/arch/x86/include/asm/ipipe.h (+158 lines)
Line 0 (new file)

+/*   -*- linux-c -*-
+ *   arch/x86/include/asm/ipipe.h
+ *
+ *   Copyright (C) 2007 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __X86_IPIPE_H
+#define __X86_IPIPE_H
+
+#ifdef CONFIG_IPIPE
+
+#ifndef IPIPE_ARCH_STRING
+#define IPIPE_ARCH_STRING	"2.6-03"
+#define IPIPE_MAJOR_NUMBER	2
+#define IPIPE_MINOR_NUMBER	6
+#define IPIPE_PATCH_NUMBER	3
+#endif
+
+DECLARE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
+
+DECLARE_PER_CPU(unsigned long, __ipipe_cr2);
+
+static inline unsigned __ipipe_get_irq_vector(int irq)
+{
+#ifdef CONFIG_X86_IO_APIC
+	unsigned __ipipe_get_ioapic_irq_vector(int irq);
+	return __ipipe_get_ioapic_irq_vector(irq);
+#elif defined(CONFIG_X86_LOCAL_APIC)
+	return irq >= IPIPE_FIRST_APIC_IRQ && irq < IPIPE_NR_XIRQS ?
+		ipipe_apic_irq_vector(irq) : irq + IRQ0_VECTOR;
+#else
+	return irq + IRQ0_VECTOR;
+#endif
+}
+
+#ifdef CONFIG_X86_32
+# include "ipipe_32.h"
+#else
+# include "ipipe_64.h"
+#endif
+
+/*
+ * The logical processor id and the current Linux task are read from the PDA,
+ * so this is always safe, regardless of the underlying stack.
+ */
+#define ipipe_processor_id()	raw_smp_processor_id()
+#define ipipe_safe_current()	current
+
+#define prepare_arch_switch(next)		\
+do {						\
+	ipipe_schedule_notify(current, next);	\
+	local_irq_disable_hw();			\
+} while(0)
+
+#define task_hijacked(p)						\
+	({ int x = __ipipe_root_domain_p;				\
+	__clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status));	\
+	if (x) local_irq_enable_hw(); !x; })
+
+struct ipipe_domain;
+
+struct ipipe_sysinfo {
+
+	int ncpus;		/* Number of CPUs on board */
+	u64 cpufreq;		/* CPU frequency (in Hz) */
+
+	/* Arch-dependent block */
+
+	struct {
+		unsigned tmirq;	/* Timer tick IRQ */
+		u64 tmfreq;	/* Timer frequency */
+	} archdep;
+};
+
+/* Private interface -- Internal use only */
+
+#define __ipipe_check_platform()	do { } while(0)
+#define __ipipe_init_platform()		do { } while(0)
+#define __ipipe_enable_irq(irq)		irq_to_desc(irq)->chip->enable(irq)
+#define __ipipe_disable_irq(irq)	irq_to_desc(irq)->chip->disable(irq)
+
+#ifdef CONFIG_SMP
+void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd);
+#else
+#define __ipipe_hook_critical_ipi(ipd) do { } while(0)
+#endif
+
+#define __ipipe_disable_irqdesc(ipd, irq)	do { } while(0)
+
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq);
+
+void __ipipe_enable_pipeline(void);
+
+void __ipipe_do_critical_sync(unsigned irq, void *cookie);
+
+void __ipipe_serial_debug(const char *fmt, ...);
+
+extern int __ipipe_tick_irq;
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#define ipipe_update_tick_evtdev(evtdev)				\
+	do {								\
+		if (strcmp((evtdev)->name, "lapic") == 0)		\
+			__ipipe_tick_irq =				\
+				ipipe_apic_vector_irq(LOCAL_TIMER_VECTOR); \
+		else							\
+			__ipipe_tick_irq = 0;				\
+	} while (0)
+#else
+#define ipipe_update_tick_evtdev(evtdev)				\
+	__ipipe_tick_irq = 0
+#endif
+
+int __ipipe_check_lapic(void);
+
+int __ipipe_check_tickdev(const char *devname);
+
+#define __ipipe_syscall_watched_p(p, sc)	\
+	(((p)->flags & PF_EVNOTIFY) || (unsigned long)sc >= NR_syscalls)
+
+#define __ipipe_root_tick_p(regs)	((regs)->flags & X86_EFLAGS_IF)
+
+#else /* !CONFIG_IPIPE */
+
+#define ipipe_update_tick_evtdev(evtdev)	do { } while (0)
+#define task_hijacked(p)			0
+
+#endif /* CONFIG_IPIPE */
+
+#if defined(CONFIG_SMP) && defined(CONFIG_IPIPE)
+#define __ipipe_move_root_irq(irq)					\
+	do {								\
+		if (irq < NR_IRQS) {					\
+			struct irq_chip *chip = irq_to_desc(irq)->chip;	\
+			if (chip->move)					\
+				chip->move(irq);			\
+		}							\
+	} while (0)
+#else /* !(CONFIG_SMP && CONFIG_IPIPE) */
+#define __ipipe_move_root_irq(irq)	do { } while (0)
+#endif /* !(CONFIG_SMP && CONFIG_IPIPE) */
+
+#endif	/* !__X86_IPIPE_H */
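
The __ipipe_get_irq_vector() helper above leans on the pseudo-IRQ scheme declared by ipipe_base.h later in this patch: APIC system vectors are mapped to IRQ numbers just past the external IRQ space (IPIPE_FIRST_APIC_IRQ == NR_IRQS, FIRST_SYSTEM_VECTOR == 0xea). A standalone sketch of that arithmetic follows; NR_IRQS is fixed at 256 and the 0xef local timer vector is used purely for illustration, since both are configuration-dependent.

/* Sketch of the APIC vector <-> pseudo-IRQ mapping from this patch,
 * compiled as an ordinary user-space program for illustration. */
#include <stdio.h>

#define NR_IRQS			256	/* assumed here; config-dependent */
#define FIRST_SYSTEM_VECTOR	0xea	/* from the irq_vectors.h hunk */
#define IPIPE_FIRST_APIC_IRQ	NR_IRQS

#define ipipe_apic_irq_vector(irq)  ((irq) - IPIPE_FIRST_APIC_IRQ + FIRST_SYSTEM_VECTOR)
#define ipipe_apic_vector_irq(vec)  ((vec) - FIRST_SYSTEM_VECTOR + IPIPE_FIRST_APIC_IRQ)

int main(void)
{
	/* A local APIC timer vector of 0xef (assumed) becomes a
	 * pseudo-IRQ just past the external IRQ space, and the
	 * mapping round-trips exactly. */
	int vec = 0xef;
	int irq = ipipe_apic_vector_irq(vec);	/* 0xef - 0xea + 256 = 261 */
	printf("vector 0x%x <-> pseudo-IRQ %d (round-trip 0x%x)\n",
	       vec, irq, ipipe_apic_irq_vector(irq));
	return 0;
}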
(-)a/arch/x86/include/asm/ipipe_32.h (+156 lines)
Line 0 (new file)

+/*   -*- linux-c -*-
+ *   arch/x86/include/asm/ipipe_32.h
+ *
+ *   Copyright (C) 2002-2005 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __X86_IPIPE_32_H
+#define __X86_IPIPE_32_H
+
+#include <linux/cpumask.h>
+#include <linux/list.h>
+#include <linux/threads.h>
+#include <linux/ipipe_percpu.h>
+#include <asm/ptrace.h>
+
+#define ipipe_read_tsc(t)  __asm__ __volatile__("rdtsc" : "=A" (t))
+#define ipipe_cpu_freq() ({ unsigned long long __freq = cpu_has_tsc?(1000LL * cpu_khz):CLOCK_TICK_RATE; __freq; })
+
+#define ipipe_tsc2ns(t) \
+({ \
+	unsigned long long delta = (t)*1000; \
+	do_div(delta, cpu_khz/1000+1); \
+	(unsigned long)delta; \
+})
+
+#define ipipe_tsc2us(t) \
+({ \
+	unsigned long long delta = (t); \
+	do_div(delta, cpu_khz/1000+1); \
+	(unsigned long)delta; \
+})
+
+/* Private interface -- Internal use only */
+
+int __ipipe_handle_irq(struct pt_regs *regs);
+
+static inline unsigned long __ipipe_ffnz(unsigned long ul)
+{
+	__asm__("bsrl %1, %0":"=r"(ul)
+		: "r"(ul));
+	return ul;
+}
+
+struct irq_desc;
+
+void __ipipe_ack_edge_irq(unsigned irq, struct irq_desc *desc);
+
+void __ipipe_end_edge_irq(unsigned irq, struct irq_desc *desc);
+
+static inline void __ipipe_call_root_xirq_handler(unsigned irq,
+						  ipipe_irq_handler_t handler)
+{
+	struct pt_regs *regs = &__raw_get_cpu_var(__ipipe_tick_regs);
+
+	regs->orig_ax = ~__ipipe_get_irq_vector(irq);
+
+	__asm__ __volatile__("pushfl\n\t"
+			     "pushl %%cs\n\t"
+			     "pushl $__xirq_end\n\t"
+			     "pushl %%eax\n\t"
+			     "pushl %%gs\n\t"
+			     "pushl %%fs\n\t"
+			     "pushl %%es\n\t"
+			     "pushl %%ds\n\t"
+			     "pushl %%eax\n\t"
+			     "pushl %%ebp\n\t"
+			     "pushl %%edi\n\t"
+			     "pushl %%esi\n\t"
+			     "pushl %%edx\n\t"
+			     "pushl %%ecx\n\t"
+			     "pushl %%ebx\n\t"
+			     "movl  %2,%%eax\n\t"
+			     "call *%1\n\t"
+			     "jmp ret_from_intr\n\t"
+			     "__xirq_end: cli\n"
+			     : /* no output */
+			     : "a" (~irq), "r" (handler), "rm" (regs));
+}
+
+void irq_enter(void);
+void irq_exit(void);
+
+static inline void __ipipe_call_root_virq_handler(unsigned irq,
+						  ipipe_irq_handler_t handler,
+						  void *cookie)
+{
+	irq_enter();
+	__asm__ __volatile__("pushfl\n\t"
+			     "pushl %%cs\n\t"
+			     "pushl $__virq_end\n\t"
+			     "pushl $-1\n\t"
+			     "pushl %%gs\n\t"
+			     "pushl %%fs\n\t"
+			     "pushl %%es\n\t"
+			     "pushl %%ds\n\t"
+			     "pushl %%eax\n\t"
+			     "pushl %%ebp\n\t"
+			     "pushl %%edi\n\t"
+			     "pushl %%esi\n\t"
+			     "pushl %%edx\n\t"
+			     "pushl %%ecx\n\t"
+			     "pushl %%ebx\n\t"
+			     "pushl %2\n\t"
+			     "pushl %%eax\n\t"
+			     "call *%1\n\t"
+			     "addl $8,%%esp\n"
+			     : /* no output */
+			     : "a" (irq), "r" (handler), "d" (cookie));
+	irq_exit();
+	__asm__ __volatile__("jmp ret_from_intr\n\t"
+			     "__virq_end: cli\n"
+			     : /* no output */
+			     : /* no input */);
+}
+
+/*
+ * When running handlers, enable hw interrupts for all domains but the
+ * one heading the pipeline, so that IRQs can never be significantly
+ * deferred for the latter.
+ */
+#define __ipipe_run_isr(ipd, irq)					\
+do {									\
+	if (!__ipipe_pipeline_head_p(ipd))				\
+		local_irq_enable_hw();					\
+	if (ipd == ipipe_root_domain) {					\
+		if (likely(!ipipe_virtual_irq_p(irq)))			\
+			__ipipe_call_root_xirq_handler(irq,		\
+						       ipd->irqs[irq].handler); \
+		else							\
+			__ipipe_call_root_virq_handler(irq,		\
+						       ipd->irqs[irq].handler, \
+						       ipd->irqs[irq].cookie); \
+	} else {							\
+		__clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
+		ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);	\
+		__set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
+	}								\
+	local_irq_disable_hw();						\
+} while(0)
+
+#endif	/* !__X86_IPIPE_32_H */
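
The ipipe_tsc2ns() macro above converts TSC ticks to nanoseconds as t*1000 / (cpu_khz/1000 + 1); the +1 keeps the divisor nonzero on sub-MHz clocks at the cost of a slight low bias. A runnable sketch of the same arithmetic, with do_div() replaced by plain 64-bit division and cpu_khz set to 1 GHz purely as an example:

/* User-space model of the 32-bit ipipe_tsc2ns() arithmetic. */
#include <stdio.h>

static unsigned long cpu_khz = 1000000;	/* 1 GHz, assumed for the demo */

static unsigned long tsc2ns(unsigned long long t)
{
	unsigned long long delta = t * 1000;
	/* cpu_khz/1000 is the clock in MHz; +1 avoids a zero divisor
	 * and biases the result slightly low. */
	delta /= cpu_khz / 1000 + 1;
	return (unsigned long)delta;
}

int main(void)
{
	/* 2,000,000 ticks at ~1 GHz: exact value 2,000,000 ns,
	 * computed value 2000000*1000/1001 = 1998001 ns. */
	printf("%lu ns\n", tsc2ns(2000000ULL));
	return 0;
}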
(-)a/arch/x86/include/asm/ipipe_64.h (+161 lines)
Line 0 (new file)

+/*   -*- linux-c -*-
+ *   arch/x86/include/asm/ipipe_64.h
+ *
+ *   Copyright (C) 2007 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __X86_IPIPE_64_H
+#define __X86_IPIPE_64_H
+
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+#include <linux/cpumask.h>
+#include <linux/list.h>
+#include <linux/ipipe_percpu.h>
+#ifdef CONFIG_SMP
+#include <asm/mpspec.h>
+#include <linux/thread_info.h>
+#endif
+
+#define ipipe_read_tsc(t)  do {		\
+	unsigned int __a,__d;			\
+	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+	(t) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
+} while(0)
+
+extern unsigned cpu_khz;
+#define ipipe_cpu_freq() ({ unsigned long __freq = (1000UL * cpu_khz); __freq; })
+#define ipipe_tsc2ns(t)	(((t) * 1000UL) / (ipipe_cpu_freq() / 1000000UL))
+#define ipipe_tsc2us(t)	((t) / (ipipe_cpu_freq() / 1000000UL))
+
+/* Private interface -- Internal use only */
+
+int __ipipe_handle_irq(struct pt_regs *regs);
+
+static inline unsigned long __ipipe_ffnz(unsigned long ul)
+{
+	__asm__("bsrq %1, %0":"=r"(ul)
+		: "rm"(ul));
+	return ul;
+}
+
+struct irq_desc;
+
+void __ipipe_ack_edge_irq(unsigned irq, struct irq_desc *desc);
+
+void __ipipe_end_edge_irq(unsigned irq, struct irq_desc *desc);
+
+static inline void __ipipe_call_root_xirq_handler(unsigned irq,
+						  void (*handler)(unsigned, void *))
+{
+	struct pt_regs *regs = &__raw_get_cpu_var(__ipipe_tick_regs);
+
+	regs->orig_ax = ~__ipipe_get_irq_vector(irq);
+
+	__asm__ __volatile__("movq  %%rsp, %%rax\n\t"
+			     "pushq $0\n\t"
+			     "pushq %%rax\n\t"
+			     "pushfq\n\t"
+			     "pushq %[kernel_cs]\n\t"
+			     "pushq $__xirq_end\n\t"
+			     "pushq %[vector]\n\t"
+			     "subq  $9*8,%%rsp\n\t"
+			     "movq  %%rdi,8*8(%%rsp)\n\t"
+			     "movq  %%rsi,7*8(%%rsp)\n\t"
+			     "movq  %%rdx,6*8(%%rsp)\n\t"
+			     "movq  %%rcx,5*8(%%rsp)\n\t"
+			     "movq  %%rax,4*8(%%rsp)\n\t"
+			     "movq  %%r8,3*8(%%rsp)\n\t"
+			     "movq  %%r9,2*8(%%rsp)\n\t"
+			     "movq  %%r10,1*8(%%rsp)\n\t"
+			     "movq  %%r11,(%%rsp)\n\t"
+			     "call  *%[handler]\n\t"
+			     "cli\n\t"
+			     "jmp exit_intr\n\t"
+			     "__xirq_end: cli\n"
+			     : /* no output */
+			     : [kernel_cs] "i" (__KERNEL_CS),
+			       [vector] "rm" (regs->orig_ax),
+			       [handler] "r" (handler), "D" (regs)
+			     : "rax");
+}
+
+void irq_enter(void);
+void irq_exit(void);
+
+static inline void __ipipe_call_root_virq_handler(unsigned irq,
+						  void (*handler)(unsigned, void *),
+						  void *cookie)
+{
+	irq_enter();
+	__asm__ __volatile__("movq  %%rsp, %%rax\n\t"
+			     "pushq $0\n\t"
+			     "pushq %%rax\n\t"
+			     "pushfq\n\t"
+			     "pushq %[kernel_cs]\n\t"
+			     "pushq $__virq_end\n\t"
+			     "pushq $-1\n\t"
+			     "subq  $9*8,%%rsp\n\t"
+			     "movq  %%rdi,8*8(%%rsp)\n\t"
+			     "movq  %%rsi,7*8(%%rsp)\n\t"
+			     "movq  %%rdx,6*8(%%rsp)\n\t"
+			     "movq  %%rcx,5*8(%%rsp)\n\t"
+			     "movq  %%rax,4*8(%%rsp)\n\t"
+			     "movq  %%r8,3*8(%%rsp)\n\t"
+			     "movq  %%r9,2*8(%%rsp)\n\t"
+			     "movq  %%r10,1*8(%%rsp)\n\t"
+			     "movq  %%r11,(%%rsp)\n\t"
+			     "call  *%[handler]\n\t"
+			     : /* no output */
+			     : [kernel_cs] "i" (__KERNEL_CS),
+			       [handler] "r" (handler), "D" (irq), "S" (cookie)
+			     : "rax");
+	irq_exit();
+	__asm__ __volatile__("cli\n\t"
+			     "jmp exit_intr\n\t"
+			     "__virq_end: cli\n"
+			     : /* no output */
+			     : /* no input */);
+}
+
+/*
+ * When running handlers, enable hw interrupts for all domains but the
+ * one heading the pipeline, so that IRQs can never be significantly
+ * deferred for the latter.
+ */
+#define __ipipe_run_isr(ipd, irq)					\
+	do {								\
+		if (!__ipipe_pipeline_head_p(ipd))			\
+			local_irq_enable_hw();				\
+		if (ipd == ipipe_root_domain) {				\
+			if (likely(!ipipe_virtual_irq_p(irq)))		\
+				__ipipe_call_root_xirq_handler(		\
+					irq, (ipd)->irqs[irq].handler);	\
+			else						\
+				__ipipe_call_root_virq_handler(		\
+					irq, (ipd)->irqs[irq].handler,	\
+					(ipd)->irqs[irq].cookie);	\
+		} else {						\
+			__clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
+			ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
+			__set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
+		}							\
+		local_irq_disable_hw();					\
+	} while(0)
+
+#endif	/* !__X86_IPIPE_64_H */
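
The 64-bit trampoline above hand-builds the stack frame a real interrupt gate plus the register-saving prologue would have produced, so the root handler (called with %rdi pointing at the pt_regs) and the final "jmp exit_intr" run over what looks like an ordinary IRQ frame. Shown as a C struct purely for illustration, assuming the partial pt_regs tail layout of this kernel generation:

/* Stack layout synthesized by __ipipe_call_root_xirq_handler(),
 * lowest address first, matching the pushes and movq stores above. */
#include <stdio.h>
#include <stddef.h>

struct fake_irq_frame {
	/* scratch registers, filled by the nine movq stores */
	unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi;
	unsigned long orig_ax;	/* pushq %[vector]: ~vector of the IRQ */
	unsigned long rip;	/* pushq $__xirq_end: resume address */
	unsigned long cs;	/* pushq %[kernel_cs] */
	unsigned long rflags;	/* pushfq */
	unsigned long rsp;	/* pushq %%rax: stack pointer on entry */
	unsigned long ss;	/* pushq $0 */
};

int main(void)
{
	printf("frame: %zu bytes, orig_ax at %zu, rip at %zu\n",
	       sizeof(struct fake_irq_frame),
	       offsetof(struct fake_irq_frame, orig_ax),
	       offsetof(struct fake_irq_frame, rip));
	return 0;
}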
(-)a/arch/x86/include/asm/ipipe_base.h (+210 lines)
Line 0 (new file)

+/*   -*- linux-c -*-
+ *   arch/x86/include/asm/ipipe_base.h
+ *
+ *   Copyright (C) 2007-2009 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __X86_IPIPE_BASE_H
+#define __X86_IPIPE_BASE_H
+
+#include <linux/threads.h>
+#include <asm/apicdef.h>
+#include <asm/irq_vectors.h>
+
+#ifdef CONFIG_X86_32
+#define IPIPE_NR_FAULTS		33 /* 32 from IDT + iret_error */
+#else
+#define IPIPE_NR_FAULTS		32
+#endif
+
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+/*
+ * System interrupts are mapped beyond the last defined external IRQ
+ * number.
+ */
+#define IPIPE_NR_XIRQS		(NR_IRQS + 32)
+#define IPIPE_FIRST_APIC_IRQ	NR_IRQS
+#define IPIPE_SERVICE_VECTOR0	(INVALIDATE_TLB_VECTOR_END + 1)
+#define IPIPE_SERVICE_IPI0	ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR0)
+#define IPIPE_SERVICE_VECTOR1	(INVALIDATE_TLB_VECTOR_END + 2)
+#define IPIPE_SERVICE_IPI1	ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR1)
+#define IPIPE_SERVICE_VECTOR2	(INVALIDATE_TLB_VECTOR_END + 3)
+#define IPIPE_SERVICE_IPI2	ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR2)
+#define IPIPE_SERVICE_VECTOR3	(INVALIDATE_TLB_VECTOR_END + 4)
+#define IPIPE_SERVICE_IPI3	ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR3)
+#ifdef CONFIG_SMP
+#define IPIPE_CRITICAL_VECTOR	(INVALIDATE_TLB_VECTOR_END + 5)
+#define IPIPE_CRITICAL_IPI	ipipe_apic_vector_irq(IPIPE_CRITICAL_VECTOR)
+#endif
+#define ipipe_apic_irq_vector(irq)  ((irq) - IPIPE_FIRST_APIC_IRQ + FIRST_SYSTEM_VECTOR)
+#define ipipe_apic_vector_irq(vec)  ((vec) - FIRST_SYSTEM_VECTOR + IPIPE_FIRST_APIC_IRQ)
+#else /* !(CONFIG_X86_64 || CONFIG_X86_LOCAL_APIC) */
+#define IPIPE_NR_XIRQS		NR_IRQS
+#endif /* !(CONFIG_X86_64 || CONFIG_X86_LOCAL_APIC) */
+
+/* Pseudo-vectors used for kernel events */
+#define IPIPE_FIRST_EVENT	IPIPE_NR_FAULTS
+#define IPIPE_EVENT_SYSCALL	(IPIPE_FIRST_EVENT)
+#define IPIPE_EVENT_SCHEDULE	(IPIPE_FIRST_EVENT + 1)
+#define IPIPE_EVENT_SIGWAKE	(IPIPE_FIRST_EVENT + 2)
+#define IPIPE_EVENT_SETSCHED	(IPIPE_FIRST_EVENT + 3)
+#define IPIPE_EVENT_INIT	(IPIPE_FIRST_EVENT + 4)
+#define IPIPE_EVENT_EXIT	(IPIPE_FIRST_EVENT + 5)
+#define IPIPE_EVENT_CLEANUP	(IPIPE_FIRST_EVENT + 6)
+#define IPIPE_LAST_EVENT	IPIPE_EVENT_CLEANUP
+#define IPIPE_NR_EVENTS		(IPIPE_LAST_EVENT + 1)
+
+#define ex_do_divide_error			0
+#define ex_do_debug				1
+/* NMI not pipelined. */
+#define ex_do_int3				3
+#define ex_do_overflow				4
+#define ex_do_bounds				5
+#define ex_do_invalid_op			6
+#define ex_do_device_not_available		7
+/* Double fault not pipelined. */
+#define ex_do_coprocessor_segment_overrun	9
+#define ex_do_invalid_TSS			10
+#define ex_do_segment_not_present		11
+#define ex_do_stack_segment			12
+#define ex_do_general_protection		13
+#define ex_do_page_fault			14
+#define ex_do_spurious_interrupt_bug		15
+#define ex_do_coprocessor_error			16
+#define ex_do_alignment_check			17
+#define ex_machine_check_vector			18
+#define ex_reserved				ex_machine_check_vector
+#define ex_do_simd_coprocessor_error		19
+#define ex_do_iret_error			32
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+
+#include <asm/alternative.h>
+
+#ifdef CONFIG_X86_32
+#define GET_ROOT_STATUS_ADDR					\
+	"pushfl; cli;"						\
+	"movl %%fs:per_cpu__this_cpu_off, %%eax;"		\
+	"lea per_cpu__ipipe_percpu_darray(%%eax), %%eax;"
+#define PUT_ROOT_STATUS_ADDR	"popfl;"
+#define TEST_AND_SET_ROOT_STATUS \
+	"btsl $0,(%%eax);"
+#define TEST_ROOT_STATUS \
+	"btl $0,(%%eax);"
+#define ROOT_TEST_CLOBBER_LIST  "eax"
+#else /* CONFIG_X86_64 */
+#define GET_ROOT_STATUS_ADDR					\
+	"pushfq; cli;"						\
+	"movq %%gs:per_cpu__this_cpu_off, %%rax;"		\
+	"lea per_cpu__ipipe_percpu_darray(%%rax), %%rax;"
+#define PUT_ROOT_STATUS_ADDR	"popfq;"
+#define TEST_AND_SET_ROOT_STATUS \
+	"btsl $0,(%%rax);"
+#define TEST_ROOT_STATUS \
+	"btl $0,(%%rax);"
+#define ROOT_TEST_CLOBBER_LIST  "rax"
+#endif /* CONFIG_X86_64 */
+
+static inline void __ipipe_stall_root(void)
+{
+	__asm__ __volatile__(GET_ROOT_STATUS_ADDR
+			     LOCK_PREFIX
+			     TEST_AND_SET_ROOT_STATUS
+			     PUT_ROOT_STATUS_ADDR
+			     : : : ROOT_TEST_CLOBBER_LIST, "memory");
+}
+
+static inline unsigned long __ipipe_test_and_stall_root(void)
+{
+	int oldbit;
+
+	__asm__ __volatile__(GET_ROOT_STATUS_ADDR
+			     LOCK_PREFIX
+			     TEST_AND_SET_ROOT_STATUS
+			     "sbbl %0,%0;"
+			     PUT_ROOT_STATUS_ADDR
+			     :"=r" (oldbit)
+			     : : ROOT_TEST_CLOBBER_LIST, "memory");
+	return oldbit;
+}
+
+static inline unsigned long __ipipe_test_root(void)
+{
+	int oldbit;
+
+	__asm__ __volatile__(GET_ROOT_STATUS_ADDR
+			     TEST_ROOT_STATUS
+			     "sbbl %0,%0;"
+			     PUT_ROOT_STATUS_ADDR
+			     :"=r" (oldbit)
+			     : : ROOT_TEST_CLOBBER_LIST);
+	return oldbit;
+}
+
+#else /* !CONFIG_SMP */
+
+#if __GNUC__ >= 4
+/* Alias to ipipe_root_cpudom_var(status) */
+extern unsigned long __ipipe_root_status;
+#else
+extern unsigned long *const __ipipe_root_status_addr;
+#define __ipipe_root_status	(*__ipipe_root_status_addr)
+#endif
+
+static inline void __ipipe_stall_root(void)
+{
+	volatile unsigned long *p = &__ipipe_root_status;
+	__asm__ __volatile__("btsl $0,%0;"
+			     :"+m" (*p) : : "memory");
+}
+
+static inline unsigned long __ipipe_test_and_stall_root(void)
+{
+	volatile unsigned long *p = &__ipipe_root_status;
+	int oldbit;
+
+	__asm__ __volatile__("btsl $0,%1;"
+			     "sbbl %0,%0;"
+			     :"=r" (oldbit), "+m" (*p)
+			     : : "memory");
+	return oldbit;
+}
+
+static inline unsigned long __ipipe_test_root(void)
+{
+	volatile unsigned long *p = &__ipipe_root_status;
+	int oldbit;
+
+	__asm__ __volatile__("btl $0,%1;"
+			     "sbbl %0,%0;"
+			     :"=r" (oldbit)
+			     :"m" (*p));
+	return oldbit;
+}
+
+#endif /* !CONFIG_SMP */
+
+void __ipipe_halt_root(void);
+
+void __ipipe_serial_debug(const char *fmt, ...);
+
+#endif	/* !__ASSEMBLY__ */
+
+#endif	/* !__X86_IPIPE_BASE_H */
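
The "stall bit" primitives above are the core of the pipeline: under the I-pipe, Linux's local_irq_* operations toggle bit 0 of the per-CPU root status word instead of the hardware IF flag, and native_save_fl() (see the irqflags.h hunk below) synthesizes EFLAGS.IF, which is bit 9 (0x200), from that bit. A user-space model with plain C standing in for the btsl/sbbl asm, single-CPU and single-threaded purely for illustration:

/* Toy model of the virtualized interrupt flag. */
#include <stdio.h>

#define X86_EFLAGS_IF	0x200	/* bit 9 of EFLAGS */

static unsigned long root_status;	/* models ipipe_root_cpudom_var(status) */

static unsigned long test_and_stall_root(void)
{
	unsigned long oldbit = root_status & 1;	/* btsl $0 + sbbl */
	root_status |= 1;
	return oldbit;
}

int main(void)
{
	/* Virtual flags exactly as __raw_local_irq_save() computes
	 * them: IF appears set while the root domain is not stalled. */
	unsigned long flags = (!test_and_stall_root()) << 9;
	printf("virtual flags: 0x%lx (IF %s)\n", flags,
	       (flags & X86_EFLAGS_IF) ? "on" : "off");
	printf("root stalled now: %lu\n", root_status & 1);
	return 0;
}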
(-)a/arch/x86/include/asm/irq_vectors.h (+10 lines)
Lines 91-100

 #define THRESHOLD_APIC_VECTOR		0xf9
 #define REBOOT_VECTOR			0xf8
 
+#ifdef CONFIG_IPIPE
+/* f0-f2 used for TLB flush, f3-f7 reserved for the I-pipe */
+#define INVALIDATE_TLB_VECTOR_END	0xf2
+#define INVALIDATE_TLB_VECTOR_START	0xf0
+#define NUM_INVALIDATE_TLB_VECTORS	3
+#else /* !CONFIG_IPIPE */
 /* f0-f7 used for spreading out TLB flushes: */
 #define INVALIDATE_TLB_VECTOR_END	0xf7
 #define INVALIDATE_TLB_VECTOR_START	0xf0
 #define NUM_INVALIDATE_TLB_VECTORS	   8
+#endif
 
 /*
  * Local APIC timer IRQ vector is on a different priority level,

Lines 120-125

  */
 #define MCE_SELF_VECTOR			0xeb
 
+/* I-pipe: Lowest number of vectors above */
+#define FIRST_SYSTEM_VECTOR		0xea
+
 /*
  * First APIC vector available to drivers: (vectors 0x30-0xee) we
  * start at 0x31(0x41) to spread out vectors evenly between priority
(-)a/arch/x86/include/asm/irqflags.h (-4 / +125 lines)
Lines 4-9

 #include <asm/processor-flags.h>
 
 #ifndef __ASSEMBLY__
+
+#include <linux/ipipe_base.h>
+#include <linux/ipipe_trace.h>
+
 /*
  * Interrupt control:
  */

Lines 12-17 static inline unsigned long native_save_fl(void)

 {
 	unsigned long flags;
 
+#ifdef CONFIG_IPIPE
+	flags = (!__ipipe_test_root()) << 9;
+	barrier();
+#else
 	/*
 	 * "=rm" is safe here, because "pop" adjusts the stack before
 	 * it evaluates its effective address -- this is part of the

Lines 22-52 static inline unsigned long native_save_fl(void)

 		     : "=rm" (flags)
 		     : /* no input */
 		     : "memory");
+#endif
 
 	return flags;
 }
 
 static inline void native_restore_fl(unsigned long flags)
 {
+#ifdef CONFIG_IPIPE
+	barrier();
+	__ipipe_restore_root(!(flags & X86_EFLAGS_IF));
+#else
 	asm volatile("push %0 ; popf"
 		     : /* no output */
 		     :"g" (flags)
 		     :"memory", "cc");
+#endif
 }
 
 static inline void native_irq_disable(void)
 {
+#ifdef CONFIG_IPIPE
+	ipipe_check_context(ipipe_root_domain);
+	__ipipe_stall_root();
+	barrier();
+#else
 	asm volatile("cli": : :"memory");
+#endif
 }
 
 static inline void native_irq_enable(void)
 {
+#ifdef CONFIG_IPIPE
+	barrier();
+	__ipipe_unstall_root();
+#else
 	asm volatile("sti": : :"memory");
+#endif
 }
 
 static inline void native_safe_halt(void)
 {
+#ifdef CONFIG_IPIPE
+	barrier();
+	__ipipe_halt_root();
+#else
 	asm volatile("sti; hlt": : :"memory");
+#endif
 }
 
 static inline void native_halt(void)

Lines 71-76 static inline void raw_local_irq_restore(unsigned long flags)

 	native_restore_fl(flags);
 }
 
+static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real)
+{
+	/*
+	 * Merge virtual and real interrupt mask bits into a single
+	 * (32bit) word.
+	 */
+	return (real & ~(1L << 31)) | ((virt != 0) << 31);
+}
+
+static inline int raw_demangle_irq_bits(unsigned long *x)
+{
+	int virt = (*x & (1L << 31)) != 0;
+	*x &= ~(1L << 31);
+	return virt;
+}
+
+#define local_irq_save_hw_notrace(x) \
+	__asm__ __volatile__("pushf ; pop %0 ; cli":"=g" (x): /* no input */ :"memory")
+#define local_irq_restore_hw_notrace(x) \
+	__asm__ __volatile__("push %0 ; popf": /* no output */ :"g" (x):"memory", "cc")
+
+#define local_save_flags_hw(x)	__asm__ __volatile__("pushf ; pop %0":"=g" (x): /* no input */)
+
+#define irqs_disabled_hw()		\
+    ({					\
+	unsigned long x;		\
+	local_save_flags_hw(x);		\
+	!((x) & X86_EFLAGS_IF);		\
+    })
+
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
+#define local_irq_disable_hw() do {			\
+		if (!irqs_disabled_hw()) {		\
+			local_irq_disable_hw_notrace();	\
+			ipipe_trace_begin(0x80000000);	\
+		}					\
+	} while (0)
+#define local_irq_enable_hw() do {			\
+		if (irqs_disabled_hw()) {		\
+			ipipe_trace_end(0x80000000);	\
+			local_irq_enable_hw_notrace();	\
+		}					\
+	} while (0)
+#define local_irq_save_hw(x) do {			\
+		local_save_flags_hw(x);			\
+		if ((x) & X86_EFLAGS_IF) {		\
+			local_irq_disable_hw_notrace();	\
+			ipipe_trace_begin(0x80000001);	\
+		}					\
+	} while (0)
+#define local_irq_restore_hw(x) do {			\
+		if ((x) & X86_EFLAGS_IF)		\
+			ipipe_trace_end(0x80000001);	\
+		local_irq_restore_hw_notrace(x);	\
+	} while (0)
+#else /* !CONFIG_IPIPE_TRACE_IRQSOFF */
+#define local_irq_save_hw(x)		local_irq_save_hw_notrace(x)
+#define local_irq_restore_hw(x)		local_irq_restore_hw_notrace(x)
+#define local_irq_enable_hw()		local_irq_enable_hw_notrace()
+#define local_irq_disable_hw()		local_irq_disable_hw_notrace()
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#define local_irq_disable_hw_notrace()	__asm__ __volatile__("cli": : :"memory")
+#define local_irq_enable_hw_notrace()	__asm__ __volatile__("sti": : :"memory")
+
 static inline void raw_local_irq_disable(void)
 {
 	native_irq_disable();

Lines 104-119 static inline void halt(void)

  */
 static inline unsigned long __raw_local_irq_save(void)
 {
+#ifdef CONFIG_IPIPE
+	unsigned long flags = (!__ipipe_test_and_stall_root()) << 9;
+	barrier();
+#else
 	unsigned long flags = __raw_local_save_flags();
 
 	raw_local_irq_disable();
+#endif
 
 	return flags;
 }
 #else
 
-#define ENABLE_INTERRUPTS(x)	sti
-#define DISABLE_INTERRUPTS(x)	cli
+#ifdef CONFIG_IPIPE
+#ifdef CONFIG_X86_32
+#define DISABLE_INTERRUPTS(clobbers)	PER_CPU(ipipe_percpu_darray, %eax); btsl $0,(%eax); sti
+#define ENABLE_INTERRUPTS(clobbers)	call __ipipe_unstall_root
+#else /* CONFIG_X86_64 */
+/* Not worth virtualizing in x86_64 mode. */
+#define DISABLE_INTERRUPTS(clobbers)	cli
+#define ENABLE_INTERRUPTS(clobbers)	sti
+#endif /* CONFIG_X86_64 */
+#define ENABLE_INTERRUPTS_HW_COND	sti
+#define DISABLE_INTERRUPTS_HW_COND	cli
+#define DISABLE_INTERRUPTS_HW(clobbers)	cli
+#define ENABLE_INTERRUPTS_HW(clobbers)	sti
+#else /* !CONFIG_IPIPE */
+#define ENABLE_INTERRUPTS(x)		sti
+#define DISABLE_INTERRUPTS(x)		cli
+#define ENABLE_INTERRUPTS_HW_COND
+#define DISABLE_INTERRUPTS_HW_COND
+#define DISABLE_INTERRUPTS_HW(clobbers)	DISABLE_INTERRUPTS(clobbers)
+#define ENABLE_INTERRUPTS_HW(clobbers)	ENABLE_INTERRUPTS(clobbers)
+#endif /* !CONFIG_IPIPE */
 
 #ifdef CONFIG_X86_64
 #define SWAPGS	swapgs

Lines 156-163 static inline unsigned long __raw_local_irq_save(void)

 #define raw_local_save_flags(flags)				\
 	do { (flags) = __raw_local_save_flags(); } while (0)
 
-#define raw_local_irq_save(flags)				\
-	do { (flags) = __raw_local_irq_save(); } while (0)
+#define raw_local_irq_save(flags) do {			\
+		ipipe_check_context(ipipe_root_domain);	\
+		(flags) = __raw_local_irq_save();	\
+	} while (0)
 
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
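
The raw_mangle_irq_bits()/raw_demangle_irq_bits() pair added above packs the virtual (stall) state into bit 31 of the word that otherwise carries the real EFLAGS. A runnable round-trip check of the same logic, with an explicit unsigned cast added for a clean user-space build:

/* Round-trip check of the mangle/demangle helpers from the hunk above. */
#include <assert.h>
#include <stdio.h>

static unsigned long mangle(int virt, unsigned long real)
{
	return (real & ~(1UL << 31)) | ((unsigned long)(virt != 0) << 31);
}

static int demangle(unsigned long *x)
{
	int virt = (*x & (1UL << 31)) != 0;
	*x &= ~(1UL << 31);
	return virt;
}

int main(void)
{
	unsigned long word = mangle(1, 0x246);	/* a typical EFLAGS value */
	int virt = demangle(&word);
	assert(virt == 1 && word == 0x246);	/* both bits recovered */
	printf("virt=%d real=0x%lx\n", virt, word);
	return 0;
}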
(-)a/arch/x86/include/asm/mmu_context.h (-3 / +19 lines)
Lines 30-40 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)

 #endif
 }
 
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk)
+static inline void __switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			       struct task_struct *tsk)
 {
 	unsigned cpu = smp_processor_id();
 
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
+	WARN_ON_ONCE(!irqs_disabled_hw());
+#endif
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
 		cpumask_clear_cpu(cpu, mm_cpumask(prev));

Lines 70-79 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,

 #endif
 }
 
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	unsigned long flags;
+	local_irq_save_hw_cond(flags);
+	__switch_mm(prev, next, tsk);
+	local_irq_restore_hw_cond(flags);
+}
+
+#define ipipe_mm_switch_protect(flags)	local_irq_save_hw_cond(flags)
+#define ipipe_mm_switch_unprotect(flags) \
+	local_irq_restore_hw_cond(flags)
+
 #define activate_mm(prev, next)			\
 do {						\
 	paravirt_activate_mm((prev), (next));	\
-	switch_mm((prev), (next), NULL);	\
+	__switch_mm((prev), (next), NULL);	\
 } while (0);
 
 #ifdef CONFIG_X86_32
(-)a/arch/x86/include/asm/nmi.h (-1 / +1 lines)
Lines 29-35 extern void setup_apic_nmi_watchdog(void *);

 extern void stop_apic_nmi_watchdog(void *);
 extern void disable_timer_nmi_watchdog(void);
 extern void enable_timer_nmi_watchdog(void);
-extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
+extern int (*nmi_watchdog_tick)(struct pt_regs *regs, unsigned reason);
 extern void cpu_nmi_set_wd_enabled(void);
 
 extern atomic_t nmi_active;
(-)a/arch/x86/include/asm/processor.h (+1 lines)
Lines 435-440 struct thread_struct {

 	unsigned short		ds;
 	unsigned short		fsindex;
 	unsigned short		gsindex;
+	unsigned long		rip;
 #endif
 #ifdef CONFIG_X86_32
 	unsigned long		ip;
(-)a/arch/x86/include/asm/system.h (-1 / +9 lines)
Lines 126-133 do { \

 #define switch_to(prev, next, last) \
 	asm volatile(SAVE_CONTEXT					  \
 	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
+	     "movq $thread_return,%P[threadrip](%[prev])\n\t" /* save RIP */	  \
 	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
-	     "call __switch_to\n\t"					  \
+	     "pushq %P[threadrip](%[next])\n\t" /* restore RIP */	  \
+	     "jmp __switch_to\n\t"					  \
 	     ".globl thread_return\n"					  \
 	     "thread_return:\n\t"					  \
 	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \

Lines 141-146 do { \

 	       __switch_canary_oparam					  \
 	     : [next] "S" (next), [prev] "D" (prev),			  \
 	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
+	       [threadrip] "i" (offsetof(struct task_struct, thread.rip)), \
 	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
 	       [_tif_fork] "i" (_TIF_FORK),			  	  \
 	       [thread_info] "i" (offsetof(struct task_struct, stack)),   \

Lines 305-312 static inline void native_wbinvd(void)

 #else
 #define read_cr0()	(native_read_cr0())
 #define write_cr0(x)	(native_write_cr0(x))
+#ifdef CONFIG_IPIPE
+#define read_cr2()	__raw_get_cpu_var(__ipipe_cr2)
+#define write_cr2(x)	__raw_get_cpu_var(__ipipe_cr2) = (x)
+#else /* !CONFIG_IPIPE */
 #define read_cr2()	(native_read_cr2())
 #define write_cr2(x)	(native_write_cr2(x))
+#endif /* !CONFIG_IPIPE */
 #define read_cr3()	(native_read_cr3())
 #define write_cr3(x)	(native_write_cr3(x))
 #define read_cr4()	(native_read_cr4())
(-)a/arch/x86/include/asm/traps.h (-1 / +1 lines)
Lines 82-89 extern int panic_on_unrecovered_nmi;

 void math_error(void __user *);
 void math_emulate(struct math_emu_info *);
 #ifndef CONFIG_X86_32
-asmlinkage void smp_thermal_interrupt(void);
 asmlinkage void mce_threshold_interrupt(void);
 #endif
+asmlinkage void smp_thermal_interrupt(void);
 
 #endif /* _ASM_X86_TRAPS_H */
(-)a/arch/x86/kernel/Makefile (+1 lines)
Lines 85-90 obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o

 obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_VM86)		+= vm86_32.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+obj-$(CONFIG_IPIPE)		+= ipipe.o
 
 obj-$(CONFIG_HPET_TIMER) 	+= hpet.o
 
(-)a/arch/x86/kernel/apic/apic.c (-10 / +14 lines)
Lines 446-452 static void lapic_timer_setup(enum clock_event_mode mode,

 	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
 		return;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:

Lines 466-472 static void lapic_timer_setup(enum clock_event_mode mode,

 		break;
 	}
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 /*

Lines 982-988 void lapic_shutdown(void)

 	if (!cpu_has_apic && !apic_from_smp_config())
 		return;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 
 #ifdef CONFIG_X86_32
 	if (!enabled_via_apicbase)

Lines 992-998 void lapic_shutdown(void)

 		disable_local_APIC();
 
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 /*

Lines 1166-1171 static void __cpuinit lapic_setup_esr(void)

 			oldvalue, value);
 }
 
+int __ipipe_check_lapic(void)
+{
+	return !(lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY);
+}
 
 /**
  * setup_local_APIC - setup the local APIC

Lines 1229-1235 void __cpuinit setup_local_APIC(void)

 		value = apic_read(APIC_ISR + i*0x10);
 		for (j = 31; j >= 0; j--) {
 			if (value & (1<<j))
-				ack_APIC_irq();
+				__ack_APIC_irq();
 		}
 	}
 

Lines 1735-1741 void smp_spurious_interrupt(struct pt_regs *regs)

 	 */
 	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
 	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
-		ack_APIC_irq();
+		__ack_APIC_irq();
 
 	inc_irq_stat(irq_spurious_count);
 

Lines 2004-2016 static int lapic_suspend(struct sys_device *dev, pm_message_t state)

 		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
 #endif
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	disable_local_APIC();
 
 	if (intr_remapping_enabled)
 		disable_intr_remapping();
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 	return 0;
 }
 

Lines 2025-2031 static int lapic_resume(struct sys_device *dev)

 	if (!apic_pm_state.active)
 		return 0;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	if (intr_remapping_enabled) {
 		ioapic_entries = alloc_ioapic_entries();
 		if (!ioapic_entries) {

Lines 2091-2097 static int lapic_resume(struct sys_device *dev)

 		free_ioapic_entries(ioapic_entries);
 	}
 restore:
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	return ret;
 }
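
The systematic substitution in this file is the point to notice: under the I-pipe, plain local_irq_save() only sets Linux's virtual stall bit, so an interrupt owned by a real-time domain could still preempt the APIC register sequence; local_irq_save_hw() really clears EFLAGS.IF for every domain. A deliberately tiny user-space model of the distinction, with two booleans standing in for the two masking levels:

/* Toy model: virtual vs. hardware interrupt masking. */
#include <stdio.h>

static int root_stalled;	/* virtual mask: affects Linux only */
static int hw_if_enabled = 1;	/* real EFLAGS.IF: affects all domains */

static void virt_irq_save(void) { root_stalled = 1; }
static void hw_irq_save(void)   { hw_if_enabled = 0; }

int main(void)
{
	virt_irq_save();
	printf("virtual mask set; RT domain interruptible? %s\n",
	       hw_if_enabled ? "yes" : "no");
	hw_irq_save();
	printf("hardware mask set; RT domain interruptible? %s\n",
	       hw_if_enabled ? "yes" : "no");
	return 0;
}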
(-)a/arch/x86/kernel/apic/apic_flat_64.c (-2 / +2 lines)
Lines 72-80 static inline void _flat_send_IPI_mask(unsigned long mask, int vector)

 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
(-)a/arch/x86/kernel/apic/io_apic.c (-9 / +118 lines)
Lines 75-82 Link Here
75
 */
75
 */
76
int sis_apic_bug = -1;
76
int sis_apic_bug = -1;
77
77
78
static DEFINE_SPINLOCK(ioapic_lock);
78
static IPIPE_DEFINE_SPINLOCK(ioapic_lock);
79
static DEFINE_SPINLOCK(vector_lock);
79
static IPIPE_DEFINE_SPINLOCK(vector_lock);
80
#ifdef CONFIG_IPIPE
81
unsigned long bugous_edge_irq_triggers[(NR_IRQS + BITS_PER_LONG - 1) / BITS_PER_LONG];
82
#endif
80
83
81
/*
84
/*
82
 * # of IRQ routing registers
85
 * # of IRQ routing registers
Lines 417-422 static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned Link Here
417
	writel(value, &io_apic->data);
420
	writel(value, &io_apic->data);
418
}
421
}
419
422
423
#if !defined(CONFIG_IPIPE) || defined(CONFIG_SMP)
424
420
static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
425
static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
421
{
426
{
422
	struct irq_pin_list *entry;
427
	struct irq_pin_list *entry;
Lines 440-445 static bool io_apic_level_ack_pending(struct irq_cfg *cfg) Link Here
440
	return false;
445
	return false;
441
}
446
}
442
447
448
#endif /* !CONFIG_IPIPE || CONFIG_SMP */
449
443
union entry_union {
450
union entry_union {
444
	struct { u32 w1, w2; };
451
	struct { u32 w1, w2; };
445
	struct IO_APIC_route_entry entry;
452
	struct IO_APIC_route_entry entry;
Lines 615-620 static void mask_IO_APIC_irq_desc(struct irq_desc *desc) Link Here
615
	BUG_ON(!cfg);
622
	BUG_ON(!cfg);
616
623
617
	spin_lock_irqsave(&ioapic_lock, flags);
624
	spin_lock_irqsave(&ioapic_lock, flags);
625
 	ipipe_irq_lock(desc->irq);
618
	__mask_IO_APIC_irq(cfg);
626
	__mask_IO_APIC_irq(cfg);
619
	spin_unlock_irqrestore(&ioapic_lock, flags);
627
	spin_unlock_irqrestore(&ioapic_lock, flags);
620
}
628
}
Lines 625-631 static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) Link Here
625
	unsigned long flags;
633
	unsigned long flags;
626
634
627
	spin_lock_irqsave(&ioapic_lock, flags);
635
	spin_lock_irqsave(&ioapic_lock, flags);
636
#ifdef CONFIG_IPIPE
637
 	if (test_and_clear_bit(desc->irq, &bugous_edge_irq_triggers[0]))
638
		__unmask_and_level_IO_APIC_irq(cfg);
639
	else
640
#endif
628
	__unmask_IO_APIC_irq(cfg);
641
	__unmask_IO_APIC_irq(cfg);
642
	ipipe_irq_unlock(desc->irq);
629
	spin_unlock_irqrestore(&ioapic_lock, flags);
643
	spin_unlock_irqrestore(&ioapic_lock, flags);
630
}
644
}
631
645
Lines 2250-2255 static unsigned int startup_ioapic_irq(unsigned int irq) Link Here
2250
	}
2264
	}
2251
	cfg = irq_cfg(irq);
2265
	cfg = irq_cfg(irq);
2252
	__unmask_IO_APIC_irq(cfg);
2266
	__unmask_IO_APIC_irq(cfg);
2267
	ipipe_irq_unlock(irq);
2253
	spin_unlock_irqrestore(&ioapic_lock, flags);
2268
	spin_unlock_irqrestore(&ioapic_lock, flags);
2254
2269
2255
	return was_pending;
2270
	return was_pending;
Lines 2529-2551 static void irq_complete_move(struct irq_desc **descp) Link Here
2529
static inline void irq_complete_move(struct irq_desc **descp) {}
2544
static inline void irq_complete_move(struct irq_desc **descp) {}
2530
#endif
2545
#endif
2531
2546
2547
#if defined(CONFIG_IPIPE) && defined(CONFIG_SMP)
2548
2549
#ifdef CONFIG_INTR_REMAP
2550
static void eoi_ioapic_irq(struct irq_desc *desc);
2551
#else /* !CONFIG_INTR_REMAP */
2552
static inline void eoi_ioapic_irq(struct irq_desc *desc) {}
2553
#endif /* !CONFIG_INTR_REMAP */
2554
2555
static void move_apic_irq(unsigned int irq)
2556
{
2557
	struct irq_desc *desc = irq_to_desc(irq);
2558
	struct irq_cfg *cfg;
2559
2560
	if (desc->handle_irq == &handle_edge_irq) {
2561
		spin_lock(&desc->lock);
2562
		irq_complete_move(&desc);
2563
		move_native_irq(irq);
2564
		spin_unlock(&desc->lock);
2565
	} else if (desc->handle_irq == &handle_fasteoi_irq) {
2566
		spin_lock(&desc->lock);
2567
		irq_complete_move(&desc);
2568
		if (irq_remapped(irq))
2569
			eoi_ioapic_irq(desc);
2570
		if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
2571
			cfg = desc->chip_data;
2572
			if (!io_apic_level_ack_pending(cfg))
2573
				move_masked_irq(irq);
2574
			unmask_IO_APIC_irq_desc(desc);
2575
		}
2576
		spin_unlock(&desc->lock);
2577
	} else
2578
		WARN_ON_ONCE(1);
2579
}
2580
#endif /* CONFIG_IPIPE && CONFIG_SMP */
2581
2532
static void ack_apic_edge(unsigned int irq)
2582
static void ack_apic_edge(unsigned int irq)
2533
{
2583
{
2584
#ifndef CONFIG_IPIPE
2534
	struct irq_desc *desc = irq_to_desc(irq);
2585
	struct irq_desc *desc = irq_to_desc(irq);
2535
2586
2536
	irq_complete_move(&desc);
2587
	irq_complete_move(&desc);
2537
	move_native_irq(irq);
2588
	move_native_irq(irq);
2538
	ack_APIC_irq();
2589
#endif /* CONFIG_IPIPE */
2590
	__ack_APIC_irq();
2539
}
2591
}
2540
2592
2541
atomic_t irq_mis_count;
2593
atomic_t irq_mis_count;
2542
2594
2543
static void ack_apic_level(unsigned int irq)
2595
static void ack_apic_level(unsigned int irq)
2544
{
2596
{
2545
	struct irq_desc *desc = irq_to_desc(irq);
2546
	unsigned long v;
2597
	unsigned long v;
2547
	int i;
2598
	int i;
2548
	struct irq_cfg *cfg;
2599
	struct irq_cfg *cfg;
2600
#ifndef CONFIG_IPIPE
2601
	struct irq_desc *desc = irq_to_desc(irq);
2549
	int do_unmask_irq = 0;
2602
	int do_unmask_irq = 0;
2550
2603
2551
	irq_complete_move(&desc);
2604
	irq_complete_move(&desc);
Lines 2628-2633 static void ack_apic_level(unsigned int irq)
 		__unmask_and_level_IO_APIC_irq(cfg);
 		spin_unlock(&ioapic_lock);
 	}
+#else /* CONFIG_IPIPE */
+	/*
+	 * Prevent low priority IRQs grabbed by high priority domains
+	 * from being delayed, waiting for a high priority interrupt
+	 * handler running in a low priority domain to complete.
+	 */
+	cfg = irq_cfg(irq);
+	i = cfg->vector;
+	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
+	spin_lock(&ioapic_lock);
+	if (unlikely(!(v & (1 << (i & 0x1f))))) {
+		/* IO-APIC erratum: see comment above. */
+		atomic_inc(&irq_mis_count);
+		__mask_and_edge_IO_APIC_irq(cfg);
+		set_bit(irq, &bugous_edge_irq_triggers[0]);
+	} else
+		__mask_IO_APIC_irq(cfg);
+	spin_unlock(&ioapic_lock);
+	__ack_APIC_irq();
+#endif /* CONFIG_IPIPE */
 }
 
 #ifdef CONFIG_INTR_REMAP
Lines 2656-2669 eoi_ioapic_irq(struct irq_desc *desc)
 
 static void ir_ack_apic_edge(unsigned int irq)
 {
-	ack_APIC_irq();
+	__ack_APIC_irq();
 }
 
 static void ir_ack_apic_level(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	ack_APIC_irq();
+	__ack_APIC_irq();
 	eoi_ioapic_irq(desc);
 }
 #endif /* CONFIG_INTR_REMAP */
Lines 2677-2682 static struct irq_chip ioapic_chip __read_mostly = {
 	.eoi		= ack_apic_level,
 #ifdef CONFIG_SMP
 	.set_affinity	= set_ioapic_affinity_irq,
+#ifdef CONFIG_IPIPE
+	.move		= move_apic_irq,
+#endif
 #endif
 	.retrigger	= ioapic_retrigger_irq,
 };
Lines 2691-2696 static struct irq_chip ir_ioapic_chip __read_mostly = {
 	.eoi		= ir_ack_apic_level,
 #ifdef CONFIG_SMP
 	.set_affinity	= set_ir_ioapic_affinity_irq,
+#ifdef CONFIG_IPIPE
+	.move		= move_apic_irq,
+#endif
 #endif
 #endif
 	.retrigger	= ioapic_retrigger_irq,
Lines 2736-2758 static inline void init_IO_APIC_traps(void)
 
 static void mask_lapic_irq(unsigned int irq)
 {
-	unsigned long v;
+	unsigned long v, flags;
 
+	local_irq_save_hw_cond(flags);
+	ipipe_irq_lock(irq);
 	v = apic_read(APIC_LVT0);
 	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
+	local_irq_restore_hw_cond(flags);
 }
 
 static void unmask_lapic_irq(unsigned int irq)
 {
-	unsigned long v;
+	unsigned long v, flags;
 
+	local_irq_save_hw_cond(flags);
 	v = apic_read(APIC_LVT0);
 	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
+	ipipe_irq_unlock(irq);
+	local_irq_restore_hw_cond(flags);
 }
 
 static void ack_lapic_irq(unsigned int irq)
 {
-	ack_APIC_irq();
+	__ack_APIC_irq();
 }
 
 static struct irq_chip lapic_chip __read_mostly = {
Lines 2760-2765 static struct irq_chip lapic_chip __read_mostly = {
 	.mask		= mask_lapic_irq,
 	.unmask		= unmask_lapic_irq,
 	.ack		= ack_lapic_irq,
+#if defined(CONFIG_IPIPE) && defined(CONFIG_SMP)
+	.move		= move_apic_irq,
+#endif
 };
 
 static void lapic_register_intr(int irq, struct irq_desc *desc)
Lines 3007-3012 static inline void __init check_timer(void)
 		    "...trying to set up timer as Virtual Wire IRQ...\n");
 
 	lapic_register_intr(0, desc);
+#if defined(CONFIG_IPIPE) && defined(CONFIG_X86_64)
+	irq_to_desc(0)->ipipe_ack = __ipipe_ack_edge_irq;
+	irq_to_desc(0)->ipipe_end = __ipipe_end_edge_irq;
+#endif
 	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
 	enable_8259A_irq(0);
 
Lines 3404-3409 static struct irq_chip msi_chip = {
 	.ack		= ack_apic_edge,
 #ifdef CONFIG_SMP
 	.set_affinity	= set_msi_irq_affinity,
+#ifdef CONFIG_IPIPE
+	.move		= move_apic_irq,
+#endif
 #endif
 	.retrigger	= ioapic_retrigger_irq,
 };
Lines 3416-3421 static struct irq_chip msi_ir_chip = {
 	.ack		= ir_ack_apic_edge,
 #ifdef CONFIG_SMP
 	.set_affinity	= ir_set_msi_irq_affinity,
+#ifdef CONFIG_IPIPE
+	.move	= move_apic_irq,
+#endif
 #endif
 #endif
 	.retrigger	= ioapic_retrigger_irq,
Lines 3704-3709 static struct irq_chip ht_irq_chip = {
 	.ack		= ack_apic_edge,
 #ifdef CONFIG_SMP
 	.set_affinity	= set_ht_irq_affinity,
+#ifdef CONFIG_IPIPE
+	.move	= move_apic_irq,
+#endif
 #endif
 	.retrigger	= ioapic_retrigger_irq,
 };
Lines 4075-4080 int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
 	return 0;
 }
 
+#ifdef CONFIG_IPIPE
+unsigned __ipipe_get_ioapic_irq_vector(int irq)
+{
+	return irq >= IPIPE_FIRST_APIC_IRQ && irq < IPIPE_NR_XIRQS ?
+		ipipe_apic_irq_vector(irq) : irq_cfg(irq)->vector;
+}
+#endif /* CONFIG_IPIPE */
+
 /*
  * This function currently is only a helper for the i386 smp boot process where
  * we need to reprogram the ioredtbls to cater for the cpus which have come online
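A note for readers new to the I-pipe model, since the io_apic.c hunks above are the densest part of the patch: with CONFIG_IPIPE, a level-triggered IRQ is masked at the IO-APIC and acknowledged at the local APIC immediately on entry, and the line is only unmasked again once every pipeline domain has run its handler, so a slow root-domain handler cannot hold off further high-priority interrupts. The stand-alone C sketch below models only that ordering; all names and state in it are invented for illustration and are not part of the patch.

#include <stdio.h>
#include <stdbool.h>

static bool line_masked;       /* stands in for the IO-APIC RTE mask bit */
static bool lapic_in_service;  /* stands in for the local APIC ISR bit   */

static void pipeline_ack_level(int irq)
{
	line_masked = true;        /* like __mask_IO_APIC_irq(): stop retriggers */
	lapic_in_service = false;  /* like __ack_APIC_irq(): EOI right away      */
	printf("irq %d: masked and acked on entry\n", irq);
}

static void pipeline_end_level(int irq)
{
	line_masked = false;       /* unmask only after all domains have run */
	printf("irq %d: ended, line unmasked\n", irq);
}

int main(void)
{
	pipeline_ack_level(23);
	/* ...handlers run domain by domain, highest priority first... */
	pipeline_end_level(23);
	return line_masked || lapic_in_service;
}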
(-)a/arch/x86/kernel/apic/ipi.c (-10 / +10 lines)
Lines 29-40 void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
 	 * to an arbitrary mask, so I do a unicast to each CPU instead.
 	 * - mbligh
 	 */
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	for_each_cpu(query_cpu, mask) {
 		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
 				query_cpu), vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
Lines 46-59 void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
 
 	/* See Hack comment above */
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	for_each_cpu(query_cpu, mask) {
 		if (query_cpu == this_cpu)
 			continue;
 		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
 				 query_cpu), vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
Lines 68-79 void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
 	 * should be modified to do 1 message per cluster ID - mbligh
 	 */
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	for_each_cpu(query_cpu, mask)
 		__default_send_IPI_dest_field(
 			apic->cpu_to_logical_apicid(query_cpu), vector,
 			apic->dest_logical);
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
Lines 85-91 void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
 
 	/* See Hack comment above */
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	for_each_cpu(query_cpu, mask) {
 		if (query_cpu == this_cpu)
 			continue;
Lines 93-99 void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
 			apic->cpu_to_logical_apicid(query_cpu), vector,
 			apic->dest_logical);
 		}
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 #ifdef CONFIG_X86_32
Lines 109-118 void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
 	if (WARN_ONCE(!mask, "empty IPI mask"))
 		return;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
 	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 void default_send_IPI_allbutself(int vector)
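The ipi.c hunk is mechanical, but the rule behind it matters: under the interrupt pipeline, plain local_irq_save() only stalls the root domain virtually, so real interrupts can still preempt the APIC ICR write sequence; code that drives interrupt-delivery hardware must use the _hw variants, which act on the actual CPU interrupt flag. Here is a minimal user-space model of the two semantics; the state variables are invented for illustration.

#include <stdio.h>

static int virt_stalled;   /* root-domain soft mask (the "virtual" IF) */
static int hw_if = 1;      /* models the real EFLAGS.IF bit            */

static void local_irq_save_virt(unsigned long *flags)
{
	*flags = virt_stalled;
	virt_stalled = 1;      /* hardware IRQs can still fire here! */
}

static void local_irq_save_hw_model(unsigned long *flags)
{
	*flags = hw_if;
	hw_if = 0;             /* nothing can preempt the ICR sequence */
}

int main(void)
{
	unsigned long f;
	local_irq_save_hw_model(&f);
	printf("sending IPI with hw_if=%d (really off)\n", hw_if);
	hw_if = (int)f;        /* i.e. local_irq_restore_hw() */
	local_irq_save_virt(&f);
	printf("virtual section: hw_if=%d (still on)\n", hw_if);
	virt_stalled = (int)f;
	return 0;
}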
(-)a/arch/x86/kernel/apic/nmi.c (-1 / +5 lines)
Lines 59-64 static unsigned int nmi_hz = HZ;
 static DEFINE_PER_CPU(short, wd_enabled);
 static int endflag __initdata;
 
+static int default_nmi_watchdog_tick(struct pt_regs * regs, unsigned reason);
+int (*nmi_watchdog_tick) (struct pt_regs * regs, unsigned reason) = &default_nmi_watchdog_tick;
+EXPORT_SYMBOL(nmi_watchdog_tick);
+
 static inline unsigned int get_nmi_count(int cpu)
 {
 	return per_cpu(irq_stat, cpu).__nmi_count;
Lines 387-393 void touch_nmi_watchdog(void)
 EXPORT_SYMBOL(touch_nmi_watchdog);
 
 notrace __kprobes int
-nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
+default_nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 {
 	/*
 	 * Since current_thread_info()-> is always on the stack, and we
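The nmi.c change is an interposition hook: nmi_watchdog_tick stops being a function and becomes an exported pointer preinitialized to the old (now static) implementation, so another layer can observe NMIs first and chain to the default. A stand-alone sketch of the same pattern follows; every name in it is invented for illustration.

#include <stdio.h>

struct pt_regs;  /* opaque in this sketch */

static int default_tick(struct pt_regs *regs, unsigned reason)
{
	printf("default watchdog tick, reason=%u\n", reason);
	return 0;
}

/* the patch exports a pointer like this instead of the function itself */
static int (*watchdog_tick)(struct pt_regs *, unsigned) = default_tick;

static int cokernel_tick(struct pt_regs *regs, unsigned reason)
{
	printf("co-kernel saw the NMI first\n");
	return default_tick(regs, reason);  /* then chain to the default */
}

int main(void)
{
	watchdog_tick(NULL, 1);        /* stock behaviour */
	watchdog_tick = cokernel_tick;
	watchdog_tick(NULL, 1);        /* interposed behaviour */
	return 0;
}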
(-)a/arch/x86/kernel/apic/x2apic_cluster.c (-6 / +6 lines)
Lines 61-73 static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 
 	x2apic_wrmsr_fence();
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	for_each_cpu(query_cpu, mask) {
 		__x2apic_send_IPI_dest(
 			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
 			vector, apic->dest_logical);
 	}
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 static void
Lines 79-85 static void
 
 	x2apic_wrmsr_fence();
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	for_each_cpu(query_cpu, mask) {
 		if (query_cpu == this_cpu)
 			continue;
Lines 87-93 static void
 				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
 				vector, apic->dest_logical);
 	}
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 static void x2apic_send_IPI_allbutself(int vector)
Lines 98-104 static void x2apic_send_IPI_allbutself(int vector)
 
 	x2apic_wrmsr_fence();
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	for_each_online_cpu(query_cpu) {
 		if (query_cpu == this_cpu)
 			continue;
Lines 106-112 static void x2apic_send_IPI_allbutself(int vector)
 				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
 				vector, apic->dest_logical);
 	}
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 static void x2apic_send_IPI_all(int vector)
(-)a/arch/x86/kernel/apic/x2apic_phys.c (-6 / +6 lines)
Lines 62-73 static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 
 	x2apic_wrmsr_fence();
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	for_each_cpu(query_cpu, mask) {
 		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
 				       vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 static void
Lines 79-92 static void
 
 	x2apic_wrmsr_fence();
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	for_each_cpu(query_cpu, mask) {
 		if (query_cpu != this_cpu)
 			__x2apic_send_IPI_dest(
 				per_cpu(x86_cpu_to_apicid, query_cpu),
 				vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 static void x2apic_send_IPI_allbutself(int vector)
Lines 97-110 static void x2apic_send_IPI_allbutself(int vector)
 
 	x2apic_wrmsr_fence();
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	for_each_online_cpu(query_cpu) {
 		if (query_cpu == this_cpu)
 			continue;
 		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
 				       vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 static void x2apic_send_IPI_all(int vector)
(-)a/arch/x86/kernel/cpu/mtrr/cyrix.c (-2 / +10 lines)
Lines 18-24 cyrix_get_arr(unsigned int reg, unsigned long *base,
 
 	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 
 	ccr3 = getCx86(CX86_CCR3);
 	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
Lines 28-34 cyrix_get_arr(unsigned int reg, unsigned long *base,
 	rcr = getCx86(CX86_RCR_BASE + reg);
 	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	shift = ((unsigned char *) base)[1] & 0x0f;
 	*base >>= PAGE_SHIFT;
Lines 178-183 static void cyrix_set_arr(unsigned int reg, unsigned long base,
 			  unsigned long size, mtrr_type type)
 {
 	unsigned char arr, arr_type, arr_size;
+	unsigned long flags;
 
 	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */
 
Lines 221-226 static void cyrix_set_arr(unsigned int reg, unsigned long base,
 		}
 	}
 
+	local_irq_save_hw(flags);
+
 	prepare_set();
 
 	base <<= PAGE_SHIFT;
Lines 230-235 static void cyrix_set_arr(unsigned int reg, unsigned long base,
 	setCx86(CX86_RCR_BASE + reg, arr_type);
 
 	post_set();
+
+	local_irq_restore_hw(flags);
 }
 
 typedef struct {
Lines 247-254 static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 };
 
 static void cyrix_set_all(void)
 {
+	unsigned long flags;
 	int i;
 
+	local_irq_save_hw(flags);
 	prepare_set();
 
 	/* the CCRs are not contiguous */
Lines 263-268 static void cyrix_set_all(void)
 	}
 
 	post_set();
+	local_irq_restore_hw(flags);
 }
 
 static struct mtrr_ops cyrix_mtrr_ops = {
(-)a/arch/x86/kernel/cpu/mtrr/generic.c (-5 / +5 lines)
Lines 635-641 static void generic_set_all(void)
 	unsigned long mask, count;
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	prepare_set();
 
 	/* Actually set the state */
Lines 645-651 static void generic_set_all(void)
 	pat_init();
 
 	post_set();
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	/* Use the atomic bitops to update the global mask */
 	for (count = 0; count < sizeof mask * 8; ++count) {
Lines 669-680 static void generic_set_all(void)
 static void generic_set_mtrr(unsigned int reg, unsigned long base,
 			     unsigned long size, mtrr_type type)
 {
-	unsigned long flags;
+	unsigned long flags, _flags;
 	struct mtrr_var_range *vr;
 
 	vr = &mtrr_state.var_ranges[reg];
 
-	local_irq_save(flags);
+	local_irq_save_full(flags, _flags);
 	prepare_set();
 
 	if (size == 0) {
Lines 695-701 static void generic_set_mtrr(unsigned int reg, unsigned long base,
 	}
 
 	post_set();
-	local_irq_restore(flags);
+	local_irq_restore_full(flags, _flags);
 }
 
 int generic_validate_add_page(unsigned long base, unsigned long size,
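In the mtrr hunks, cyrix_set_all() and generic_set_all() switch to the hardware variants outright, while generic_set_mtrr() uses local_irq_save_full(flags, _flags), which by all appearances saves the virtual and the hardware interrupt state together, since MTRR reprogramming must exclude both root-domain handlers and any co-kernel activity. A hedged user-space model of what such a pair could look like, assuming exactly those semantics; the macro bodies are invented:

#include <stdio.h>

static int virt_stalled, hw_if = 1;

#define local_irq_save_full(vflags, rflags) \
	do { (vflags) = virt_stalled; virt_stalled = 1; \
	     (rflags) = hw_if; hw_if = 0; } while (0)

#define local_irq_restore_full(vflags, rflags) \
	do { hw_if = (int)(rflags); virt_stalled = (int)(vflags); } while (0)

int main(void)
{
	unsigned long flags, _flags;
	local_irq_save_full(flags, _flags);
	printf("critical section: virt=%d hw=%d\n", virt_stalled, hw_if);
	local_irq_restore_full(flags, _flags);
	printf("after restore:    virt=%d hw=%d\n", virt_stalled, hw_if);
	return 0;
}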
(-)a/arch/x86/kernel/dumpstack.c (+1 lines)
Lines 327-332 die_nmi(char *str, struct pt_regs *regs, int do_panic)
 	local_irq_enable();
 	do_exit(SIGBUS);
 }
+EXPORT_SYMBOL_GPL(die_nmi);
 
 static int __init oops_setup(char *s)
 {
(-)a/arch/x86/kernel/dumpstack_32.c (+3 lines)
Lines 108-113 void show_registers(struct pt_regs *regs)
 	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
 		TASK_COMM_LEN, current->comm, task_pid_nr(current),
 		current_thread_info(), current, task_thread_info(current));
+#ifdef CONFIG_IPIPE
+	printk(KERN_EMERG "I-pipe domain %s\n", ipipe_current_domain->name);
+#endif /* CONFIG_IPIPE */
 	/*
 	 * When in-kernel, we also print out the stack and code at the
 	 * time of the fault..
(-)a/arch/x86/kernel/dumpstack_64.c (+5 lines)
Lines 254-259 void show_registers(struct pt_regs *regs)
 	sp = regs->sp;
 	printk("CPU %d ", cpu);
 	__show_regs(regs, 1);
+#ifdef CONFIG_IPIPE
+	if (ipipe_current_domain != ipipe_root_domain)
+		printk("I-pipe domain %s\n", ipipe_current_domain->name);
+	else
+#endif /* CONFIG_IPIPE */
 	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
 		cur->comm, cur->pid, task_thread_info(cur), cur);
 
(-)a/arch/x86/kernel/entry_32.S (-24 / +136 lines)
Lines 44-49
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/irqflags.h>
+#include <asm/ipipe_base.h>
 #include <asm/errno.h>
 #include <asm/segment.h>
 #include <asm/smp.h>
Lines 79-84
 
 #define nr_syscalls ((syscall_table_size)/4)
 
+#ifdef CONFIG_IPIPE
+#define EMULATE_ROOT_IRET(bypass) \
+				call __ipipe_unstall_iret_root ; \
+				TRACE_IRQS_ON ; \
+				bypass: \
+				movl PT_EAX(%esp),%eax
+#define TEST_PREEMPTIBLE(regs)  call __ipipe_kpreempt_root ; testl %eax,%eax
+#define CATCH_ROOT_SYSCALL(bypass1,bypass2)	\
+				movl  %esp,%eax ; \
+				call __ipipe_syscall_root ; \
+				testl  %eax,%eax ; \
+				js    bypass1 ; \
+				jne   bypass2 ; \
+				movl PT_ORIG_EAX(%esp),%eax
+#define PUSH_XCODE(v)		pushl $ ex_ ## v
+#define PUSH_XVEC(v)		pushl $ ex_ ## v
+#define HANDLE_EXCEPTION(code)	movl %code,%ecx ; \
+				call __ipipe_handle_exception ; \
+				testl %eax,%eax	; \
+				jnz restore_ret
+#define DIVERT_EXCEPTION(code)	movl $(__USER_DS), %ecx	; \
+				movl %ecx, %ds ; \
+				movl %ecx, %es ; \
+				movl %esp, %eax	; \
+				movl $ex_ ## code,%edx ; \
+				call __ipipe_divert_exception ; \
+				testl %eax,%eax	; \
+				jnz restore_ret
+
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
+# define IPIPE_TRACE_IRQ_ENTER \
+	lea PT_EIP-4(%esp), %ebp; \
+	movl PT_ORIG_EAX(%esp), %eax; \
+	call ipipe_trace_begin
+# define IPIPE_TRACE_IRQ_EXIT \
+	pushl %eax; \
+	movl PT_ORIG_EAX+4(%esp), %eax; \
+	call ipipe_trace_end; \
+	popl %eax
+#else  /* !CONFIG_IPIPE_TRACE_IRQSOFF */
+#define IPIPE_TRACE_IRQ_ENTER
+#define IPIPE_TRACE_IRQ_EXIT
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
+#else /* !CONFIG_IPIPE */
+#define EMULATE_ROOT_IRET(bypass)
+#define TEST_PREEMPTIBLE(regs)		testl $X86_EFLAGS_IF,PT_EFLAGS(regs)
+#define CATCH_ROOT_SYSCALL(bypass1,bypass2)
+#define PUSH_XCODE(v)			pushl $v
+#define PUSH_XVEC(v)			pushl v
+#define HANDLE_EXCEPTION(code)		call *%code
+#define DIVERT_EXCEPTION(code)
+#endif /* CONFIG_IPIPE */
+
 #ifdef CONFIG_PREEMPT
 #define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
318
.endm
372
.endm
319
373
320
ENTRY(ret_from_fork)
374
ENTRY(ret_from_fork)
375
	ENABLE_INTERRUPTS_HW_COND
321
	CFI_STARTPROC
376
	CFI_STARTPROC
322
	pushl %eax
377
	pushl %eax
323
	CFI_ADJUST_CFA_OFFSET 4
378
	CFI_ADJUST_CFA_OFFSET 4
Lines 345-351 END(ret_from_fork) Link Here
345
	RING0_PTREGS_FRAME
400
	RING0_PTREGS_FRAME
346
ret_from_exception:
401
ret_from_exception:
347
	preempt_stop(CLBR_ANY)
402
	preempt_stop(CLBR_ANY)
348
ret_from_intr:
403
ENTRY(ret_from_intr)
349
	GET_THREAD_INFO(%ebp)
404
	GET_THREAD_INFO(%ebp)
350
check_userspace:
405
check_userspace:
351
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
406
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
Lines 369-382 END(ret_from_exception) Link Here
369
424
370
#ifdef CONFIG_PREEMPT
425
#ifdef CONFIG_PREEMPT
371
ENTRY(resume_kernel)
426
ENTRY(resume_kernel)
372
	DISABLE_INTERRUPTS(CLBR_ANY)
427
	DISABLE_INTERRUPTS_HW(CLBR_ANY)
373
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
428
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
374
	jnz restore_all
429
	jnz restore_all
375
need_resched:
430
need_resched:
376
	movl TI_flags(%ebp), %ecx	# need_resched set ?
431
	movl TI_flags(%ebp), %ecx	# need_resched set ?
377
	testb $_TIF_NEED_RESCHED, %cl
432
	testb $_TIF_NEED_RESCHED, %cl
378
	jz restore_all
433
	jz restore_all
379
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
434
   	TEST_PREEMPTIBLE(%esp)		# interrupts off (exception path) ?
380
	jz restore_all
435
	jz restore_all
381
	call preempt_schedule_irq
436
	call preempt_schedule_irq
382
	jmp need_resched
437
	jmp need_resched
Lines 424-430 sysenter_past_esp: Link Here
424
	pushl %eax
479
	pushl %eax
425
	CFI_ADJUST_CFA_OFFSET 4
480
	CFI_ADJUST_CFA_OFFSET 4
426
	SAVE_ALL
481
	SAVE_ALL
427
	ENABLE_INTERRUPTS(CLBR_NONE)
482
	ENABLE_INTERRUPTS_HW(CLBR_NONE)
428
483
429
/*
484
/*
430
 * Load the potential sixth argument from user stack.
485
 * Load the potential sixth argument from user stack.
Lines 440-445 sysenter_past_esp: Link Here
440
.previous
495
.previous
441
496
442
	GET_THREAD_INFO(%ebp)
497
	GET_THREAD_INFO(%ebp)
498
	CATCH_ROOT_SYSCALL(sysenter_tail,sysenter_out)
443
499
444
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
500
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
445
	jnz sysenter_audit
501
	jnz sysenter_audit
Lines 448-453 sysenter_do_call: Link Here
448
	jae syscall_badsys
504
	jae syscall_badsys
449
	call *sys_call_table(,%eax,4)
505
	call *sys_call_table(,%eax,4)
450
	movl %eax,PT_EAX(%esp)
506
	movl %eax,PT_EAX(%esp)
507
sysenter_tail:
451
	LOCKDEP_SYS_EXIT
508
	LOCKDEP_SYS_EXIT
452
	DISABLE_INTERRUPTS(CLBR_ANY)
509
	DISABLE_INTERRUPTS(CLBR_ANY)
453
	TRACE_IRQS_OFF
510
	TRACE_IRQS_OFF
Lines 456-465 sysenter_do_call: Link Here
456
	jne sysexit_audit
513
	jne sysexit_audit
457
sysenter_exit:
514
sysenter_exit:
458
/* if something modifies registers it must also disable sysexit */
515
/* if something modifies registers it must also disable sysexit */
516
 	EMULATE_ROOT_IRET(sysenter_out)
459
	movl PT_EIP(%esp), %edx
517
	movl PT_EIP(%esp), %edx
460
	movl PT_OLDESP(%esp), %ecx
518
	movl PT_OLDESP(%esp), %ecx
461
	xorl %ebp,%ebp
519
	xorl %ebp,%ebp
462
	TRACE_IRQS_ON
520
#ifndef CONFIG_IPIPE
521
  	TRACE_IRQS_ON
522
#endif
463
1:	mov  PT_FS(%esp), %fs
523
1:	mov  PT_FS(%esp), %fs
464
	PTGS_TO_GS
524
	PTGS_TO_GS
465
	ENABLE_INTERRUPTS_SYSEXIT
525
	ENABLE_INTERRUPTS_SYSEXIT
Lines 520-525 ENTRY(system_call) Link Here
520
	CFI_ADJUST_CFA_OFFSET 4
580
	CFI_ADJUST_CFA_OFFSET 4
521
	SAVE_ALL
581
	SAVE_ALL
522
	GET_THREAD_INFO(%ebp)
582
	GET_THREAD_INFO(%ebp)
583
 	CATCH_ROOT_SYSCALL(syscall_exit,restore_ret)
523
					# system call tracing in operation / emulation
584
					# system call tracing in operation / emulation
524
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
585
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
525
	jnz syscall_trace_entry
586
	jnz syscall_trace_entry
Lines 552-557 restore_all_notrace: Link Here
552
	CFI_REMEMBER_STATE
613
	CFI_REMEMBER_STATE
553
	je ldt_ss			# returning to user-space with LDT SS
614
	je ldt_ss			# returning to user-space with LDT SS
554
restore_nocheck:
615
restore_nocheck:
616
#ifdef CONFIG_IPIPE
617
	call __ipipe_unstall_iret_root
618
#endif /* CONFIG_IPIPE */
619
restore_ret:
555
	RESTORE_REGS 4			# skip orig_eax/error_code
620
	RESTORE_REGS 4			# skip orig_eax/error_code
556
	CFI_ADJUST_CFA_OFFSET -4
621
	CFI_ADJUST_CFA_OFFSET -4
557
irq_return:
622
irq_return:
Lines 559-565 irq_return: Link Here
559
.section .fixup,"ax"
624
.section .fixup,"ax"
560
ENTRY(iret_exc)
625
ENTRY(iret_exc)
561
	pushl $0			# no error code
626
	pushl $0			# no error code
562
	pushl $do_iret_error
627
  	PUSH_XCODE(do_iret_error)
563
	jmp error_code
628
	jmp error_code
564
.previous
629
.previous
565
.section __ex_table,"a"
630
.section __ex_table,"a"
Lines 613-619 ldt_ss: Link Here
613
	/* Disable interrupts, but do not irqtrace this section: we
678
	/* Disable interrupts, but do not irqtrace this section: we
614
	 * will soon execute iret and the tracer was already set to
679
	 * will soon execute iret and the tracer was already set to
615
	 * the irqstate after the iret */
680
	 * the irqstate after the iret */
616
	DISABLE_INTERRUPTS(CLBR_EAX)
681
	DISABLE_INTERRUPTS_HW(CLBR_EAX)
617
	lss (%esp), %esp		/* switch to espfix segment */
682
	lss (%esp), %esp		/* switch to espfix segment */
618
	CFI_ADJUST_CFA_OFFSET -8
683
	CFI_ADJUST_CFA_OFFSET -8
619
	jmp restore_nocheck
684
	jmp restore_nocheck
Lines 627-632 work_pending: Link Here
627
	testb $_TIF_NEED_RESCHED, %cl
692
	testb $_TIF_NEED_RESCHED, %cl
628
	jz work_notifysig
693
	jz work_notifysig
629
work_resched:
694
work_resched:
695
	ENABLE_INTERRUPTS_HW_COND
630
	call schedule
696
	call schedule
631
	LOCKDEP_SYS_EXIT
697
	LOCKDEP_SYS_EXIT
632
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
698
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
Lines 799-804 END(irq_entries_start) Link Here
799
END(interrupt)
865
END(interrupt)
800
.previous
866
.previous
801
867
868
#ifdef CONFIG_IPIPE
869
	.p2align CONFIG_X86_L1_CACHE_SHIFT
870
common_interrupt:
871
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
872
	SAVE_ALL
873
	IPIPE_TRACE_IRQ_ENTER
874
	movl %esp, %eax
875
	call *ipipe_irq_handler
876
	IPIPE_TRACE_IRQ_EXIT
877
	testl %eax,%eax
878
	jnz  ret_from_intr
879
	jmp restore_ret
880
	CFI_ENDPROC
881
882
#define BUILD_INTERRUPT3(name, nr, fn)	\
883
ENTRY(name)				\
884
	RING0_INT_FRAME;		\
885
	pushl $~(nr);			\
886
	CFI_ADJUST_CFA_OFFSET 4;	\
887
	SAVE_ALL;			\
888
	IPIPE_TRACE_IRQ_ENTER;		\
889
 	movl %esp, %eax;		\
890
	call *ipipe_irq_handler;	\
891
	IPIPE_TRACE_IRQ_EXIT;		\
892
	testl %eax,%eax;		\
893
	jnz  ret_from_intr;		\
894
	jmp restore_ret;	\
895
	CFI_ENDPROC
896
897
#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)
898
899
#ifdef CONFIG_X86_LOCAL_APIC
900
	BUILD_INTERRUPT(ipipe_ipi0,IPIPE_SERVICE_VECTOR0)
901
	BUILD_INTERRUPT(ipipe_ipi1,IPIPE_SERVICE_VECTOR1)
902
	BUILD_INTERRUPT(ipipe_ipi2,IPIPE_SERVICE_VECTOR2)
903
	BUILD_INTERRUPT(ipipe_ipi3,IPIPE_SERVICE_VECTOR3)
904
#ifdef CONFIG_SMP
905
	BUILD_INTERRUPT(ipipe_ipiX,IPIPE_CRITICAL_VECTOR)
906
#endif	
907
#endif
908
909
#else /* !CONFIG_IPIPE */
802
/*
910
/*
803
 * the CPU automatically disables interrupts when executing an IRQ vector,
911
 * the CPU automatically disables interrupts when executing an IRQ vector,
804
 * so IRQ-flags tracing has to follow that:
912
 * so IRQ-flags tracing has to follow that:
Lines 829-834 ENDPROC(name) Link Here
829
937
830
#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)
938
#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)
831
939
940
#endif /* !CONFIG_IPIPE */
941
832
/* The include is where all of the SMP etc. interrupts come from */
942
/* The include is where all of the SMP etc. interrupts come from */
833
#include <asm/entry_arch.h>
943
#include <asm/entry_arch.h>
834
944
Lines 836-842 ENTRY(coprocessor_error) Link Here
836
	RING0_INT_FRAME
946
	RING0_INT_FRAME
837
	pushl $0
947
	pushl $0
838
	CFI_ADJUST_CFA_OFFSET 4
948
	CFI_ADJUST_CFA_OFFSET 4
839
	pushl $do_coprocessor_error
949
 	PUSH_XCODE(do_coprocessor_error)
840
	CFI_ADJUST_CFA_OFFSET 4
950
	CFI_ADJUST_CFA_OFFSET 4
841
	jmp error_code
951
	jmp error_code
842
	CFI_ENDPROC
952
	CFI_ENDPROC
Lines 846-852 ENTRY(simd_coprocessor_error) Link Here
846
	RING0_INT_FRAME
956
	RING0_INT_FRAME
847
	pushl $0
957
	pushl $0
848
	CFI_ADJUST_CFA_OFFSET 4
958
	CFI_ADJUST_CFA_OFFSET 4
849
	pushl $do_simd_coprocessor_error
959
 	PUSH_XCODE(do_simd_coprocessor_error)
850
	CFI_ADJUST_CFA_OFFSET 4
960
	CFI_ADJUST_CFA_OFFSET 4
851
	jmp error_code
961
	jmp error_code
852
	CFI_ENDPROC
962
	CFI_ENDPROC
Lines 856-862 ENTRY(device_not_available) Link Here
856
	RING0_INT_FRAME
966
	RING0_INT_FRAME
857
	pushl $-1			# mark this as an int
967
	pushl $-1			# mark this as an int
858
	CFI_ADJUST_CFA_OFFSET 4
968
	CFI_ADJUST_CFA_OFFSET 4
859
	pushl $do_device_not_available
969
 	PUSH_XCODE(do_device_not_available)
860
	CFI_ADJUST_CFA_OFFSET 4
970
	CFI_ADJUST_CFA_OFFSET 4
861
	jmp error_code
971
	jmp error_code
862
	CFI_ENDPROC
972
	CFI_ENDPROC
Lines 881-887 ENTRY(overflow) Link Here
881
	RING0_INT_FRAME
991
	RING0_INT_FRAME
882
	pushl $0
992
	pushl $0
883
	CFI_ADJUST_CFA_OFFSET 4
993
	CFI_ADJUST_CFA_OFFSET 4
884
	pushl $do_overflow
994
 	PUSH_XCODE(do_overflow)
885
	CFI_ADJUST_CFA_OFFSET 4
995
	CFI_ADJUST_CFA_OFFSET 4
886
	jmp error_code
996
	jmp error_code
887
	CFI_ENDPROC
997
	CFI_ENDPROC
Lines 891-897 ENTRY(bounds) Link Here
891
	RING0_INT_FRAME
1001
	RING0_INT_FRAME
892
	pushl $0
1002
	pushl $0
893
	CFI_ADJUST_CFA_OFFSET 4
1003
	CFI_ADJUST_CFA_OFFSET 4
894
	pushl $do_bounds
1004
 	PUSH_XCODE(do_bounds)
895
	CFI_ADJUST_CFA_OFFSET 4
1005
	CFI_ADJUST_CFA_OFFSET 4
896
	jmp error_code
1006
	jmp error_code
897
	CFI_ENDPROC
1007
	CFI_ENDPROC
Lines 901-907 ENTRY(invalid_op) Link Here
901
	RING0_INT_FRAME
1011
	RING0_INT_FRAME
902
	pushl $0
1012
	pushl $0
903
	CFI_ADJUST_CFA_OFFSET 4
1013
	CFI_ADJUST_CFA_OFFSET 4
904
	pushl $do_invalid_op
1014
 	PUSH_XCODE(do_invalid_op)
905
	CFI_ADJUST_CFA_OFFSET 4
1015
	CFI_ADJUST_CFA_OFFSET 4
906
	jmp error_code
1016
	jmp error_code
907
	CFI_ENDPROC
1017
	CFI_ENDPROC
Lines 911-917 ENTRY(coprocessor_segment_overrun) Link Here
911
	RING0_INT_FRAME
1021
	RING0_INT_FRAME
912
	pushl $0
1022
	pushl $0
913
	CFI_ADJUST_CFA_OFFSET 4
1023
	CFI_ADJUST_CFA_OFFSET 4
914
	pushl $do_coprocessor_segment_overrun
1024
  	PUSH_XCODE(do_coprocessor_segment_overrun)
915
	CFI_ADJUST_CFA_OFFSET 4
1025
	CFI_ADJUST_CFA_OFFSET 4
916
	jmp error_code
1026
	jmp error_code
917
	CFI_ENDPROC
1027
	CFI_ENDPROC
Lines 919-925 END(coprocessor_segment_overrun) Link Here
919
1029
920
ENTRY(invalid_TSS)
1030
ENTRY(invalid_TSS)
921
	RING0_EC_FRAME
1031
	RING0_EC_FRAME
922
	pushl $do_invalid_TSS
1032
  	PUSH_XCODE(do_invalid_TSS)
923
	CFI_ADJUST_CFA_OFFSET 4
1033
	CFI_ADJUST_CFA_OFFSET 4
924
	jmp error_code
1034
	jmp error_code
925
	CFI_ENDPROC
1035
	CFI_ENDPROC
Lines 927-933 END(invalid_TSS) Link Here
927
1037
928
ENTRY(segment_not_present)
1038
ENTRY(segment_not_present)
929
	RING0_EC_FRAME
1039
	RING0_EC_FRAME
930
	pushl $do_segment_not_present
1040
  	PUSH_XCODE(do_segment_not_present)
931
	CFI_ADJUST_CFA_OFFSET 4
1041
	CFI_ADJUST_CFA_OFFSET 4
932
	jmp error_code
1042
	jmp error_code
933
	CFI_ENDPROC
1043
	CFI_ENDPROC
Lines 935-941 END(segment_not_present) Link Here
935
1045
936
ENTRY(stack_segment)
1046
ENTRY(stack_segment)
937
	RING0_EC_FRAME
1047
	RING0_EC_FRAME
938
	pushl $do_stack_segment
1048
  	PUSH_XCODE(do_stack_segment)
939
	CFI_ADJUST_CFA_OFFSET 4
1049
	CFI_ADJUST_CFA_OFFSET 4
940
	jmp error_code
1050
	jmp error_code
941
	CFI_ENDPROC
1051
	CFI_ENDPROC
Lines 943-949 END(stack_segment) Link Here
943
1053
944
ENTRY(alignment_check)
1054
ENTRY(alignment_check)
945
	RING0_EC_FRAME
1055
	RING0_EC_FRAME
946
	pushl $do_alignment_check
1056
 	PUSH_XCODE(do_alignment_check)
947
	CFI_ADJUST_CFA_OFFSET 4
1057
	CFI_ADJUST_CFA_OFFSET 4
948
	jmp error_code
1058
	jmp error_code
949
	CFI_ENDPROC
1059
	CFI_ENDPROC
Lines 953-959 ENTRY(divide_error) Link Here
953
	RING0_INT_FRAME
1063
	RING0_INT_FRAME
954
	pushl $0			# no error code
1064
	pushl $0			# no error code
955
	CFI_ADJUST_CFA_OFFSET 4
1065
	CFI_ADJUST_CFA_OFFSET 4
956
	pushl $do_divide_error
1066
 	PUSH_XCODE(do_divide_error)
957
	CFI_ADJUST_CFA_OFFSET 4
1067
	CFI_ADJUST_CFA_OFFSET 4
958
	jmp error_code
1068
	jmp error_code
959
	CFI_ENDPROC
1069
	CFI_ENDPROC
Lines 964-970 ENTRY(machine_check) Link Here
964
	RING0_INT_FRAME
1074
	RING0_INT_FRAME
965
	pushl $0
1075
	pushl $0
966
	CFI_ADJUST_CFA_OFFSET 4
1076
	CFI_ADJUST_CFA_OFFSET 4
967
	pushl machine_check_vector
1077
 	PUSH_XVEC(machine_check_vector)
968
	CFI_ADJUST_CFA_OFFSET 4
1078
	CFI_ADJUST_CFA_OFFSET 4
969
	jmp error_code
1079
	jmp error_code
970
	CFI_ENDPROC
1080
	CFI_ENDPROC
Lines 975-981 ENTRY(spurious_interrupt_bug) Link Here
975
	RING0_INT_FRAME
1085
	RING0_INT_FRAME
976
	pushl $0
1086
	pushl $0
977
	CFI_ADJUST_CFA_OFFSET 4
1087
	CFI_ADJUST_CFA_OFFSET 4
978
	pushl $do_spurious_interrupt_bug
1088
 	PUSH_XCODE(do_spurious_interrupt_bug)
979
	CFI_ADJUST_CFA_OFFSET 4
1089
	CFI_ADJUST_CFA_OFFSET 4
980
	jmp error_code
1090
	jmp error_code
981
	CFI_ENDPROC
1091
	CFI_ENDPROC
Lines 1210-1216 syscall_table_size=(.-sys_call_table) Link Here
1210
1320
1211
ENTRY(page_fault)
1321
ENTRY(page_fault)
1212
	RING0_EC_FRAME
1322
	RING0_EC_FRAME
1213
	pushl $do_page_fault
1323
 	PUSH_XCODE(do_page_fault)
1214
	CFI_ADJUST_CFA_OFFSET 4
1324
	CFI_ADJUST_CFA_OFFSET 4
1215
	ALIGN
1325
	ALIGN
1216
error_code:
1326
error_code:
Lines 1260-1266 error_code: Link Here
1260
	movl %ecx, %es
1370
	movl %ecx, %es
1261
	TRACE_IRQS_OFF
1371
	TRACE_IRQS_OFF
1262
	movl %esp,%eax			# pt_regs pointer
1372
	movl %esp,%eax			# pt_regs pointer
1263
	call *%edi
1373
	HANDLE_EXCEPTION(edi)
1264
	jmp ret_from_exception
1374
	jmp ret_from_exception
1265
	CFI_ENDPROC
1375
	CFI_ENDPROC
1266
END(page_fault)
1376
END(page_fault)
Lines 1304-1309 debug_stack_correct: Link Here
1304
	CFI_ADJUST_CFA_OFFSET 4
1414
	CFI_ADJUST_CFA_OFFSET 4
1305
	SAVE_ALL
1415
	SAVE_ALL
1306
	TRACE_IRQS_OFF
1416
	TRACE_IRQS_OFF
1417
 	DIVERT_EXCEPTION(do_debug)
1307
	xorl %edx,%edx			# error code 0
1418
	xorl %edx,%edx			# error code 0
1308
	movl %esp,%eax			# pt_regs pointer
1419
	movl %esp,%eax			# pt_regs pointer
1309
	call do_debug
1420
	call do_debug
Lines 1404-1409 ENTRY(int3) Link Here
1404
	CFI_ADJUST_CFA_OFFSET 4
1515
	CFI_ADJUST_CFA_OFFSET 4
1405
	SAVE_ALL
1516
	SAVE_ALL
1406
	TRACE_IRQS_OFF
1517
	TRACE_IRQS_OFF
1518
 	DIVERT_EXCEPTION(do_int3)
1407
	xorl %edx,%edx		# zero error code
1519
	xorl %edx,%edx		# zero error code
1408
	movl %esp,%eax		# pt_regs pointer
1520
	movl %esp,%eax		# pt_regs pointer
1409
	call do_int3
1521
	call do_int3
Lines 1413-1419 END(int3) Link Here
1413
1525
1414
ENTRY(general_protection)
1526
ENTRY(general_protection)
1415
	RING0_EC_FRAME
1527
	RING0_EC_FRAME
1416
	pushl $do_general_protection
1528
 	PUSH_XCODE(do_general_protection)
1417
	CFI_ADJUST_CFA_OFFSET 4
1529
	CFI_ADJUST_CFA_OFFSET 4
1418
	jmp error_code
1530
	jmp error_code
1419
	CFI_ENDPROC
1531
	CFI_ENDPROC
(-)a/arch/x86/kernel/entry_64.S (-29 / +201 lines)
Lines 48-53 Link Here
48
#include <asm/unistd.h>
48
#include <asm/unistd.h>
49
#include <asm/thread_info.h>
49
#include <asm/thread_info.h>
50
#include <asm/hw_irq.h>
50
#include <asm/hw_irq.h>
51
#include <asm/ipipe_base.h>
51
#include <asm/page_types.h>
52
#include <asm/page_types.h>
52
#include <asm/irqflags.h>
53
#include <asm/irqflags.h>
53
#include <asm/paravirt.h>
54
#include <asm/paravirt.h>
Lines 61-66 Link Here
61
#define __AUDIT_ARCH_LE	   0x40000000
62
#define __AUDIT_ARCH_LE	   0x40000000
62
63
63
	.code64
64
	.code64
65
66
#ifdef CONFIG_IPIPE
67
#define PREEMPT_SCHEDULE_IRQ		call __ipipe_preempt_schedule_irq
68
#else /* !CONFIG_IPIPE */
69
#define PREEMPT_SCHEDULE_IRQ		call preempt_schedule_irq
70
#endif /* !CONFIG_IPIPE */
71
64
#ifdef CONFIG_FUNCTION_TRACER
72
#ifdef CONFIG_FUNCTION_TRACER
65
#ifdef CONFIG_DYNAMIC_FTRACE
73
#ifdef CONFIG_DYNAMIC_FTRACE
66
ENTRY(mcount)
74
ENTRY(mcount)
Lines 336-342 ENTRY(save_args) Link Here
336
	/*
344
	/*
337
	 * We entered an interrupt context - irqs are off:
345
	 * We entered an interrupt context - irqs are off:
338
	 */
346
	 */
339
2:	TRACE_IRQS_OFF
347
2:
348
#ifndef CONFIG_IPIPE
349
	TRACE_IRQS_OFF
350
#endif
340
	ret
351
	ret
341
	CFI_ENDPROC
352
	CFI_ENDPROC
342
END(save_args)
353
END(save_args)
Lines 402-407 ENTRY(ret_from_fork) Link Here
402
	CFI_ADJUST_CFA_OFFSET 8
413
	CFI_ADJUST_CFA_OFFSET 8
403
	popf					# reset kernel eflags
414
	popf					# reset kernel eflags
404
	CFI_ADJUST_CFA_OFFSET -8
415
	CFI_ADJUST_CFA_OFFSET -8
416
  	ENABLE_INTERRUPTS_HW_COND
405
417
406
	call schedule_tail			# rdi: 'prev' task parameter
418
	call schedule_tail			# rdi: 'prev' task parameter
407
419
Lines 477-482 ENTRY(system_call_after_swapgs) Link Here
477
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
489
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
478
	movq  %rcx,RIP-ARGOFFSET(%rsp)
490
	movq  %rcx,RIP-ARGOFFSET(%rsp)
479
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
491
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
492
#ifdef CONFIG_IPIPE
493
	pushq %rdi
494
	pushq %rax
495
	leaq -(ARGOFFSET-16)(%rsp),%rdi	# regs for handler
496
	call	__ipipe_syscall_root_thunk
497
	testl %eax, %eax
498
	popq %rax
499
	popq %rdi
500
	js    ret_from_sys_call
501
	jnz   sysret_fastexit
502
#endif
480
	GET_THREAD_INFO(%rcx)
503
	GET_THREAD_INFO(%rcx)
481
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
504
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
482
	jnz tracesys
505
	jnz tracesys
Lines 506-511 sysret_check: Link Here
506
	 * sysretq will re-enable interrupts:
529
	 * sysretq will re-enable interrupts:
507
	 */
530
	 */
508
	TRACE_IRQS_ON
531
	TRACE_IRQS_ON
532
sysret_fastexit:
509
	movq RIP-ARGOFFSET(%rsp),%rcx
533
	movq RIP-ARGOFFSET(%rsp),%rcx
510
	CFI_REGISTER	rip,rcx
534
	CFI_REGISTER	rip,rcx
511
	RESTORE_ARGS 0,-ARG_SKIP,1
535
	RESTORE_ARGS 0,-ARG_SKIP,1
Lines 517-522 sysret_check: Link Here
517
	/* Handle reschedules */
541
	/* Handle reschedules */
518
	/* edx:	work, edi: workmask */
542
	/* edx:	work, edi: workmask */
519
sysret_careful:
543
sysret_careful:
544
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),%edx
545
	jnz ret_from_sys_call_trace
520
	bt $TIF_NEED_RESCHED,%edx
546
	bt $TIF_NEED_RESCHED,%edx
521
	jnc sysret_signal
547
	jnc sysret_signal
522
	TRACE_IRQS_ON
548
	TRACE_IRQS_ON
Lines 528-533 sysret_careful: Link Here
528
	CFI_ADJUST_CFA_OFFSET -8
554
	CFI_ADJUST_CFA_OFFSET -8
529
	jmp sysret_check
555
	jmp sysret_check
530
556
557
ret_from_sys_call_trace:
558
	TRACE_IRQS_ON
559
	sti
560
	SAVE_REST
561
	FIXUP_TOP_OF_STACK %rdi
562
	movq %rsp,%rdi
563
	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
564
	RESTORE_REST
565
	jmp int_ret_from_sys_call
566
531
	/* Handle a signal */
567
	/* Handle a signal */
532
sysret_signal:
568
sysret_signal:
533
	TRACE_IRQS_ON
569
	TRACE_IRQS_ON
Lines 800-806 END(interrupt) Link Here
800
	CFI_ADJUST_CFA_OFFSET 10*8
836
	CFI_ADJUST_CFA_OFFSET 10*8
801
	call save_args
837
	call save_args
802
	PARTIAL_FRAME 0
838
	PARTIAL_FRAME 0
839
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
840
	pushq %rbp
841
	leaq RIP-8(%rdi), %rbp	# make interrupted address show up in trace
842
	pushq %rdi
843
	movq ORIG_RAX(%rdi), %rdi	# IRQ number
844
	notq %rdi			# ...is inverted, fix up
845
	call ipipe_trace_begin
846
	popq %rdi
847
	popq %rbp
848
849
	call \func
850
851
	pushq %rbp
852
	pushq %rax
853
	movq 8-ARGOFFSET+ORIG_RAX(%rbp), %rdi
854
	leaq 8-ARGOFFSET+RIP-8(%rbp), %rbp
855
	notq %rdi
856
	call ipipe_trace_end
857
	popq %rax
858
	popq %rbp
859
#else
803
	call \func
860
	call \func
861
#endif
804
	.endm
862
	.endm
805
863
806
	/*
864
	/*
Lines 809-817 END(interrupt) Link Here
809
	 */
867
	 */
810
	.p2align CONFIG_X86_L1_CACHE_SHIFT
868
	.p2align CONFIG_X86_L1_CACHE_SHIFT
811
common_interrupt:
869
common_interrupt:
870
#ifdef CONFIG_IPIPE
871
	XCPT_FRAME
872
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
873
	interrupt *ipipe_irq_handler
874
	testl %eax, %eax
875
	jnz ret_from_intr
876
	decl PER_CPU_VAR(irq_count)
877
	leaveq
878
	CFI_DEF_CFA_REGISTER	rsp
879
	CFI_ADJUST_CFA_OFFSET	-8
880
	testl $3,CS-ARGOFFSET(%rsp)
881
	jz restore_args
882
	jmp retint_swapgs_notrace
883
#else /* !CONFIG_IPIPE */
812
	XCPT_FRAME
884
	XCPT_FRAME
813
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
885
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
814
	interrupt do_IRQ
886
	interrupt do_IRQ
887
#endif /* !CONFIG_IPIPE */
815
	/* 0(%rsp): old_rsp-ARGOFFSET */
888
	/* 0(%rsp): old_rsp-ARGOFFSET */
816
ret_from_intr:
889
ret_from_intr:
817
	DISABLE_INTERRUPTS(CLBR_NONE)
890
	DISABLE_INTERRUPTS(CLBR_NONE)
Lines 820-826 ret_from_intr: Link Here
820
	leaveq
893
	leaveq
821
	CFI_DEF_CFA_REGISTER	rsp
894
	CFI_DEF_CFA_REGISTER	rsp
822
	CFI_ADJUST_CFA_OFFSET	-8
895
	CFI_ADJUST_CFA_OFFSET	-8
823
exit_intr:
896
ENTRY(exit_intr)
824
	GET_THREAD_INFO(%rcx)
897
	GET_THREAD_INFO(%rcx)
825
	testl $3,CS-ARGOFFSET(%rsp)
898
	testl $3,CS-ARGOFFSET(%rsp)
826
	je retint_kernel
899
	je retint_kernel
Lines 840-859 retint_check: Link Here
840
	jnz  retint_careful
913
	jnz  retint_careful
841
914
842
retint_swapgs:		/* return to user-space */
915
retint_swapgs:		/* return to user-space */
916
	TRACE_IRQS_IRETQ
843
	/*
917
	/*
844
	 * The iretq could re-enable interrupts:
918
	 * The iretq could re-enable interrupts:
845
	 */
919
	 */
846
	DISABLE_INTERRUPTS(CLBR_ANY)
920
retint_swapgs_notrace:
847
	TRACE_IRQS_IRETQ
848
	SWAPGS
921
	SWAPGS
922
retint_noswapgs:
849
	jmp restore_args
923
	jmp restore_args
850
924
851
retint_restore_args:	/* return to kernel space */
925
retint_restore_args:	/* return to kernel space */
852
	DISABLE_INTERRUPTS(CLBR_ANY)
926
	TRACE_IRQS_IRETQ
853
	/*
927
	/*
854
	 * The iretq could re-enable interrupts:
928
	 * The iretq could re-enable interrupts:
855
	 */
929
	 */
856
	TRACE_IRQS_IRETQ
857
restore_args:
930
restore_args:
858
	RESTORE_ARGS 0,8,0
931
	RESTORE_ARGS 0,8,0
859
932
Lines 935-941 ENTRY(retint_kernel) Link Here
935
	jnc  retint_restore_args
1008
	jnc  retint_restore_args
936
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
1009
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
937
	jnc  retint_restore_args
1010
	jnc  retint_restore_args
938
	call preempt_schedule_irq
1011
#ifdef CONFIG_IPIPE
1012
	/*
1013
	 * We may have preempted call_softirq before __do_softirq raised or
1014
	 * after it lowered the preemption counter.
1015
	 */
1016
	cmpl $0,PER_CPU_VAR(irq_count)
1017
	jge  retint_restore_args
1018
#endif
1019
	PREEMPT_SCHEDULE_IRQ
939
	jmp exit_intr
1020
	jmp exit_intr
940
#endif
1021
#endif
941
1022
Lines 945-960 END(common_interrupt) Link Here
945
/*
1026
/*
946
 * APIC interrupts.
1027
 * APIC interrupts.
947
 */
1028
 */
948
.macro apicinterrupt num sym do_sym
1029
	.macro apicinterrupt num sym do_sym
949
ENTRY(\sym)
1030
ENTRY(\sym)
950
	INTR_FRAME
1031
	INTR_FRAME
951
	pushq $~(\num)
1032
	pushq $~(\num)
952
	CFI_ADJUST_CFA_OFFSET 8
1033
	CFI_ADJUST_CFA_OFFSET 8
1034
#ifdef CONFIG_IPIPE
1035
	interrupt *ipipe_irq_handler
1036
	testl %eax, %eax
1037
	jnz ret_from_intr
1038
	decl PER_CPU_VAR(irq_count)
1039
	leaveq
1040
	CFI_DEF_CFA_REGISTER	rsp
1041
	CFI_ADJUST_CFA_OFFSET	-8
1042
	testl $3,CS-ARGOFFSET(%rsp)
1043
	jz restore_args
1044
	jmp retint_swapgs_notrace
1045
	CFI_ENDPROC
1046
	.endm
1047
#else /* !CONFIG_IPIPE */
953
	interrupt \do_sym
1048
	interrupt \do_sym
954
	jmp ret_from_intr
1049
	jmp ret_from_intr
955
	CFI_ENDPROC
1050
	CFI_ENDPROC
956
END(\sym)
1051
END(\sym)
957
.endm
1052
.endm
1053
#endif /* !CONFIG_IPIPE */
958
1054
959
#ifdef CONFIG_SMP
1055
#ifdef CONFIG_SMP
960
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
1056
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
Lines 979-984 apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \ Link Here
979
	invalidate_interrupt1 smp_invalidate_interrupt
1075
	invalidate_interrupt1 smp_invalidate_interrupt
980
apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
1076
apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
981
	invalidate_interrupt2 smp_invalidate_interrupt
1077
	invalidate_interrupt2 smp_invalidate_interrupt
1078
#ifndef CONFIG_IPIPE
982
apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
1079
apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
983
	invalidate_interrupt3 smp_invalidate_interrupt
1080
	invalidate_interrupt3 smp_invalidate_interrupt
984
apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
1081
apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
Lines 989-994 apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \ Link Here
989
	invalidate_interrupt6 smp_invalidate_interrupt
1086
	invalidate_interrupt6 smp_invalidate_interrupt
990
apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
1087
apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
991
	invalidate_interrupt7 smp_invalidate_interrupt
1088
	invalidate_interrupt7 smp_invalidate_interrupt
1089
#endif /* !CONFIG_IPIPE */
992
#endif
1090
#endif
993
1091
994
apicinterrupt THRESHOLD_APIC_VECTOR \
1092
apicinterrupt THRESHOLD_APIC_VECTOR \
Lines 1023-1029 apicinterrupt LOCAL_PENDING_VECTOR \ Link Here
1023
/*
1121
/*
1024
 * Exception entry points.
1122
 * Exception entry points.
1025
 */
1123
 */
1026
.macro zeroentry sym do_sym
1124
.macro zeroentry sym do_sym ex_code
1027
ENTRY(\sym)
1125
ENTRY(\sym)
1028
	INTR_FRAME
1126
	INTR_FRAME
1029
	PARAVIRT_ADJUST_EXCEPTION_FRAME
1127
	PARAVIRT_ADJUST_EXCEPTION_FRAME
Lines 1034-1046 ENTRY(\sym) Link Here
1034
	DEFAULT_FRAME 0
1132
	DEFAULT_FRAME 0
1035
	movq %rsp,%rdi		/* pt_regs pointer */
1133
	movq %rsp,%rdi		/* pt_regs pointer */
1036
	xorl %esi,%esi		/* no error code */
1134
	xorl %esi,%esi		/* no error code */
1135
#ifdef CONFIG_IPIPE
1136
	movq $\ex_code,%rdx
1137
	call __ipipe_handle_exception   /* handle(regs, error_code, ex_code) */
1138
	testl %eax, %eax
1139
	jz error_exit
1140
	movl %ebx,%eax
1141
	RESTORE_REST
1142
	DISABLE_INTERRUPTS(CLBR_NONE)
1143
	testl %eax,%eax
1144
	jne retint_noswapgs
1145
	jmp retint_swapgs_notrace
1146
#else /* !CONFIG_IPIPE */
1037
	call \do_sym
1147
	call \do_sym
1148
#endif /* !CONFIG_IPIPE */
1038
	jmp error_exit		/* %ebx: no swapgs flag */
1149
	jmp error_exit		/* %ebx: no swapgs flag */
1039
	CFI_ENDPROC
1150
	CFI_ENDPROC
1040
END(\sym)
1151
END(\sym)
1041
.endm
1152
.endm
1042
1153
1043
.macro paranoidzeroentry sym do_sym
1154
.macro paranoidzeroentry sym do_sym ex_code=0
1044
ENTRY(\sym)
1155
ENTRY(\sym)
1045
	INTR_FRAME
1156
	INTR_FRAME
1046
	PARAVIRT_ADJUST_EXCEPTION_FRAME
1157
	PARAVIRT_ADJUST_EXCEPTION_FRAME
Lines 1050-1063 ENTRY(\sym) Link Here
1050
	call save_paranoid
1161
	call save_paranoid
1051
	TRACE_IRQS_OFF
1162
	TRACE_IRQS_OFF
1052
	movq %rsp,%rdi		/* pt_regs pointer */
1163
	movq %rsp,%rdi		/* pt_regs pointer */
1164
#ifdef CONFIG_IPIPE
1165
	.if \ex_code
1166
	movq $\ex_code,%rsi
1167
	call __ipipe_divert_exception   /* handle(regs, ex_code) */
1168
	testl %eax,%eax
1169
	jnz 1f
1170
	movq %rsp,%rdi
1171
	.endif
1172
#endif
1053
	xorl %esi,%esi		/* no error code */
1173
	xorl %esi,%esi		/* no error code */
1054
	call \do_sym
1174
	call \do_sym
1175
#ifdef CONFIG_IPIPE
1176
	xorl %eax,%eax		/* tell paranoid_exit to propagate the exception */
1177
1:
1178
#endif
1055
	jmp paranoid_exit	/* %ebx: no swapgs flag */
1179
	jmp paranoid_exit	/* %ebx: no swapgs flag */
1056
	CFI_ENDPROC
1180
	CFI_ENDPROC
1057
END(\sym)
1181
END(\sym)
1058
.endm
1182
.endm
1059
1183
1060
.macro paranoidzeroentry_ist sym do_sym ist
1184
.macro paranoidzeroentry_ist sym do_sym ist ex_code=0
1061
ENTRY(\sym)
1185
ENTRY(\sym)
1062
	INTR_FRAME
1186
	INTR_FRAME
1063
	PARAVIRT_ADJUST_EXCEPTION_FRAME
1187
	PARAVIRT_ADJUST_EXCEPTION_FRAME
Lines 1067-1083 ENTRY(\sym) Link Here
1067
	call save_paranoid
1191
	call save_paranoid
1068
	TRACE_IRQS_OFF
1192
	TRACE_IRQS_OFF
1069
	movq %rsp,%rdi		/* pt_regs pointer */
1193
	movq %rsp,%rdi		/* pt_regs pointer */
1194
#ifdef CONFIG_IPIPE
1195
	.if \ex_code
1196
	movq $\ex_code,%rsi
1197
	call __ipipe_divert_exception   /* handle(regs, ex_code) */
1198
	testl %eax,%eax
1199
	jnz 1f
1200
	movq %rsp,%rdi
1201
	.endif
1202
#endif
1070
	xorl %esi,%esi		/* no error code */
1203
	xorl %esi,%esi		/* no error code */
1071
	PER_CPU(init_tss, %rbp)
1204
	PER_CPU(init_tss, %rbp)
1072
	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
1205
	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
1073
	call \do_sym
1206
	call \do_sym
1074
	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
1207
	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
1208
#ifdef CONFIG_IPIPE
1209
	xorl %eax,%eax		/* tell paranoid_exit to propagate the exception */
1210
1:
1211
#endif
1075
	jmp paranoid_exit	/* %ebx: no swapgs flag */
1212
	jmp paranoid_exit	/* %ebx: no swapgs flag */
1076
	CFI_ENDPROC
1213
	CFI_ENDPROC
1077
END(\sym)
1214
END(\sym)
1078
.endm
1215
.endm
1079
1216
1080
.macro errorentry sym do_sym
1217
.macro errorentry sym do_sym ex_code
1081
ENTRY(\sym)
1218
ENTRY(\sym)
1082
	XCPT_FRAME
1219
	XCPT_FRAME
1083
	PARAVIRT_ADJUST_EXCEPTION_FRAME
1220
	PARAVIRT_ADJUST_EXCEPTION_FRAME
Lines 1088-1101 ENTRY(\sym) Link Here
1088
	movq %rsp,%rdi			/* pt_regs pointer */
1225
	movq %rsp,%rdi			/* pt_regs pointer */
1089
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
1226
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
1090
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
1227
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
1228
#ifdef CONFIG_IPIPE
1229
	movq $\ex_code,%rdx
1230
	call __ipipe_handle_exception   /* handle(regs, error_code, ex_code) */
1231
	testl %eax, %eax
1232
	jz error_exit
1233
	movl %ebx,%eax
1234
	RESTORE_REST
1235
	DISABLE_INTERRUPTS(CLBR_NONE)
1236
	testl %eax,%eax
1237
	jne retint_noswapgs
1238
	jmp retint_swapgs_notrace
1239
#else /* !CONFIG_IPIPE */
1091
	call \do_sym
1240
	call \do_sym
1241
#endif /* !CONFIG_IPIPE */
1092
	jmp error_exit			/* %ebx: no swapgs flag */
1242
	jmp error_exit			/* %ebx: no swapgs flag */
1093
	CFI_ENDPROC
1243
	CFI_ENDPROC
1094
END(\sym)
1244
END(\sym)
1095
.endm
1245
.endm
1096
1246
1097
	/* error code is on the stack already */
1247
	/* error code is on the stack already */
1098
.macro paranoiderrorentry sym do_sym
1248
.macro paranoiderrorentry sym do_sym ex_code=0
1099
ENTRY(\sym)
1249
ENTRY(\sym)
1100
	XCPT_FRAME
1250
	XCPT_FRAME
1101
	PARAVIRT_ADJUST_EXCEPTION_FRAME
1251
	PARAVIRT_ADJUST_EXCEPTION_FRAME
Lines 1105-1131 ENTRY(\sym) Link Here
1105
	DEFAULT_FRAME 0
1255
	DEFAULT_FRAME 0
1106
	TRACE_IRQS_OFF
1256
	TRACE_IRQS_OFF
1107
	movq %rsp,%rdi			/* pt_regs pointer */
1257
	movq %rsp,%rdi			/* pt_regs pointer */
1258
#ifdef CONFIG_IPIPE
1259
	.if \ex_code
1260
	movq $\ex_code,%rsi
1261
	call __ipipe_divert_exception   /* handle(regs, ex_code) */
1262
	testl %eax,%eax
1263
	jnz 1f
1264
	movq %rsp,%rdi
1265
	.endif
1266
#endif
1108
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
1267
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
1109
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
1268
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
1110
	call \do_sym
1269
	call \do_sym
1270
#ifdef CONFIG_IPIPE
1271
	xorl %eax,%eax			/* tell paranoid_exit to propagate the exception */
1272
1:
1273
#endif
1111
	jmp paranoid_exit		/* %ebx: no swapgs flag */
1274
	jmp paranoid_exit		/* %ebx: no swapgs flag */
1112
	CFI_ENDPROC
1275
	CFI_ENDPROC
1113
END(\sym)
1276
END(\sym)
1114
.endm
1277
.endm
1115
1278
1116
zeroentry divide_error do_divide_error
1279
zeroentry divide_error do_divide_error ex_do_divide_error
1117
zeroentry overflow do_overflow
1280
zeroentry overflow do_overflow ex_do_overflow
1118
zeroentry bounds do_bounds
1281
zeroentry bounds do_bounds ex_do_bounds
1119
zeroentry invalid_op do_invalid_op
1282
zeroentry invalid_op do_invalid_op ex_do_invalid_op
1120
zeroentry device_not_available do_device_not_available
1283
zeroentry device_not_available do_device_not_available ex_do_device_not_available
1121
paranoiderrorentry double_fault do_double_fault
1284
paranoiderrorentry double_fault do_double_fault
1122
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
1285
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun ex_do_coprocessor_segment_overrun
1123
errorentry invalid_TSS do_invalid_TSS
1286
errorentry invalid_TSS do_invalid_TSS ex_do_invalid_TSS
1124
errorentry segment_not_present do_segment_not_present
1287
errorentry segment_not_present do_segment_not_present ex_do_segment_not_present
1125
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
1288
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug ex_do_spurious_interrupt_bug
1126
zeroentry coprocessor_error do_coprocessor_error
1289
zeroentry coprocessor_error do_coprocessor_error ex_do_coprocessor_error
1127
errorentry alignment_check do_alignment_check
1290
errorentry alignment_check do_alignment_check ex_do_alignment_check
1128
zeroentry simd_coprocessor_error do_simd_coprocessor_error
1291
zeroentry simd_coprocessor_error do_simd_coprocessor_error ex_do_simd_coprocessor_error
1129
1292
1130
	/* Reload gs selector with exception handling */
1293
	/* Reload gs selector with exception handling */
1131
	/* edi:  new selector */
1294
	/* edi:  new selector */
Lines 1255-1268 ENTRY(call_softirq) Link Here
1255
	CFI_REL_OFFSET rbp,0
1418
	CFI_REL_OFFSET rbp,0
1256
	mov  %rsp,%rbp
1419
	mov  %rsp,%rbp
1257
	CFI_DEF_CFA_REGISTER rbp
1420
	CFI_DEF_CFA_REGISTER rbp
1421
	DISABLE_INTERRUPTS_HW_COND
1258
	incl PER_CPU_VAR(irq_count)
1422
	incl PER_CPU_VAR(irq_count)
1259
	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
1423
	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
1424
	ENABLE_INTERRUPTS_HW_COND
1260
	push  %rbp			# backlink for old unwinder
1425
	push  %rbp			# backlink for old unwinder
1261
	call __do_softirq
1426
	call __do_softirq
1427
	DISABLE_INTERRUPTS_HW_COND
1262
	leaveq
1428
	leaveq
1263
	CFI_DEF_CFA_REGISTER	rsp
1429
	CFI_DEF_CFA_REGISTER	rsp
1264
	CFI_ADJUST_CFA_OFFSET   -8
1430
	CFI_ADJUST_CFA_OFFSET   -8
1265
	decl PER_CPU_VAR(irq_count)
1431
	decl PER_CPU_VAR(irq_count)
1432
	ENABLE_INTERRUPTS_HW_COND
1266
	ret
1433
	ret
1267
	CFI_ENDPROC
1434
	CFI_ENDPROC
1268
END(call_softirq)
1435
END(call_softirq)
Lines 1371-1386 END(xen_failsafe_callback) Link Here
1371
 */
1538
 */
1372
	.pushsection .kprobes.text, "ax"
1539
	.pushsection .kprobes.text, "ax"
1373
1540
1374
paranoidzeroentry_ist debug do_debug DEBUG_STACK
1541
paranoidzeroentry_ist debug do_debug DEBUG_STACK ex_do_debug
1375
paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
1542
paranoidzeroentry_ist int3 do_int3 DEBUG_STACK ex_do_int3
1376
paranoiderrorentry stack_segment do_stack_segment
1543
paranoiderrorentry stack_segment do_stack_segment
1377
#ifdef CONFIG_XEN
1544
#ifdef CONFIG_XEN
1378
zeroentry xen_debug do_debug
1545
zeroentry xen_debug do_debug
1379
zeroentry xen_int3 do_int3
1546
zeroentry xen_int3 do_int3
1380
errorentry xen_stack_segment do_stack_segment
1547
errorentry xen_stack_segment do_stack_segment
1381
#endif
1548
#endif
1382
errorentry general_protection do_general_protection
1549
errorentry general_protection do_general_protection ex_do_general_protection
1383
errorentry page_fault do_page_fault
1550
errorentry page_fault do_page_fault ex_do_page_fault
1384
#ifdef CONFIG_X86_MCE
1551
#ifdef CONFIG_X86_MCE
1385
paranoidzeroentry machine_check *machine_check_vector(%rip)
1552
paranoidzeroentry machine_check *machine_check_vector(%rip)
1386
#endif
1553
#endif
Lines 1403-1410 ENTRY(paranoid_exit) Link Here
1403
	INTR_FRAME
1570
	INTR_FRAME
1404
	DISABLE_INTERRUPTS(CLBR_NONE)
1571
	DISABLE_INTERRUPTS(CLBR_NONE)
1405
	TRACE_IRQS_OFF
1572
	TRACE_IRQS_OFF
1573
paranoid_notrace:
1406
	testl %ebx,%ebx				/* swapgs needed? */
1574
	testl %ebx,%ebx				/* swapgs needed? */
1407
	jnz paranoid_restore
1575
	jnz paranoid_restore
1576
#ifdef CONFIG_IPIPE
1577
	testl %eax,%eax
1578
	jnz paranoid_swapgs
1579
#endif
1408
	testl $3,CS(%rsp)
1580
	testl $3,CS(%rsp)
1409
	jnz   paranoid_userspace
1581
	jnz   paranoid_userspace
1410
paranoid_swapgs:
1582
paranoid_swapgs:
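For orientation: the ex_code argument threaded through these macros lets the pipeline divert a trap to a co-kernel before Linux sees it. A minimal C sketch of that divert-or-propagate decision, not part of the patch; head_domain_owns_trap() is a hypothetical stand-in for __ipipe_divert_exception():

typedef void (*linux_trap_handler_t)(void *regs, long error_code);

/* Hypothetical: nonzero means a co-kernel consumed the trap. */
extern int head_domain_owns_trap(void *regs, int ex_code);

static int trap_entry_sketch(void *regs, long error_code, int ex_code,
			     linux_trap_handler_t do_sym)
{
	if (ex_code && head_domain_owns_trap(regs, ex_code))
		return 1;	/* like "jnz 1f": skip \do_sym, just unwind */
	do_sym(regs, error_code);	/* normal Linux handling */
	return 0;	/* like "xorl %eax,%eax" before paranoid_exit */
}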
(-)a/arch/x86/kernel/i8253.c (+7 lines)
Lines 11-16
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/ipipe.h>

 #include <asm/i8253.h>
 #include <asm/hpet.h>
Lines 130-135 static cycle_t pit_read(struct clocksource *cs)
 	int count;
 	u32 jifs;

+#ifdef CONFIG_IPIPE
+	if (!__ipipe_pipeline_head_p(ipipe_root_domain))
+		/* We don't really own the PIT. */
+		return (cycle_t)(jiffies * LATCH) + (LATCH - 1) - old_count;
+#endif /* CONFIG_IPIPE */
+
 	spin_lock_irqsave(&i8253_lock, flags);
 	/*
 	 * Although our caller may have the read side of xtime_lock,
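When the root domain is not the pipeline head, pit_read() above may no longer touch the hardware, so it synthesizes a monotonic value from jiffies instead. A standalone sketch of that arithmetic; the LATCH value below is illustrative (the real one comes from the kernel's timer headers), and old_count is read here as the last latched down-counter value:

#include <stdint.h>

#define LATCH_SKETCH 11932	/* illustrative PIT reload value, ~100 Hz */

/* One PIT period elapses per jiffy; the hardware counter counts down from
 * LATCH, so (LATCH - 1 - old_count) approximates progress within the
 * current period. Mirrors the CONFIG_IPIPE fallback in pit_read(). */
static uint64_t pit_read_fallback_sketch(uint64_t jiffies, uint32_t old_count)
{
	return jiffies * LATCH_SKETCH + (LATCH_SKETCH - 1) - old_count;
}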
(-)a/arch/x86/kernel/i8259.c (-7 / +23 lines)
Lines 32-38
  */

 static int i8259A_auto_eoi;
-DEFINE_SPINLOCK(i8259A_lock);
+IPIPE_DEFINE_SPINLOCK(i8259A_lock);
 static void mask_and_ack_8259A(unsigned int);

 struct irq_chip i8259A_chip = {
Lines 69-74 void disable_8259A_irq(unsigned int irq)
 	unsigned long flags;

 	spin_lock_irqsave(&i8259A_lock, flags);
+	ipipe_irq_lock(irq);
 	cached_irq_mask |= mask;
 	if (irq & 8)
 		outb(cached_slave_mask, PIC_SLAVE_IMR);
Lines 79-93 void disable_8259A_irq(unsigned int irq)

 void enable_8259A_irq(unsigned int irq)
 {
-	unsigned int mask = ~(1 << irq);
+	unsigned int mask = (1 << irq);
 	unsigned long flags;

 	spin_lock_irqsave(&i8259A_lock, flags);
-	cached_irq_mask &= mask;
-	if (irq & 8)
-		outb(cached_slave_mask, PIC_SLAVE_IMR);
-	else
-		outb(cached_master_mask, PIC_MASTER_IMR);
+	if (cached_irq_mask & mask) {
+		cached_irq_mask &= ~mask;
+		if (irq & 8)
+			outb(cached_slave_mask, PIC_SLAVE_IMR);
+		else
+			outb(cached_master_mask, PIC_MASTER_IMR);
+		ipipe_irq_unlock(irq);
+	}
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }

Lines 168-173 static void mask_and_ack_8259A(unsigned int irq)
 	 */
 	if (cached_irq_mask & irqmask)
 		goto spurious_8259A_irq;
+#ifdef CONFIG_IPIPE
+	if (irq == 0) {
+		/*
+		 * Fast timer ack -- don't mask (unless supposedly
+		 * spurious). We trace outb's in order to detect
+		 * broken hardware inducing large delays.
+		 */
+		outb(0x60, PIC_MASTER_CMD);	/* Specific EOI to master. */
+		spin_unlock_irqrestore(&i8259A_lock, flags);
+		return;
+	}
+#endif /* CONFIG_IPIPE */
 	cached_irq_mask |= irqmask;

 handle_real_irq:
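The rewritten enable_8259A_irq() flips the mask polarity so a line is unmasked, and the pipeline lock dropped, only when the line was actually masked; this keeps ipipe_irq_lock()/ipipe_irq_unlock() balanced against disable_8259A_irq(). A userspace model of the cached-mask logic, purely illustrative:

#include <stdint.h>

static uint16_t cached_mask_sketch = 0xffff;	/* all 16 ISA lines masked */

static void disable_line_sketch(unsigned irq)
{
	cached_mask_sketch |= 1u << irq;	/* mask in cache, then write IMR */
}

static void enable_line_sketch(unsigned irq)
{
	uint16_t bit = 1u << irq;

	if (cached_mask_sketch & bit) {		/* only if currently masked */
		cached_mask_sketch &= (uint16_t)~bit;
		/* write the low or high cache byte to the matching IMR,
		 * then drop the pipeline lock taken by the disable path */
	}
	/* else: already enabled -- nothing to do, no unlock either */
}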
(-)a/arch/x86/kernel/ipipe.c (+1084 lines)
Line 0 (new file)
/*   -*- linux-c -*-
 *   linux/arch/x86/kernel/ipipe.c
 *
 *   Copyright (C) 2002-2007 Philippe Gerum.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 *   USA; either version 2 of the License, or (at your option) any later
 *   version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *   Architecture-dependent I-PIPE support for x86.
 */

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/kprobes.h>
#include <asm/unistd.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/desc.h>
#include <asm/io.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <asm/mpspec.h>
#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
#endif	/* CONFIG_X86_IO_APIC */
#include <asm/apic.h>
#endif	/* CONFIG_X86_LOCAL_APIC */
#include <asm/traps.h>

int __ipipe_tick_irq = 0;	/* Legacy timer */

DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);

DEFINE_PER_CPU(unsigned long, __ipipe_cr2);
EXPORT_PER_CPU_SYMBOL_GPL(__ipipe_cr2);

#ifdef CONFIG_SMP

static cpumask_t __ipipe_cpu_sync_map;

static cpumask_t __ipipe_cpu_lock_map;

static unsigned long __ipipe_critical_lock;

static IPIPE_DEFINE_SPINLOCK(__ipipe_cpu_barrier);

static atomic_t __ipipe_critical_count = ATOMIC_INIT(0);

static void (*__ipipe_cpu_sync) (void);

#endif /* CONFIG_SMP */

/*
 * ipipe_trigger_irq() -- Push the interrupt at front of the pipeline
 * just like if it has been actually received from a hw source. Also
 * works for virtual interrupts.
 */
int ipipe_trigger_irq(unsigned int irq)
{
	struct pt_regs regs;
	unsigned long flags;

#ifdef CONFIG_IPIPE_DEBUG
	if (irq >= IPIPE_NR_IRQS)
		return -EINVAL;
	if (ipipe_virtual_irq_p(irq)) {
		if (!test_bit(irq - IPIPE_VIRQ_BASE,
			      &__ipipe_virtual_irq_map))
			return -EINVAL;
	} else if (irq_to_desc(irq) == NULL)
		return -EINVAL;
#endif
	local_irq_save_hw(flags);
	regs.flags = flags;
	regs.orig_ax = irq;	/* Positive value - IRQ won't be acked */
	regs.cs = __KERNEL_CS;
	__ipipe_handle_irq(&regs);
	local_irq_restore_hw(flags);

	return 1;
}

int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
{
	info->ncpus = num_online_cpus();
	info->cpufreq = ipipe_cpu_freq();
	info->archdep.tmirq = __ipipe_tick_irq;
#ifdef CONFIG_X86_TSC
	info->archdep.tmfreq = ipipe_cpu_freq();
#else	/* !CONFIG_X86_TSC */
	info->archdep.tmfreq = CLOCK_TICK_RATE;
#endif	/* CONFIG_X86_TSC */

	return 0;
}

#ifdef CONFIG_X86_UV
asmlinkage void uv_bau_message_interrupt(struct pt_regs *regs);
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
asmlinkage void smp_threshold_interrupt(void);
#endif
#ifdef CONFIG_X86_NEW_MCE
asmlinkage void smp_mce_self_interrupt(void);
#endif

static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
{
	desc->ipipe_ack(irq, desc);
}

void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
	irq_to_desc(irq)->status &= ~IRQ_DISABLED;
}

#ifdef CONFIG_X86_LOCAL_APIC

static void __ipipe_noack_apic(unsigned irq, struct irq_desc *desc)
{
}

static void __ipipe_ack_apic(unsigned irq, struct irq_desc *desc)
{
	__ack_APIC_irq();
}

static void __ipipe_null_handler(unsigned irq, void *cookie)
{
}

#endif	/* CONFIG_X86_LOCAL_APIC */

/* __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
   interrupts are off, and secondary CPUs are still lost in space. */

void __init __ipipe_enable_pipeline(void)
{
	unsigned int vector, irq;

#ifdef CONFIG_X86_LOCAL_APIC

	/* Map the APIC system vectors. */

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(LOCAL_TIMER_VECTOR),
			     (ipipe_irq_handler_t)&smp_apic_timer_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(SPURIOUS_APIC_VECTOR),
			     (ipipe_irq_handler_t)&smp_spurious_interrupt,
			     NULL,
			     &__ipipe_noack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(ERROR_APIC_VECTOR),
			     (ipipe_irq_handler_t)&smp_error_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR0),
			     &__ipipe_null_handler,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR1),
			     &__ipipe_null_handler,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR2),
			     &__ipipe_null_handler,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(IPIPE_SERVICE_VECTOR3),
			     &__ipipe_null_handler,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

#ifdef CONFIG_X86_THERMAL_VECTOR
	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(THERMAL_APIC_VECTOR),
			     (ipipe_irq_handler_t)&smp_thermal_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);
#endif /* CONFIG_X86_THERMAL_VECTOR */

#ifdef CONFIG_X86_MCE_THRESHOLD
	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(THRESHOLD_APIC_VECTOR),
			     (ipipe_irq_handler_t)&smp_threshold_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);
#endif /* CONFIG_X86_MCE_THRESHOLD */

#ifdef CONFIG_X86_NEW_MCE
	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(MCE_SELF_VECTOR),
			     (ipipe_irq_handler_t)&smp_mce_self_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);
#endif /* CONFIG_X86_NEW_MCE */

#ifdef CONFIG_X86_UV
	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(UV_BAU_MESSAGE),
			     (ipipe_irq_handler_t)&uv_bau_message_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);
#endif /* CONFIG_X86_UV */

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(GENERIC_INTERRUPT_VECTOR),
			     (ipipe_irq_handler_t)&smp_generic_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

#ifdef CONFIG_PERF_COUNTERS
	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(LOCAL_PENDING_VECTOR),
			     (ipipe_irq_handler_t)&perf_pending_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);
#endif /* CONFIG_PERF_COUNTERS */

#endif	/* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_SMP
	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(RESCHEDULE_VECTOR),
			     (ipipe_irq_handler_t)&smp_reschedule_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	for (vector = INVALIDATE_TLB_VECTOR_START;
	     vector <= INVALIDATE_TLB_VECTOR_END; ++vector)
		ipipe_virtualize_irq(ipipe_root_domain,
				     ipipe_apic_vector_irq(vector),
				     (ipipe_irq_handler_t)&smp_invalidate_interrupt,
				     NULL,
				     &__ipipe_ack_apic,
				     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(CALL_FUNCTION_VECTOR),
			     (ipipe_irq_handler_t)&smp_call_function_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(CALL_FUNCTION_SINGLE_VECTOR),
			     (ipipe_irq_handler_t)&smp_call_function_single_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     IRQ_MOVE_CLEANUP_VECTOR,
			     (ipipe_irq_handler_t)&smp_irq_move_cleanup_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);

	ipipe_virtualize_irq(ipipe_root_domain,
			     ipipe_apic_vector_irq(REBOOT_VECTOR),
			     (ipipe_irq_handler_t)&smp_reboot_interrupt,
			     NULL,
			     &__ipipe_ack_apic,
			     IPIPE_STDROOT_MASK);
#else
	(void)vector;
#endif	/* CONFIG_SMP */

	/* Finally, virtualize the remaining ISA and IO-APIC
	 * interrupts. Interrupts which have already been virtualized
	 * will just beget a silent -EPERM error since
	 * IPIPE_SYSTEM_MASK has been passed for them, that's ok. */

	for (irq = 0; irq < NR_IRQS; irq++)
		/*
		 * Fails for IPIPE_CRITICAL_IPI and IRQ_MOVE_CLEANUP_VECTOR,
		 * but that's ok.
		 */
		ipipe_virtualize_irq(ipipe_root_domain,
				     irq,
				     (ipipe_irq_handler_t)&do_IRQ,
				     NULL,
				     &__ipipe_ack_irq,
				     IPIPE_STDROOT_MASK);

#ifdef CONFIG_X86_LOCAL_APIC
	/* Eventually allow these vectors to be reprogrammed. */
	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI0].control &= ~IPIPE_SYSTEM_MASK;
	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI1].control &= ~IPIPE_SYSTEM_MASK;
	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI2].control &= ~IPIPE_SYSTEM_MASK;
	ipipe_root_domain->irqs[IPIPE_SERVICE_IPI3].control &= ~IPIPE_SYSTEM_MASK;
#endif	/* CONFIG_X86_LOCAL_APIC */
}

#ifdef CONFIG_SMP

cpumask_t __ipipe_set_irq_affinity(unsigned irq, cpumask_t cpumask)
{
	cpumask_t oldmask;

	if (irq_to_desc(irq)->chip->set_affinity == NULL)
		return CPU_MASK_NONE;

	if (cpus_empty(cpumask))
		return CPU_MASK_NONE; /* Return mask value -- no change. */

	cpus_and(cpumask, cpumask, cpu_online_map);
	if (cpus_empty(cpumask))
		return CPU_MASK_NONE;	/* Error -- bad mask value or non-routable IRQ. */

	cpumask_copy(&oldmask, irq_to_desc(irq)->affinity);
	irq_to_desc(irq)->chip->set_affinity(irq, &cpumask);

	return oldmask;
}

int __ipipe_send_ipi(unsigned ipi, cpumask_t cpumask)
{
	unsigned long flags;
	int self;

	if (ipi != IPIPE_SERVICE_IPI0 &&
	    ipi != IPIPE_SERVICE_IPI1 &&
	    ipi != IPIPE_SERVICE_IPI2 &&
	    ipi != IPIPE_SERVICE_IPI3)
		return -EINVAL;

	local_irq_save_hw(flags);

	self = cpu_isset(ipipe_processor_id(), cpumask);
	cpu_clear(ipipe_processor_id(), cpumask);

	if (!cpus_empty(cpumask))
		apic->send_IPI_mask(&cpumask, ipipe_apic_irq_vector(ipi));

	if (self)
		ipipe_trigger_irq(ipi);

	local_irq_restore_hw(flags);

	return 0;
}

/* Always called with hw interrupts off. */

void __ipipe_do_critical_sync(unsigned irq, void *cookie)
{
	int cpu = ipipe_processor_id();

	cpu_set(cpu, __ipipe_cpu_sync_map);

	/* Now we are in sync with the lock requestor running on another
	   CPU. Enter a spinning wait until he releases the global
	   lock. */
	spin_lock(&__ipipe_cpu_barrier);

	/* Got it. Now get out. */

	if (__ipipe_cpu_sync)
		/* Call the sync routine if any. */
		__ipipe_cpu_sync();

	spin_unlock(&__ipipe_cpu_barrier);

	cpu_clear(cpu, __ipipe_cpu_sync_map);
}

void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd)
{
	ipd->irqs[IPIPE_CRITICAL_IPI].acknowledge = &__ipipe_ack_apic;
	ipd->irqs[IPIPE_CRITICAL_IPI].handler = &__ipipe_do_critical_sync;
	ipd->irqs[IPIPE_CRITICAL_IPI].cookie = NULL;
	/* Immediately handle in the current domain but *never* pass */
	ipd->irqs[IPIPE_CRITICAL_IPI].control =
		IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK|IPIPE_SYSTEM_MASK;
}

#endif	/* CONFIG_SMP */

/*
 * ipipe_critical_enter() -- Grab the superlock excluding all CPUs but
 * the current one from a critical section. This lock is used when we
 * must enforce a global critical section for a single CPU in a
 * possibly SMP system whichever context the CPUs are running.
 */
unsigned long ipipe_critical_enter(void (*syncfn) (void))
{
	unsigned long flags;

	local_irq_save_hw(flags);

#ifdef CONFIG_SMP
	if (unlikely(num_online_cpus() == 1))
		return flags;

	{
		int cpu = ipipe_processor_id();
		cpumask_t lock_map;

		if (!cpu_test_and_set(cpu, __ipipe_cpu_lock_map)) {
			while (test_and_set_bit(0, &__ipipe_critical_lock)) {
				int n = 0;
				do {
					cpu_relax();
				} while (++n < cpu);
			}

			spin_lock(&__ipipe_cpu_barrier);

			__ipipe_cpu_sync = syncfn;

			/* Send the sync IPI to all processors but the current one. */
			apic->send_IPI_allbutself(IPIPE_CRITICAL_VECTOR);

			cpus_andnot(lock_map, cpu_online_map, __ipipe_cpu_lock_map);

			while (!cpus_equal(__ipipe_cpu_sync_map, lock_map))
				cpu_relax();
		}

		atomic_inc(&__ipipe_critical_count);
	}
#endif	/* CONFIG_SMP */

	return flags;
}

/* ipipe_critical_exit() -- Release the superlock. */

void ipipe_critical_exit(unsigned long flags)
{
#ifdef CONFIG_SMP
	if (num_online_cpus() == 1)
		goto out;

	if (atomic_dec_and_test(&__ipipe_critical_count)) {
		spin_unlock(&__ipipe_cpu_barrier);

		while (!cpus_empty(__ipipe_cpu_sync_map))
			cpu_relax();

		cpu_clear(ipipe_processor_id(), __ipipe_cpu_lock_map);
		clear_bit(0, &__ipipe_critical_lock);
		smp_mb__after_clear_bit();
	}
out:
#endif	/* CONFIG_SMP */

	local_irq_restore_hw(flags);
}

static inline void __fixup_if(int s, struct pt_regs *regs)
{
	/*
	 * Have the saved hw state look like the domain stall bit, so
	 * that __ipipe_unstall_iret_root() restores the proper
	 * pipeline state for the root stage upon exit.
	 */
	if (s)
		regs->flags &= ~X86_EFLAGS_IF;
	else
		regs->flags |= X86_EFLAGS_IF;
}

#ifdef CONFIG_X86_32

/*
 * Check the stall bit of the root domain to make sure the existing
 * preemption opportunity upon in-kernel resumption could be
 * exploited. In case a rescheduling could take place, the root stage
 * is stalled before the hw interrupts are re-enabled. This routine
 * must be called with hw interrupts off.
 */

asmlinkage int __ipipe_kpreempt_root(struct pt_regs regs)
{
	if (test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)))
		/* Root stage is stalled: rescheduling denied. */
		return 0;

	__ipipe_stall_root();
	trace_hardirqs_off();
	local_irq_enable_hw_notrace();

	return 1;	/* Ok, may reschedule now. */
}

asmlinkage void __ipipe_unstall_iret_root(struct pt_regs regs)
{
	struct ipipe_percpu_domain_data *p;

	/* Emulate IRET's handling of the interrupt flag. */

	local_irq_disable_hw();

	p = ipipe_root_cpudom_ptr();

	/*
	 * Restore the software state as it used to be on kernel
	 * entry. CAUTION: NMIs must *not* return through this
	 * emulation.
	 */
	if (raw_irqs_disabled_flags(regs.flags)) {
		if (!__test_and_set_bit(IPIPE_STALL_FLAG, &p->status))
			trace_hardirqs_off();
		regs.flags |= X86_EFLAGS_IF;
	} else {
		if (test_bit(IPIPE_STALL_FLAG, &p->status)) {
			trace_hardirqs_on();
			__clear_bit(IPIPE_STALL_FLAG, &p->status);
		}
		/*
		 * We could have received and logged interrupts while
		 * stalled in the syscall path: play the log now to
		 * release any pending event. The SYNC_BIT prevents
		 * infinite recursion in case of flooding.
		 */
		if (unlikely(__ipipe_ipending_p(p)))
			__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
	}
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
	ipipe_trace_end(0x8000000D);
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
}

#else /* !CONFIG_X86_32 */

#ifdef CONFIG_PREEMPT

asmlinkage void preempt_schedule_irq(void);

void __ipipe_preempt_schedule_irq(void)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;
	/*
	 * We have no IRQ state fixup on entry to exceptions in
	 * x86_64, so we have to stall the root stage before
	 * rescheduling.
	 */
	BUG_ON(!irqs_disabled_hw());
	local_irq_save(flags);
	local_irq_enable_hw();
	preempt_schedule_irq(); /* Ok, may reschedule now. */
	local_irq_disable_hw();

	/*
	 * Flush any pending interrupt that may have been logged after
	 * preempt_schedule_irq() stalled the root stage before
	 * returning to us, and now.
	 */
	p = ipipe_root_cpudom_ptr();
	if (unlikely(__ipipe_ipending_p(p))) {
		add_preempt_count(PREEMPT_ACTIVE);
		trace_hardirqs_on();
		clear_bit(IPIPE_STALL_FLAG, &p->status);
		__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
		sub_preempt_count(PREEMPT_ACTIVE);
	}

	__local_irq_restore_nosync(flags);
}

#endif	/* CONFIG_PREEMPT */

#endif /* !CONFIG_X86_32 */

void __ipipe_halt_root(void)
{
	struct ipipe_percpu_domain_data *p;

	/* Emulate sti+hlt sequence over the root domain. */

	local_irq_disable_hw();

	p = ipipe_root_cpudom_ptr();

	trace_hardirqs_on();
	clear_bit(IPIPE_STALL_FLAG, &p->status);

	if (unlikely(__ipipe_ipending_p(p))) {
		__ipipe_sync_pipeline(IPIPE_IRQ_DOALL);
		local_irq_enable_hw();
	} else {
#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
		ipipe_trace_end(0x8000000E);
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
		asm volatile("sti; hlt": : :"memory");
	}
}

static void do_machine_check_vector(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_MCE
#ifdef CONFIG_X86_32
	extern void (*machine_check_vector)(struct pt_regs *, long error_code);
	machine_check_vector(regs, error_code);
#else
	do_machine_check(regs, error_code);
#endif
#endif /* CONFIG_X86_MCE */
}

/* Work around genksyms's issue with over-qualification in decls. */

typedef void dotraplinkage __ipipe_exhandler(struct pt_regs *, long);

typedef __ipipe_exhandler *__ipipe_exptr;

static __ipipe_exptr __ipipe_std_extable[] = {

	[ex_do_divide_error] = &do_divide_error,
	[ex_do_overflow] = &do_overflow,
	[ex_do_bounds] = &do_bounds,
	[ex_do_invalid_op] = &do_invalid_op,
	[ex_do_coprocessor_segment_overrun] = &do_coprocessor_segment_overrun,
	[ex_do_invalid_TSS] = &do_invalid_TSS,
	[ex_do_segment_not_present] = &do_segment_not_present,
	[ex_do_stack_segment] = &do_stack_segment,
	[ex_do_general_protection] = do_general_protection,
	[ex_do_page_fault] = (__ipipe_exptr)&do_page_fault,
	[ex_do_spurious_interrupt_bug] = &do_spurious_interrupt_bug,
	[ex_do_coprocessor_error] = &do_coprocessor_error,
	[ex_do_alignment_check] = &do_alignment_check,
	[ex_machine_check_vector] = &do_machine_check_vector,
	[ex_do_simd_coprocessor_error] = &do_simd_coprocessor_error,
	[ex_do_device_not_available] = &do_device_not_available,
#ifdef CONFIG_X86_32
	[ex_do_iret_error] = &do_iret_error,
#endif
};

#ifdef CONFIG_KGDB
#include <linux/kgdb.h>

static int __ipipe_xlate_signo[] = {

	[ex_do_divide_error] = SIGFPE,
	[ex_do_debug] = SIGTRAP,
	[2] = -1,
	[ex_do_int3] = SIGTRAP,
	[ex_do_overflow] = SIGSEGV,
	[ex_do_bounds] = SIGSEGV,
	[ex_do_invalid_op] = SIGILL,
	[ex_do_device_not_available] = -1,
	[8] = -1,
	[ex_do_coprocessor_segment_overrun] = SIGFPE,
	[ex_do_invalid_TSS] = SIGSEGV,
	[ex_do_segment_not_present] = SIGBUS,
	[ex_do_stack_segment] = SIGBUS,
	[ex_do_general_protection] = SIGSEGV,
	[ex_do_page_fault] = SIGSEGV,
	[ex_do_spurious_interrupt_bug] = -1,
	[ex_do_coprocessor_error] = -1,
	[ex_do_alignment_check] = SIGBUS,
	[ex_machine_check_vector] = -1,
	[ex_do_simd_coprocessor_error] = -1,
	[20 ... 31] = -1,
#ifdef CONFIG_X86_32
	[ex_do_iret_error] = SIGSEGV,
#endif
};
#endif /* CONFIG_KGDB */

int __ipipe_handle_exception(struct pt_regs *regs, long error_code, int vector)
{
	bool root_entry = false;
	unsigned long flags = 0;
	unsigned long cr2 = 0;

	if (ipipe_root_domain_p) {
		root_entry = true;

		local_save_flags(flags);
		/*
		 * Replicate hw interrupt state into the virtual mask
		 * before calling the I-pipe event handler over the
		 * root domain. Also required later when calling the
		 * Linux exception handler.
		 */
		if (irqs_disabled_hw())
			local_irq_disable();
	}
#ifdef CONFIG_KGDB
	/* catch exception KGDB is interested in over non-root domains */
	else if (__ipipe_xlate_signo[vector] >= 0 &&
		 !kgdb_handle_exception(vector, __ipipe_xlate_signo[vector],
					error_code, regs))
		return 1;
#endif /* CONFIG_KGDB */

	if (vector == ex_do_page_fault)
		cr2 = native_read_cr2();

	if (unlikely(ipipe_trap_notify(vector, regs))) {
		if (root_entry)
			local_irq_restore_nosync(flags);
		return 1;
	}

	if (likely(ipipe_root_domain_p)) {
		/*
		 * In case we faulted in the iret path, regs.flags do not
		 * match the root domain state. The fault handler or the
		 * low-level return code may evaluate it. Fix this up, either
		 * by the root state sampled on entry or, if we migrated to
		 * root, with the current state.
		 */
		__fixup_if(root_entry ? raw_irqs_disabled_flags(flags) :
					raw_irqs_disabled(), regs);
	} else {
		/* Detect unhandled faults over non-root domains. */
		struct ipipe_domain *ipd = ipipe_current_domain;

		/* Switch to root so that Linux can handle the fault cleanly. */
		__ipipe_current_domain = ipipe_root_domain;

		ipipe_trace_panic_freeze();

		/* Always warn about user land and unfixable faults. */
		if ((error_code & 4) || !search_exception_tables(instruction_pointer(regs))) {
			printk(KERN_ERR "BUG: Unhandled exception over domain"
			       " %s at 0x%lx - switching to ROOT\n",
			       ipd->name, instruction_pointer(regs));
			dump_stack();
			ipipe_trace_panic_dump();
#ifdef CONFIG_IPIPE_DEBUG
		/* Also report fixable ones when debugging is enabled. */
		} else {
			printk(KERN_WARNING "WARNING: Fixable exception over "
			       "domain %s at 0x%lx - switching to ROOT\n",
			       ipd->name, instruction_pointer(regs));
			dump_stack();
			ipipe_trace_panic_dump();
#endif /* CONFIG_IPIPE_DEBUG */
		}
	}

	if (vector == ex_do_page_fault)
		write_cr2(cr2);

	__ipipe_std_extable[vector](regs, error_code);

	/*
	 * Relevant for 64-bit: Restore root domain state as the low-level
	 * return code will not align it to regs.flags.
	 */
	if (root_entry)
		local_irq_restore_nosync(flags);

	return 0;
}

int __ipipe_divert_exception(struct pt_regs *regs, int vector)
{
	bool root_entry = false;
	unsigned long flags = 0;

	if (ipipe_root_domain_p) {
		root_entry = true;

		local_save_flags(flags);

		if (irqs_disabled_hw()) {
			/*
			 * Same root state handling as in
			 * __ipipe_handle_exception.
			 */
			local_irq_disable();
		}
	}
#ifdef CONFIG_KGDB
	/* catch int1 and int3 over non-root domains */
	else {
#ifdef CONFIG_X86_32
		if (vector != ex_do_device_not_available)
#endif
		{
			unsigned int condition = 0;

			if (vector == 1)
				get_debugreg(condition, 6);
			if (!kgdb_handle_exception(vector, SIGTRAP, condition, regs))
				return 1;
		}
	}
#endif /* CONFIG_KGDB */

	if (unlikely(ipipe_trap_notify(vector, regs))) {
		if (root_entry)
			local_irq_restore_nosync(flags);
		return 1;
	}

	/* see __ipipe_handle_exception */
	if (likely(ipipe_root_domain_p))
		__fixup_if(root_entry ? raw_irqs_disabled_flags(flags) :
					raw_irqs_disabled(), regs);
	/*
	 * No need to restore root state in the 64-bit case, the Linux handler
	 * and the return code will take care of it.
	 */

	return 0;
}

int __ipipe_syscall_root(struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;
	int ret;

	/*
	 * This routine either returns:
	 * 0 -- if the syscall is to be passed to Linux;
	 * >0 -- if the syscall should not be passed to Linux, and no
	 * tail work should be performed;
	 * <0 -- if the syscall should not be passed to Linux but the
	 * tail work has to be performed (for handling signals etc).
	 */

	if (!__ipipe_syscall_watched_p(current, regs->orig_ax) ||
	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
		return 0;

	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
	if (!ipipe_root_domain_p) {
#ifdef CONFIG_X86_64
		local_irq_disable_hw();
#endif
		return 1;
	}

	local_irq_save_hw(flags);
	p = ipipe_root_cpudom_ptr();
#ifdef CONFIG_X86_32
	/*
	 * Fix-up only required on 32-bit as only here the IRET return code
	 * will evaluate the flags.
	 */
	__fixup_if(test_bit(IPIPE_STALL_FLAG, &p->status), regs);
#endif
	/*
	 * If allowed, sync pending VIRQs before _TIF_NEED_RESCHED is
	 * tested.
	 */
	if (__ipipe_ipending_p(p))
		__ipipe_sync_pipeline(IPIPE_IRQ_DOVIRT);
#ifdef CONFIG_X86_64
	if (!ret)
#endif
		local_irq_restore_hw(flags);

	return -ret;
}

/*
 * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
 * interrupt protection log is maintained here for each domain.  Hw
 * interrupts are off on entry.
 */
int __ipipe_handle_irq(struct pt_regs *regs)
{
	struct ipipe_domain *this_domain, *next_domain;
	unsigned int vector = regs->orig_ax, irq;
	struct list_head *head, *pos;
	int m_ack;

	if ((long)regs->orig_ax < 0) {
		vector = ~vector;
#ifdef CONFIG_X86_LOCAL_APIC
		if (vector >= FIRST_SYSTEM_VECTOR)
			irq = ipipe_apic_vector_irq(vector);
#ifdef CONFIG_SMP
		else if (vector == IRQ_MOVE_CLEANUP_VECTOR)
			irq = vector;
#endif /* CONFIG_SMP */
		else
#endif /* CONFIG_X86_LOCAL_APIC */
			irq = __get_cpu_var(vector_irq)[vector];
		m_ack = 0;
	} else { /* This is a self-triggered one. */
		irq = vector;
		m_ack = 1;
	}

	this_domain = ipipe_current_domain;

	if (test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control))
		head = &this_domain->p_link;
	else {
		head = __ipipe_pipeline.next;
		next_domain = list_entry(head, struct ipipe_domain, p_link);
		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
			if (!m_ack && next_domain->irqs[irq].acknowledge)
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
			__ipipe_dispatch_wired(next_domain, irq);
			goto finalize_nosync;
		}
	}

	/* Ack the interrupt. */

	pos = head;

	while (pos != &__ipipe_pipeline) {
		next_domain = list_entry(pos, struct ipipe_domain, p_link);
		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
			__ipipe_set_irq_pending(next_domain, irq);
			if (!m_ack && next_domain->irqs[irq].acknowledge) {
				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
				m_ack = 1;
			}
		}
		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
			break;
		pos = next_domain->p_link.next;
	}

	/*
	 * If the interrupt preempted the head domain, then do not
	 * even try to walk the pipeline, unless an interrupt is
	 * pending for it.
	 */
	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
		goto finalize_nosync;

	/*
	 * Now walk the pipeline, yielding control to the highest
	 * priority domain that has pending interrupt(s) or
	 * immediately to the current domain if the interrupt has been
	 * marked as 'sticky'. This search does not go beyond the
	 * current domain in the pipeline.
	 */

	__ipipe_walk_pipeline(head);

finalize_nosync:

	/*
	 * Given our deferred dispatching model for regular IRQs, we
	 * only record CPU regs for the last timer interrupt, so that
	 * the timer handler charges CPU times properly. It is assumed
	 * that other interrupt handlers don't actually care for such
	 * information.
	 */

	if (irq == __ipipe_tick_irq) {
		struct pt_regs *tick_regs = &__raw_get_cpu_var(__ipipe_tick_regs);
		tick_regs->flags = regs->flags;
		tick_regs->cs = regs->cs;
		tick_regs->ip = regs->ip;
		tick_regs->bp = regs->bp;
#ifdef CONFIG_X86_64
		tick_regs->ss = regs->ss;
		tick_regs->sp = regs->sp;
#endif
		if (!ipipe_root_domain_p)
			tick_regs->flags &= ~X86_EFLAGS_IF;
	}

	if (!ipipe_root_domain_p ||
	    test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status)))
		return 0;

#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
	/*
	 * Prevent a spurious rescheduling from being triggered on
	 * preemptible kernels along the way out through
	 * ret_from_intr.
	 */
	if ((long)regs->orig_ax < 0)
		__set_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
#endif	/* CONFIG_X86_32 && CONFIG_SMP */

	return 1;
}

int __ipipe_check_tickdev(const char *devname)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!strcmp(devname, "lapic"))
		return __ipipe_check_lapic();
#endif

	return 1;
}

void *ipipe_irq_handler = __ipipe_handle_irq;
EXPORT_SYMBOL(ipipe_irq_handler);
EXPORT_SYMBOL(io_apic_irqs);
EXPORT_PER_CPU_SYMBOL(__ipipe_tick_regs);
__attribute__((regparm(3))) void do_notify_resume(struct pt_regs *, void *, __u32);
EXPORT_SYMBOL(do_notify_resume);
extern void *sys_call_table;
EXPORT_SYMBOL(sys_call_table);
#ifdef CONFIG_X86_32
extern void ret_from_intr(void);
EXPORT_SYMBOL(ret_from_intr);
extern spinlock_t i8259A_lock;
extern struct desc_struct idt_table[];
#else
extern ipipe_spinlock_t i8259A_lock;
extern gate_desc idt_table[];
#endif
EXPORT_PER_CPU_SYMBOL(vector_irq);
EXPORT_SYMBOL(idt_table);
EXPORT_SYMBOL(i8259A_lock);
EXPORT_SYMBOL(__ipipe_sync_stage);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(find_task_by_pid_ns);

EXPORT_SYMBOL(__ipipe_tick_irq);

EXPORT_SYMBOL_GPL(irq_to_desc);
struct task_struct *__switch_to(struct task_struct *prev_p,
				struct task_struct *next_p);
EXPORT_SYMBOL_GPL(__switch_to);
EXPORT_SYMBOL_GPL(show_stack);

EXPORT_PER_CPU_SYMBOL_GPL(init_tss);
#ifdef CONFIG_SMP
EXPORT_PER_CPU_SYMBOL_GPL(cpu_tlbstate);
#endif /* CONFIG_SMP */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
EXPORT_SYMBOL(tasklist_lock);
#endif /* CONFIG_SMP || CONFIG_DEBUG_SPINLOCK */

#if defined(CONFIG_CC_STACKPROTECTOR) && defined(CONFIG_X86_64)
EXPORT_PER_CPU_SYMBOL_GPL(irq_stack_union);
#endif

EXPORT_SYMBOL(__ipipe_halt_root);
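For orientation, a minimal usage sketch of the two entry points a client domain typically touches, ipipe_virtualize_irq() and ipipe_trigger_irq(), following the calling convention used in __ipipe_enable_pipeline() above. This is illustrative only: the handler body, and passing a NULL acknowledge to keep the default, are assumptions rather than anything this patch specifies.

/* Hypothetical client-domain code, not part of the patch. */
static void my_handler(unsigned irq, void *cookie)
{
	/* invoked from the pipeline walk when "irq" is dispatched */
}

static int attach_sketch(struct ipipe_domain *ipd, unsigned irq)
{
	int ret;

	ret = ipipe_virtualize_irq(ipd, irq, &my_handler, NULL,
				   NULL,	/* assumed: keep default ack */
				   IPIPE_STDROOT_MASK);
	if (ret == 0)
		ret = ipipe_trigger_irq(irq);	/* self-post, as if from hw */
	return ret;
}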
(-)a/arch/x86/kernel/irq.c (-3 / +4 lines)
Lines 38-44 void ack_bad_irq(unsigned int irq)
 	 * completely.
 	 * But only ack when the APIC is enabled -AK
 	 */
-	ack_APIC_irq();
+	__ack_APIC_irq();
 }

 #define irq_stats(x)		(&per_cpu(irq_stat, x))
Lines 231-241 unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 	unsigned vector = ~regs->orig_ax;
 	unsigned irq;

+	irq = __get_cpu_var(vector_irq)[vector];
+	__ipipe_move_root_irq(irq);
+
 	exit_idle();
 	irq_enter();

-	irq = __get_cpu_var(vector_irq)[vector];
-
 	if (!handle_irq(irq, regs)) {
 		ack_APIC_irq();

(-)a/arch/x86/kernel/irqinit.c (+12 lines)
Lines 159-169 static void __init smp_intr_init(void)
 	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
 	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
 	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
+#ifndef CONFIG_IPIPE
 	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
 	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
 	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
 	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
 	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
+#endif

 	/* IPI for generic function call */
 	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
Lines 178-183 static void __init smp_intr_init(void)

 	/* IPI used for rebooting/stopping */
 	alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
+#if defined(CONFIG_IPIPE) && defined(CONFIG_X86_32)
+	/* IPI for critical lock */
+	alloc_intr_gate(IPIPE_CRITICAL_VECTOR, ipipe_ipiX);
+#endif
 #endif
 #endif /* CONFIG_SMP */
 }
Lines 212-217 static void __init apic_intr_init(void)
 	alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
 # endif

+#if defined(CONFIG_IPIPE) && defined(CONFIG_X86_32)
+	alloc_intr_gate(IPIPE_SERVICE_VECTOR0, ipipe_ipi0);
+	alloc_intr_gate(IPIPE_SERVICE_VECTOR1, ipipe_ipi1);
+	alloc_intr_gate(IPIPE_SERVICE_VECTOR2, ipipe_ipi2);
+	alloc_intr_gate(IPIPE_SERVICE_VECTOR3, ipipe_ipi3);
+#endif
 #endif
 }

(-)a/arch/x86/kernel/process.c (-2 / +19 lines)
Lines 35-41 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 			return -ENOMEM;
 		WARN_ON((unsigned long)dst->thread.xstate & 15);
 		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
+	} else {
+#ifdef CONFIG_IPIPE
+		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+						      GFP_KERNEL);
+		if (!dst->thread.xstate)
+			return -ENOMEM;
+#endif
 	}
+
 	return 0;
 }

Lines 61-66 void arch_task_cache_init(void)
         	kmem_cache_create("task_xstate", xstate_size,
 				  __alignof__(union thread_xstate),
 				  SLAB_PANIC | SLAB_NOTRACK, NULL);
+#ifdef CONFIG_IPIPE
+	current->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
+						  GFP_KERNEL);
+#endif
 }

 /*
Lines 309-315 EXPORT_SYMBOL(default_idle);

 void stop_this_cpu(void *dummy)
 {
-	local_irq_disable();
+	local_irq_disable_hw();
 	/*
 	 * Remove this CPU:
 	 */
Lines 534-539 static void c1e_idle(void)

 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
+#ifdef CONFIG_IPIPE
+#define default_to_mwait force_mwait
+#else
+#define default_to_mwait 1
+#endif
 #ifdef CONFIG_SMP
 	if (pm_idle == poll_idle && smp_num_siblings > 1) {
 		printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
Lines 543-549 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 	if (pm_idle)
 		return;

-	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
+	if (default_to_mwait && cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
 		/*
 		 * One CPU supports mwait => All CPUs supports mwait
 		 */
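The two xstate hunks above trade lazy FPU-context allocation for an eager one under CONFIG_IPIPE: a head-domain task switch may need thread.xstate in a context where a GFP_KERNEL allocation would be unsafe, so every task gets its save area at fork time. A toy model of the policy, names illustrative:

#include <stdlib.h>

struct thread_sketch {
	void *xstate;	/* FPU save area, normally allocated on first use */
};

/* Eager policy, as in the CONFIG_IPIPE branches above: pay the allocation
 * up front so no later code path ever needs to allocate. */
static int dup_task_eager_sketch(struct thread_sketch *dst)
{
	dst->xstate = calloc(1, 512);	/* stand-in for kmem_cache_alloc() */
	return dst->xstate ? 0 : -1;	/* -ENOMEM in the real code */
}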
(-)a/arch/x86/kernel/process_32.c (-1 / +3 lines)
Lines 305-314 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 	regs->cs		= __USER_CS;
 	regs->ip		= new_ip;
 	regs->sp		= new_sp;
+#ifndef CONFIG_IPIPE	/* Lazily handled, init_fpu() will reset the state. */
 	/*
 	 * Free the old FP and other extended state
 	 */
 	free_thread_xstate(current);
+#endif
 }
 EXPORT_SYMBOL_GPL(start_thread);

Lines 345-351 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev = &prev_p->thread,
 				 *next = &next_p->thread;
-	int cpu = smp_processor_id();
+	int cpu = raw_smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 	bool preload_fpu;

(-)a/arch/x86/kernel/process_64.c (-1 / +6 lines)
Lines 58-63 asmlinkage extern void ret_from_fork(void);
 DEFINE_PER_CPU(unsigned long, old_rsp);
 static DEFINE_PER_CPU(unsigned char, is_idle);

+asmlinkage extern void thread_return(void);
+
 unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
Lines 292-297 int copy_thread(unsigned long clone_flags, unsigned long sp,
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.sp0 = (unsigned long) (childregs+1);
 	p->thread.usersp = me->thread.usersp;
+	p->thread.rip = (unsigned long) thread_return;

 	set_tsk_thread_flag(p, TIF_FORK);

Lines 358-367 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 	regs->ss		= __USER_DS;
 	regs->flags		= 0x200;
 	set_fs(USER_DS);
+#ifndef CONFIG_IPIPE	/* Lazily handled, init_fpu() will reset the state. */
 	/*
 	 * Free the old FP and other extended state
 	 */
 	free_thread_xstate(current);
+#endif
 }
 EXPORT_SYMBOL_GPL(start_thread);

Lines 380-386 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev = &prev_p->thread;
 	struct thread_struct *next = &next_p->thread;
-	int cpu = smp_processor_id();
+	int cpu = raw_smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 	unsigned fsindex, gsindex;
 	bool preload_fpu;
(-)a/arch/x86/kernel/smp.c (-2 / +2 lines)
Lines 184-192 static void native_smp_send_stop(void)
 			udelay(1);
 	}

-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	disable_local_APIC();
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }

 /*
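Throughout these hunks, plain local_irq_save()/restore() become _hw variants (and, in smpboot.c below, _full variants that are assumed to act on both layers). Under the I-pipe, the plain forms only stall the root stage's virtual mask, while the _hw forms really toggle the CPU interrupt flag, which is what code like disable_local_APIC() needs. A conceptual model, not kernel code:

/* Two masking layers: a per-domain virtual mask and the real CPU flag. */
static int root_stalled_sketch;	/* virtual: IRQs still arrive, get logged */
static int hw_if_sketch = 1;	/* model of EFLAGS.IF: 0 == nothing arrives */

static unsigned long irq_save_virtual_sketch(void)
{
	unsigned long old = root_stalled_sketch;
	root_stalled_sketch = 1;	/* like local_irq_save() over root */
	return old;
}

static unsigned long irq_save_hw_sketch(void)
{
	unsigned long old = hw_if_sketch;
	hw_if_sketch = 0;		/* like "cli": local_irq_save_hw() */
	return old;
}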
(-)a/arch/x86/kernel/smpboot.c (-4 / +4 lines)
Lines 266-272 static void __cpuinit smp_callin(void)
 /*
  * Activate a secondary processor.
  */
-notrace static void __cpuinit start_secondary(void *unused)
+static void __cpuinit start_secondary(void *unused)
 {
 	/*
 	 * Don't put *anything* before cpu_init(), SMP booting is too
Lines 837-843 do_rest:
 int __cpuinit native_cpu_up(unsigned int cpu)
 {
 	int apicid = apic->cpu_present_to_apicid(cpu);
-	unsigned long flags;
+	unsigned long flags, _flags;
 	int err;

 	WARN_ON(irqs_disabled());
Lines 889-897 int __cpuinit native_cpu_up(unsigned int cpu)
 	 * Check TSC synchronization with the AP (keep irqs disabled
 	 * while doing so):
 	 */
-	local_irq_save(flags);
+	local_irq_save_full(flags, _flags);
 	check_tsc_sync_source(cpu);
-	local_irq_restore(flags);
+	local_irq_restore_full(flags, _flags);

 	while (!cpu_online(cpu)) {
 		cpu_relax();
(-)a/arch/x86/kernel/time.c (-2 / +3 lines)
Lines 70-80 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 		 * manually to deassert NMI lines for the watchdog if run
 		 * on an 82489DX-based system.
 		 */
-		spin_lock(&i8259A_lock);
+		unsigned long flags;
+		spin_lock_irqsave_cond(&i8259A_lock,flags);
 		outb(0x0c, PIC_MASTER_OCW3);
 		/* Ack the IRQ; AEOI will end it automatically. */
 		inb(PIC_MASTER_POLL);
-		spin_unlock(&i8259A_lock);
+		spin_unlock_irqrestore_cond(&i8259A_lock,flags);
 	}

 	global_clock_event->event_handler(global_clock_event);
(-)a/arch/x86/kernel/traps.c (+4 lines)
Lines 805-810 void __math_state_restore(void)
 	 */
 	if (unlikely(restore_fpu_checking(tsk))) {
 		stts();
+		local_irq_enable_hw_cond();
 		force_sig(SIGSEGV, tsk);
 		return;
 	}
Lines 827-832 asmlinkage void math_state_restore(void)
 {
 	struct thread_info *thread = current_thread_info();
 	struct task_struct *tsk = thread->task;
+	unsigned long flags;

 	if (!tsk_used_math(tsk)) {
 		local_irq_enable();
Lines 843-851 asmlinkage void math_state_restore(void)
 		local_irq_disable();
 	}

+	local_irq_save_hw_cond(flags);
 	clts();				/* Allow maths ops (or we recurse) */

 	__math_state_restore();
+	local_irq_restore_hw_cond(flags);
 }
 EXPORT_SYMBOL_GPL(math_state_restore);

(-)a/arch/x86/kernel/vm86_32.c (+4 lines)
Lines 148-159 struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
 		do_exit(SIGSEGV);
 	}

+	local_irq_disable_hw_cond();
 	tss = &per_cpu(init_tss, get_cpu());
 	current->thread.sp0 = current->thread.saved_sp0;
 	current->thread.sysenter_cs = __KERNEL_CS;
 	load_sp0(tss, &current->thread);
 	current->thread.saved_sp0 = 0;
 	put_cpu();
+	local_irq_enable_hw_cond();

 	ret = KVM86->regs32;

Lines 324-335 static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	tsk->thread.saved_fs = info->regs32->fs;
 	tsk->thread.saved_gs = get_user_gs(info->regs32);

+	local_irq_disable_hw_cond();
 	tss = &per_cpu(init_tss, get_cpu());
 	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
 	if (cpu_has_sep)
 		tsk->thread.sysenter_cs = 0;
 	load_sp0(tss, &tsk->thread);
 	put_cpu();
+	local_irq_enable_hw_cond();

 	tsk->thread.screen_bitmap = info->screen_bitmap;
 	if (info->flags & VM86_SCREEN_BITMAP)
(-)a/arch/x86/lib/mmx_32.c (-1 / +1 lines)
Lines 30-36 void *_mmx_memcpy(void *to, const void *from, size_t len)
 	void *p;
 	int i;
 
-	if (unlikely(in_interrupt()))
+	if (unlikely(!ipipe_root_domain_p || in_interrupt()))
 		return __memcpy(to, from, len);
 
 	p = to;
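The added `!ipipe_root_domain_p' test keeps the MMX-accelerated copy away from the FPU whenever the caller is not running over the root (Linux) domain, where the kernel's lazy FPU switching assumptions do not hold. The same guard works for any opportunistic FPU fast path; a sketch, with my_copy() being hypothetical:

	static void *my_copy(void *to, const void *from, size_t len)
	{
		/* Only the root domain may borrow the FPU, and not from
		 * interrupt context; otherwise use the plain integer copy. */
		if (unlikely(!ipipe_root_domain_p || in_interrupt()))
			return __memcpy(to, from, len);

		return _mmx_memcpy(to, from, len);	/* FPU-based fast path */
	}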
(-)a/arch/x86/lib/thunk_64.S (+4 lines)
Lines 65-70
 	thunk lockdep_sys_exit_thunk,lockdep_sys_exit
 #endif
 
+#ifdef CONFIG_IPIPE
+	thunk_retrax __ipipe_syscall_root_thunk,__ipipe_syscall_root
+#endif
+
 	/* SAVE_ARGS below is used only for the .cfi directives it contains. */
 	CFI_STARTPROC
 	SAVE_ARGS
(-)a/arch/x86/mm/fault.c (-37 / +86 lines)
Lines 1-3
+
 /*
  *  Copyright (C) 1995  Linus Torvalds
  *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
Lines 323-365 out:
 
 #else /* CONFIG_X86_64: */
 
-void vmalloc_sync_all(void)
-{
-	unsigned long address;
-
-	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
-	     address += PGDIR_SIZE) {
-
-		const pgd_t *pgd_ref = pgd_offset_k(address);
-		unsigned long flags;
-		struct page *page;
-
-		if (pgd_none(*pgd_ref))
-			continue;
-
-		spin_lock_irqsave(&pgd_lock, flags);
-		list_for_each_entry(page, &pgd_list, lru) {
-			pgd_t *pgd;
-			pgd = (pgd_t *)page_address(page) + pgd_index(address);
-			if (pgd_none(*pgd))
-				set_pgd(pgd, *pgd_ref);
-			else
-				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
-	}
-}
-
-/*
- * 64-bit:
- *
- *   Handle a fault on the vmalloc area
- *
- * This assumes no large pages in there.
- */
-static noinline int vmalloc_fault(unsigned long address)
+static inline int vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 {
-	pgd_t *pgd, *pgd_ref;
+	pgd_t *pgd_ref;
 	pud_t *pud, *pud_ref;
 	pmd_t *pmd, *pmd_ref;
 	pte_t *pte, *pte_ref;
Lines 373-379 static noinline int vmalloc_fault(unsigned long address)
 	 * happen within a race in page table update. In the later
 	 * case just flush:
 	 */
-	pgd = pgd_offset(current->active_mm, address);
 	pgd_ref = pgd_offset_k(address);
 	if (pgd_none(*pgd_ref))
 		return -1;
Lines 421-426 static noinline int vmalloc_fault(unsigned long address)
 	return 0;
 }
 
+void vmalloc_sync_all(void)
+{
+	unsigned long address;
+
+	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
+	     address += PGDIR_SIZE) {
+
+		const pgd_t *pgd_ref = pgd_offset_k(address);
+		unsigned long flags;
+		struct page *page;
+
+		if (pgd_none(*pgd_ref))
+			continue;
+
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
+			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+			else
+				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+		}
+		spin_unlock_irqrestore(&pgd_lock, flags);
+	}
+}
+
+/*
+ * 64-bit:
+ *
+ *   Handle a fault on the vmalloc area
+ *
+ * This assumes no large pages in there.
+ */
+static noinline int vmalloc_fault(unsigned long address)
+{
+	pgd_t *pgd = pgd_offset(current->active_mm, address);
+	return vmalloc_sync_one(pgd, address);
+}
+
 static const char errata93_warning[] =
 KERN_ERR
 "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
Lines 958-963 do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	/* Get the faulting address: */
 	address = read_cr2();
 
+	if (!__ipipe_pipeline_head_p(ipipe_root_domain))
+		local_irq_enable_hw_cond();
+
 	/*
 	 * Detect and handle instructions that would cause a page fault for
 	 * both a tracked kernel page and a userspace page.
Lines 1137-1139 good_area:
 
 	up_read(&mm->mmap_sem);
 }
+
+#ifdef CONFIG_IPIPE
+void __ipipe_pin_range_globally(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_X86_32
+	unsigned long next, addr = start;
+
+	do {
+		unsigned long flags;
+		struct page *page;
+
+		next = pgd_addr_end(addr, end);
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru)
+			vmalloc_sync_one(page_address(page), addr);
+		spin_unlock_irqrestore(&pgd_lock, flags);
+
+	} while (addr = next, addr != end);
+#else
+	unsigned long next, addr = start;
+	int ret = 0;
+
+	do {
+		struct page *page;
+
+		next = pgd_addr_end(addr, end);
+		spin_lock(&pgd_lock);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
+			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
+			ret = vmalloc_sync_one(pgd, addr);
+			if (ret)
+				break;
+		}
+		spin_unlock(&pgd_lock);
+		addr = next;
+	} while (!ret && addr != end);
+#endif
+}
+#endif /* CONFIG_IPIPE */
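__ipipe_pin_range_globally() added above gives a co-kernel a way to force a kernel virtual range into every page directory up front. A plausible use, sketched under the assumption that the caller wants fault-free access from the head domain; the buffer name and size are hypothetical:

	#include <linux/vmalloc.h>

	#define RT_BUF_SIZE	(64 * 1024)	/* hypothetical */
	static void *rt_buf;

	static int rt_alloc_pinned_buffer(void)
	{
		rt_buf = vmalloc(RT_BUF_SIZE);
		if (rt_buf == NULL)
			return -ENOMEM;

		/* Sync the mapping into all page tables now, so touching
		 * it later from the head domain cannot raise a fault. */
		__ipipe_pin_range_globally((unsigned long)rt_buf,
					   (unsigned long)rt_buf + RT_BUF_SIZE);
		return 0;
	}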
(-)a/arch/x86/mm/tlb.c (+7 lines)
Lines 57-67 static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
  */
 void leave_mm(int cpu)
 {
+	unsigned long flags;
+
 	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
+	local_irq_save_hw_cond(flags);
 	cpumask_clear_cpu(cpu,
 			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
 	load_cr3(swapper_pg_dir);
+	local_irq_restore_hw_cond(flags);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
 
Lines 192-197 static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
 			      INVALIDATE_TLB_VECTOR_START + sender);
 
+#ifdef CONFIG_IPIPE
+		WARN_ON_ONCE(irqs_disabled_hw());
+#endif
 		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
 			cpu_relax();
 	}
(-)a/drivers/pci/htirq.c (-1 / +1 lines)
Lines 21-27
  * With multiple simultaneous hypertransport irq devices it might pay
  * to make this more fine grained.  But start with simple, stupid, and correct.
  */
-static DEFINE_SPINLOCK(ht_irq_lock);
+static IPIPE_DEFINE_SPINLOCK(ht_irq_lock);
 
 struct ht_irq_cfg {
 	struct pci_dev *dev;
(-)a/drivers/serial/8250.c (+47 lines)
Lines 3016-3021 static int serial8250_resume(struct platform_device *dev)
 	return 0;
 }
 
+#if defined(CONFIG_IPIPE_DEBUG) && defined(CONFIG_SERIAL_8250_CONSOLE)
+
+#include <stdarg.h>
+
+void __weak __ipipe_serial_debug(const char *fmt, ...)
+{
+	struct uart_8250_port *up = &serial8250_ports[0];
+	unsigned int ier, count;
+	unsigned long flags;
+	char buf[128];
+	va_list ap;
+
+	va_start(ap, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, ap);	/* bound the write into buf[] */
+	va_end(ap);
+	count = strlen(buf);
+
+	touch_nmi_watchdog();
+
+	local_irq_save_hw(flags);
+
+	/*
+	 *	First save the IER then disable the interrupts
+	 */
+	ier = serial_in(up, UART_IER);
+
+	if (up->capabilities & UART_CAP_UUE)
+		serial_out(up, UART_IER, UART_IER_UUE);
+	else
+		serial_out(up, UART_IER, 0);
+
+	uart_console_write(&up->port, buf, count, serial8250_console_putchar);
+
+	/*
+	 *	Finally, wait for transmitter to become empty
+	 *	and restore the IER
+	 */
+	wait_for_xmitr(up, BOTH_EMPTY);
+	serial_out(up, UART_IER, ier);
+
+	local_irq_restore_hw(flags);
+}
+
+EXPORT_SYMBOL(__ipipe_serial_debug);
+
+#endif
+
 static struct platform_driver serial8250_isa_driver = {
 	.probe		= serial8250_probe,
 	.remove		= __devexit_p(serial8250_remove),
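__ipipe_serial_debug() bypasses the console and printk layers entirely and writes synchronously to the first 8250 port with hardware IRQs masked, so it remains usable where printk() is not, e.g. over a non-root domain or on an NMI path. A hypothetical probe:

	/* Raw, synchronous trace usable from any pipeline domain; the
	 * handler and its arguments are hypothetical. */
	static void my_rt_handler(unsigned int irq, void *cookie)
	{
		__ipipe_serial_debug("head domain got irq %u (cookie %p)\n",
				     irq, cookie);
	}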
(-)a/fs/exec.c (+3 lines)
Lines 715-720 static int exec_mmap(struct mm_struct *mm)
 {
 	struct task_struct *tsk;
 	struct mm_struct * old_mm, *active_mm;
+	unsigned long flags;
 
 	/* Notify parent that we're no longer interested in the old VM */
 	tsk = current;
Lines 737-744 static int exec_mmap(struct mm_struct *mm)
 	task_lock(tsk);
 	active_mm = tsk->active_mm;
 	tsk->mm = mm;
+	ipipe_mm_switch_protect(flags);
 	tsk->active_mm = mm;
 	activate_mm(active_mm, mm);
+	ipipe_mm_switch_unprotect(flags);
 	task_unlock(tsk);
 	arch_pick_mmap_layout(mm);
 	if (old_mm) {
(-)a/include/asm-generic/atomic.h (-6 / +6 lines)
Lines 60-70 static inline int atomic_add_return(int i, atomic_t *v)
 	unsigned long flags;
 	int temp;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	temp = v->counter;
 	temp += i;
 	v->counter = temp;
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	return temp;
 }
Lines 82-92 static inline int atomic_sub_return(int i, atomic_t *v)
 	unsigned long flags;
 	int temp;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	temp = v->counter;
 	temp -= i;
 	v->counter = temp;
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	return temp;
 }
Lines 139-147 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	unsigned long flags;
 
 	mask = ~mask;
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	*addr &= mask;
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 
 #define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
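Swapping local_irq_save() for local_irq_save_hw() is essential here: under the pipeline the former only virtually stalls the root stage, so a higher-priority domain could still interrupt the read-modify-write sequence; the _hw variant masks interrupts in the CPU itself. A sketch of the rule, mirroring the patched atomic_add_return() (the function name is hypothetical; the _hw macros are the ones this patch defines):

	static inline int my_add_return(int i, atomic_t *v)
	{
		unsigned long flags;
		int temp;

		local_irq_save_hw(flags);	/* real CLI: no domain can preempt */
		temp = v->counter;
		temp += i;
		v->counter = temp;
		local_irq_restore_hw(flags);

		return temp;
	}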
(-)a/include/asm-generic/bitops/atomic.h (-4 / +4 lines)
Lines 21-40 extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
  * this is the substitute */
 #define _atomic_spin_lock_irqsave(l,f) do {	\
 	raw_spinlock_t *s = ATOMIC_HASH(l);	\
-	local_irq_save(f);			\
+	local_irq_save_hw(f);			\
 	__raw_spin_lock(s);			\
 } while(0)
 
 #define _atomic_spin_unlock_irqrestore(l,f) do {	\
 	raw_spinlock_t *s = ATOMIC_HASH(l);		\
 	__raw_spin_unlock(s);				\
-	local_irq_restore(f);				\
+	local_irq_restore_hw(f);			\
 } while(0)
 
 
 #else
-#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
-#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
+#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save_hw(f); } while (0)
+#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore_hw(f); } while (0)
 #endif
 
 /*
(-)a/include/asm-generic/cmpxchg-local.h (-4 / +4 lines)
Lines 20-26 static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
 	if (size == 8 && sizeof(unsigned long) != 8)
 		wrong_size_cmpxchg(ptr);
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	switch (size) {
 	case 1: prev = *(u8 *)ptr;
 		if (prev == old)
Lines 41-47 static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
 	default:
 		wrong_size_cmpxchg(ptr);
 	}
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 	return prev;
 }
 
Lines 54-64 static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
 	u64 prev;
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	prev = *(u64 *)ptr;
 	if (prev == old)
 		*(u64 *)ptr = new;
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 	return prev;
 }
 
(-)a/include/asm-generic/percpu.h (+15 lines)
Lines 56-61 extern unsigned long __per_cpu_offset[NR_CPUS];
 #define __raw_get_cpu_var(var) \
 	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
 
+#ifdef CONFIG_IPIPE
+#if defined(CONFIG_IPIPE_DEBUG_INTERNAL) && defined(CONFIG_SMP)
+extern int __ipipe_check_percpu_access(void);
+#define __ipipe_local_cpu_offset				\
+	({							\
+		WARN_ON_ONCE(__ipipe_check_percpu_access());	\
+		__my_cpu_offset;				\
+	})
+#else
+#define __ipipe_local_cpu_offset  __my_cpu_offset
+#endif
+#define __ipipe_get_cpu_var(var) \
+	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __ipipe_local_cpu_offset))
+#endif /* CONFIG_IPIPE */
 
 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
Lines 66-71 extern void setup_per_cpu_areas(void);
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu_var(var)))
 #define __get_cpu_var(var)			per_cpu_var(var)
 #define __raw_get_cpu_var(var)			per_cpu_var(var)
+#define __ipipe_get_cpu_var(var)		__raw_get_cpu_var(var)
 
 #endif	/* SMP */
 
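__ipipe_get_cpu_var() mirrors __raw_get_cpu_var(), but with CONFIG_IPIPE_DEBUG_INTERNAL on SMP it also WARN()s when the access could race a CPU migration. Since non-root domains cannot rely on preempt_disable(), the expected pattern is to hard-disable IRQs around the access; a sketch with a hypothetical per-CPU counter:

	static DEFINE_PER_CPU(unsigned long, my_percpu_hits);	/* hypothetical */

	static void my_count_hit(void)
	{
		unsigned long flags;

		/* Hard IRQs off: neither migration nor a domain switch
		 * can occur while we touch the per-CPU slot. */
		local_irq_save_hw(flags);
		__ipipe_get_cpu_var(my_percpu_hits)++;
		local_irq_restore_hw(flags);
	}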
(-)a/include/linux/hardirq.h (-16 / +20 lines)
Lines 183-206 extern void irq_enter(void);
  */
 extern void irq_exit(void);
 
-#define nmi_enter()						\
-	do {							\
-		ftrace_nmi_enter();				\
-		BUG_ON(in_nmi());				\
-		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
-		lockdep_off();					\
-		rcu_nmi_enter();				\
-		trace_hardirq_enter();				\
+#define nmi_enter()							\
+	do {								\
+		if (likely(!ipipe_test_foreign_stack())) {		\
+			ftrace_nmi_enter();				\
+			BUG_ON(in_nmi());				\
+			add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
+			lockdep_off();					\
+			rcu_nmi_enter();				\
+			trace_hardirq_enter();				\
+		}							\
 	} while (0)
 
-#define nmi_exit()						\
-	do {							\
-		trace_hardirq_exit();				\
-		rcu_nmi_exit();					\
-		lockdep_on();					\
-		BUG_ON(!in_nmi());				\
-		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
-		ftrace_nmi_exit();				\
+#define nmi_exit()							\
+	do {								\
+		if (likely(!ipipe_test_foreign_stack())) {		\
+			trace_hardirq_exit();				\
+			rcu_nmi_exit();					\
+			lockdep_on();					\
+			BUG_ON(!in_nmi());				\
+			sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
+			ftrace_nmi_exit();				\
+		}							\
 	} while (0)
 
 #endif /* LINUX_HARDIRQ_H */
(-)a/include/linux/ipipe.h (+690 lines)
Line 0
+/* -*- linux-c -*-
+ * include/linux/ipipe.h
+ *
+ * Copyright (C) 2002-2007 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_IPIPE_H
+#define __LINUX_IPIPE_H
+
+#include <linux/spinlock.h>
+#include <linux/cache.h>
+#include <linux/percpu.h>
+#include <linux/mutex.h>
+#include <linux/linkage.h>
+#include <linux/ipipe_base.h>
+#include <linux/ipipe_compat.h>
+#include <asm/ipipe.h>
+
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
+
+#include <linux/cpumask.h>
+#include <asm/system.h>
+
+static inline int ipipe_disable_context_check(int cpu)
+{
+	return xchg(&per_cpu(ipipe_percpu_context_check, cpu), 0);
+}
+
+static inline void ipipe_restore_context_check(int cpu, int old_state)
+{
+	per_cpu(ipipe_percpu_context_check, cpu) = old_state;
+}
+
+static inline void ipipe_context_check_off(void)
+{
+	int cpu;
+	for_each_online_cpu(cpu)
+		per_cpu(ipipe_percpu_context_check, cpu) = 0;
+}
+
+#else	/* !CONFIG_IPIPE_DEBUG_CONTEXT */
+
+static inline int ipipe_disable_context_check(int cpu)
+{
+	return 0;
+}
+
+static inline void ipipe_restore_context_check(int cpu, int old_state) { }
+
+static inline void ipipe_context_check_off(void) { }
+
+#endif	/* !CONFIG_IPIPE_DEBUG_CONTEXT */
+
+#ifdef CONFIG_IPIPE
+
+#define IPIPE_VERSION_STRING	IPIPE_ARCH_STRING
+#define IPIPE_RELEASE_NUMBER	((IPIPE_MAJOR_NUMBER << 16) | \
+				 (IPIPE_MINOR_NUMBER <<  8) | \
+				 (IPIPE_PATCH_NUMBER))
+
+#ifndef BROKEN_BUILTIN_RETURN_ADDRESS
+#define __BUILTIN_RETURN_ADDRESS0 ((unsigned long)__builtin_return_address(0))
+#define __BUILTIN_RETURN_ADDRESS1 ((unsigned long)__builtin_return_address(1))
+#endif /* !BUILTIN_RETURN_ADDRESS */
+
+#define IPIPE_ROOT_PRIO		100
+#define IPIPE_ROOT_ID		0
+#define IPIPE_ROOT_NPTDKEYS	4	/* Must be <= BITS_PER_LONG */
+
+#define IPIPE_RESET_TIMER	0x1
+#define IPIPE_GRAB_TIMER	0x2
+
+/* Global domain flags */
+#define IPIPE_SPRINTK_FLAG	0	/* Synchronous printk() allowed */
+#define IPIPE_AHEAD_FLAG	1	/* Domain always heads the pipeline */
+
+/* Interrupt control bits */
+#define IPIPE_HANDLE_FLAG	0
+#define IPIPE_PASS_FLAG		1
+#define IPIPE_ENABLE_FLAG	2
+#define IPIPE_DYNAMIC_FLAG	IPIPE_HANDLE_FLAG
+#define IPIPE_STICKY_FLAG	3
+#define IPIPE_SYSTEM_FLAG	4
+#define IPIPE_LOCK_FLAG		5
+#define IPIPE_WIRED_FLAG	6
+#define IPIPE_EXCLUSIVE_FLAG	7
+
+#define IPIPE_HANDLE_MASK	(1 << IPIPE_HANDLE_FLAG)
+#define IPIPE_PASS_MASK		(1 << IPIPE_PASS_FLAG)
+#define IPIPE_ENABLE_MASK	(1 << IPIPE_ENABLE_FLAG)
+#define IPIPE_DYNAMIC_MASK	IPIPE_HANDLE_MASK
+#define IPIPE_STICKY_MASK	(1 << IPIPE_STICKY_FLAG)
+#define IPIPE_SYSTEM_MASK	(1 << IPIPE_SYSTEM_FLAG)
+#define IPIPE_LOCK_MASK		(1 << IPIPE_LOCK_FLAG)
+#define IPIPE_WIRED_MASK	(1 << IPIPE_WIRED_FLAG)
+#define IPIPE_EXCLUSIVE_MASK	(1 << IPIPE_EXCLUSIVE_FLAG)
+
+#define IPIPE_DEFAULT_MASK	(IPIPE_HANDLE_MASK|IPIPE_PASS_MASK)
+#define IPIPE_STDROOT_MASK	(IPIPE_HANDLE_MASK|IPIPE_PASS_MASK|IPIPE_SYSTEM_MASK)
+
+#define IPIPE_EVENT_SELF        0x80000000
+
+#define IPIPE_NR_CPUS		NR_CPUS
+
+/* This accessor assumes hw IRQs are off on SMP; allows assignment. */
+#define __ipipe_current_domain	__ipipe_get_cpu_var(ipipe_percpu_domain)
+/* This read-only accessor makes sure that hw IRQs are off on SMP. */
+#define ipipe_current_domain				\
+	({						\
+		struct ipipe_domain *__ipd__;		\
+		unsigned long __flags__;		\
+		local_irq_save_hw_smp(__flags__);	\
+		__ipd__ = __ipipe_current_domain;	\
+		local_irq_restore_hw_smp(__flags__);	\
+		__ipd__;				\
+	})
+
+#define ipipe_virtual_irq_p(irq)	((irq) >= IPIPE_VIRQ_BASE && \
+					 (irq) < IPIPE_NR_IRQS)
+
+#define IPIPE_SAME_HANDLER	((ipipe_irq_handler_t)(-1))
+
+struct irq_desc;
+
+typedef void (*ipipe_irq_ackfn_t)(unsigned irq, struct irq_desc *desc);
+
+typedef int (*ipipe_event_handler_t)(unsigned event,
+				     struct ipipe_domain *from,
+				     void *data);
+struct ipipe_domain {
+
+	int slot;			/* Slot number in percpu domain data array. */
+	struct list_head p_link;	/* Link in pipeline */
+	ipipe_event_handler_t evhand[IPIPE_NR_EVENTS]; /* Event handlers. */
+	unsigned long long evself;	/* Self-monitored event bits. */
+
+	struct irqdesc {
+		unsigned long control;
+		ipipe_irq_ackfn_t acknowledge;
+		ipipe_irq_handler_t handler;
+		void *cookie;
+	} ____cacheline_aligned irqs[IPIPE_NR_IRQS];
+
+	int priority;
+	void *pdd;
+	unsigned long flags;
+	unsigned domid;
+	const char *name;
+	struct mutex mutex;
+};
+
+#define IPIPE_HEAD_PRIORITY	(-1) /* For domains always heading the pipeline */
+
+struct ipipe_domain_attr {
+
+	unsigned domid;		/* Domain identifier -- Magic value set by caller */
+	const char *name;	/* Domain name -- Warning: won't be dup'ed! */
+	int priority;		/* Priority in interrupt pipeline */
+	void (*entry) (void);	/* Domain entry point */
+	void *pdd;		/* Per-domain (opaque) data pointer */
+};
+
+#define __ipipe_irq_cookie(ipd, irq)		(ipd)->irqs[irq].cookie
+#define __ipipe_irq_handler(ipd, irq)		(ipd)->irqs[irq].handler
+#define __ipipe_cpudata_irq_hits(ipd, cpu, irq)	ipipe_percpudom(ipd, irqall, cpu)[irq]
+
+extern unsigned __ipipe_printk_virq;
+
+extern unsigned long __ipipe_virtual_irq_map;
+
+extern struct list_head __ipipe_pipeline;
+
+extern int __ipipe_event_monitors[];
+
+/* Private interface */
+
+void ipipe_init_early(void);
+
+void ipipe_init(void);
+
+#ifdef CONFIG_PROC_FS
+void ipipe_init_proc(void);
+
+#ifdef CONFIG_IPIPE_TRACE
+void __ipipe_init_tracer(void);
+#else /* !CONFIG_IPIPE_TRACE */
+#define __ipipe_init_tracer()       do { } while(0)
+#endif /* CONFIG_IPIPE_TRACE */
+
+#else	/* !CONFIG_PROC_FS */
+#define ipipe_init_proc()	do { } while(0)
+#endif	/* CONFIG_PROC_FS */
+
+void __ipipe_init_stage(struct ipipe_domain *ipd);
+
+void __ipipe_cleanup_domain(struct ipipe_domain *ipd);
+
+void __ipipe_add_domain_proc(struct ipipe_domain *ipd);
+
+void __ipipe_remove_domain_proc(struct ipipe_domain *ipd);
+
+void __ipipe_flush_printk(unsigned irq, void *cookie);
+
+void __ipipe_walk_pipeline(struct list_head *pos);
+
+void __ipipe_pend_irq(unsigned irq, struct list_head *head);
+
+int __ipipe_dispatch_event(unsigned event, void *data);
+
+void __ipipe_dispatch_wired_nocheck(struct ipipe_domain *head, unsigned irq);
+
+void __ipipe_dispatch_wired(struct ipipe_domain *head, unsigned irq);
+
+void __ipipe_sync_stage(int dovirt);
+
+void __ipipe_set_irq_pending(struct ipipe_domain *ipd, unsigned irq);
+
+void __ipipe_lock_irq(struct ipipe_domain *ipd, int cpu, unsigned irq);
+
+void __ipipe_unlock_irq(struct ipipe_domain *ipd, unsigned irq);
+
+void __ipipe_pin_range_globally(unsigned long start, unsigned long end);
+
+/* Must be called hw IRQs off. */
+static inline void ipipe_irq_lock(unsigned irq)
+{
+	__ipipe_lock_irq(__ipipe_current_domain, ipipe_processor_id(), irq);
+}
+
+/* Must be called hw IRQs off. */
+static inline void ipipe_irq_unlock(unsigned irq)
+{
+	__ipipe_unlock_irq(__ipipe_current_domain, irq);
+}
+
+#ifndef __ipipe_sync_pipeline
+#define __ipipe_sync_pipeline(dovirt) __ipipe_sync_stage(dovirt)
+#endif
+
+#ifndef __ipipe_run_irqtail
+#define __ipipe_run_irqtail() do { } while(0)
+#endif
+
+#define __ipipe_pipeline_head_p(ipd) (&(ipd)->p_link == __ipipe_pipeline.next)
+
+#define __ipipe_ipending_p(p)	((p)->irqpend_himap != 0)
+
+/*
+ * Keep the following as a macro, so that client code could check for
+ * the support of the invariant pipeline head optimization.
+ */
+#define __ipipe_pipeline_head() \
+	list_entry(__ipipe_pipeline.next, struct ipipe_domain, p_link)
+
+#define local_irq_enable_hw_cond()		local_irq_enable_hw()
+#define local_irq_disable_hw_cond()		local_irq_disable_hw()
+#define local_irq_save_hw_cond(flags)		local_irq_save_hw(flags)
+#define local_irq_restore_hw_cond(flags)	local_irq_restore_hw(flags)
+
+#ifdef CONFIG_SMP
+cpumask_t __ipipe_set_irq_affinity(unsigned irq, cpumask_t cpumask);
+int __ipipe_send_ipi(unsigned ipi, cpumask_t cpumask);
+#define local_irq_save_hw_smp(flags)		local_irq_save_hw(flags)
+#define local_irq_restore_hw_smp(flags)		local_irq_restore_hw(flags)
+#else /* !CONFIG_SMP */
+#define local_irq_save_hw_smp(flags)		do { (void)(flags); } while(0)
+#define local_irq_restore_hw_smp(flags)		do { } while(0)
+#endif /* CONFIG_SMP */
+
+#define local_irq_save_full(vflags, rflags)		\
+	do {						\
+		local_irq_save(vflags);			\
+		local_irq_save_hw(rflags);		\
+	} while(0)
+
+#define local_irq_restore_full(vflags, rflags)		\
+	do {						\
+		local_irq_restore_hw(rflags);		\
+		local_irq_restore(vflags);		\
+	} while(0)
+
+static inline void __local_irq_restore_nosync(unsigned long x)
+{
+	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
+
+	if (raw_irqs_disabled_flags(x)) {
+		set_bit(IPIPE_STALL_FLAG, &p->status);
+		trace_hardirqs_off();
+	} else {
+		trace_hardirqs_on();
+		clear_bit(IPIPE_STALL_FLAG, &p->status);
+	}
+}
+
+static inline void local_irq_restore_nosync(unsigned long x)
+{
+	unsigned long flags;
+	local_irq_save_hw_smp(flags);
+	__local_irq_restore_nosync(x);
+	local_irq_restore_hw_smp(flags);
+}
+
+#define __ipipe_root_domain_p	(__ipipe_current_domain == ipipe_root_domain)
+#define ipipe_root_domain_p	(ipipe_current_domain == ipipe_root_domain)
+
+static inline int __ipipe_event_monitored_p(int ev)
+{
+	if (__ipipe_event_monitors[ev] > 0)
+		return 1;
+
+	return (ipipe_current_domain->evself & (1LL << ev)) != 0;
+}
+
+#define ipipe_sigwake_notify(p)	\
+do {					\
+	if (((p)->flags & PF_EVNOTIFY) && __ipipe_event_monitored_p(IPIPE_EVENT_SIGWAKE)) \
+		__ipipe_dispatch_event(IPIPE_EVENT_SIGWAKE, p);		\
+} while(0)
+
+#define ipipe_exit_notify(p)	\
+do {				\
+	if (((p)->flags & PF_EVNOTIFY) && __ipipe_event_monitored_p(IPIPE_EVENT_EXIT)) \
+		__ipipe_dispatch_event(IPIPE_EVENT_EXIT, p);		\
+} while(0)
+
+#define ipipe_setsched_notify(p)	\
+do {					\
+	if (((p)->flags & PF_EVNOTIFY) && __ipipe_event_monitored_p(IPIPE_EVENT_SETSCHED)) \
+		__ipipe_dispatch_event(IPIPE_EVENT_SETSCHED, p);	\
+} while(0)
+
+#define ipipe_schedule_notify(prev, next)				\
+do {									\
+	if ((((prev)->flags|(next)->flags) & PF_EVNOTIFY) &&		\
+	    __ipipe_event_monitored_p(IPIPE_EVENT_SCHEDULE))		\
+		__ipipe_dispatch_event(IPIPE_EVENT_SCHEDULE,next);	\
+} while(0)
+
+#define ipipe_trap_notify(ex, regs)					\
+({									\
+	unsigned long __flags__;					\
+	int __ret__ = 0;						\
+	local_irq_save_hw_smp(__flags__);				\
+	if ((test_bit(IPIPE_NOSTACK_FLAG, &ipipe_this_cpudom_var(status)) || \
+	     ((current)->flags & PF_EVNOTIFY)) &&			\
+	    __ipipe_event_monitored_p(ex)) {				\
+		local_irq_restore_hw_smp(__flags__);			\
+		__ret__ = __ipipe_dispatch_event(ex, regs);		\
+	} else								\
+		local_irq_restore_hw_smp(__flags__);			\
+	__ret__;							\
+})
+
+static inline void ipipe_init_notify(struct task_struct *p)
+{
+	if (__ipipe_event_monitored_p(IPIPE_EVENT_INIT))
+		__ipipe_dispatch_event(IPIPE_EVENT_INIT, p);
+}
+
+struct mm_struct;
+
+static inline void ipipe_cleanup_notify(struct mm_struct *mm)
+{
+	if (__ipipe_event_monitored_p(IPIPE_EVENT_CLEANUP))
+		__ipipe_dispatch_event(IPIPE_EVENT_CLEANUP, mm);
+}
+
+/* Public interface */
+
+int ipipe_register_domain(struct ipipe_domain *ipd,
+			  struct ipipe_domain_attr *attr);
+
+int ipipe_unregister_domain(struct ipipe_domain *ipd);
+
+void ipipe_suspend_domain(void);
+
+int ipipe_virtualize_irq(struct ipipe_domain *ipd,
+			 unsigned irq,
+			 ipipe_irq_handler_t handler,
+			 void *cookie,
+			 ipipe_irq_ackfn_t acknowledge,
+			 unsigned modemask);
+
+int ipipe_control_irq(unsigned irq,
+		      unsigned clrmask,
+		      unsigned setmask);
+
+unsigned ipipe_alloc_virq(void);
+
+int ipipe_free_virq(unsigned virq);
+
+int ipipe_trigger_irq(unsigned irq);
+
+static inline void __ipipe_propagate_irq(unsigned irq)
+{
+	struct list_head *next = __ipipe_current_domain->p_link.next;
+	if (next == &ipipe_root.p_link) {
+		/* Fast path: root must handle all interrupts. */
+		__ipipe_set_irq_pending(&ipipe_root, irq);
+		return;
+	}
+	__ipipe_pend_irq(irq, next);
+}
+
+static inline void __ipipe_schedule_irq(unsigned irq)
+{
+	__ipipe_pend_irq(irq, &__ipipe_current_domain->p_link);
+}
+
+static inline void __ipipe_schedule_irq_head(unsigned irq)
+{
+	__ipipe_set_irq_pending(__ipipe_pipeline_head(), irq);
+}
+
+static inline void __ipipe_schedule_irq_root(unsigned irq)
+{
+	__ipipe_set_irq_pending(&ipipe_root, irq);
+}
+
+static inline void ipipe_propagate_irq(unsigned irq)
+{
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
+	__ipipe_propagate_irq(irq);
+	local_irq_restore_hw(flags);
+}
+
+static inline void ipipe_schedule_irq(unsigned irq)
+{
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
+	__ipipe_schedule_irq(irq);
+	local_irq_restore_hw(flags);
+}
+
+static inline void ipipe_schedule_irq_head(unsigned irq)
+{
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
+	__ipipe_schedule_irq_head(irq);
+	local_irq_restore_hw(flags);
+}
+
+static inline void ipipe_schedule_irq_root(unsigned irq)
+{
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
+	__ipipe_schedule_irq_root(irq);
+	local_irq_restore_hw(flags);
+}
+
+void ipipe_stall_pipeline_from(struct ipipe_domain *ipd);
+
+unsigned long ipipe_test_and_stall_pipeline_from(struct ipipe_domain *ipd);
+
+unsigned long ipipe_test_and_unstall_pipeline_from(struct ipipe_domain *ipd);
+
+static inline void ipipe_unstall_pipeline_from(struct ipipe_domain *ipd)
+{
+	ipipe_test_and_unstall_pipeline_from(ipd);
+}
+
+void ipipe_restore_pipeline_from(struct ipipe_domain *ipd,
+					  unsigned long x);
+
+static inline unsigned long ipipe_test_pipeline_from(struct ipipe_domain *ipd)
+{
+	return test_bit(IPIPE_STALL_FLAG, &ipipe_cpudom_var(ipd, status));
+}
+
+static inline void ipipe_stall_pipeline_head(void)
+{
+	local_irq_disable_hw();
+	__set_bit(IPIPE_STALL_FLAG, &ipipe_head_cpudom_var(status));
+}
+
+static inline unsigned long ipipe_test_and_stall_pipeline_head(void)
+{
+	local_irq_disable_hw();
+	return __test_and_set_bit(IPIPE_STALL_FLAG, &ipipe_head_cpudom_var(status));
+}
+
+void ipipe_unstall_pipeline_head(void);
+
+void __ipipe_restore_pipeline_head(unsigned long x);
+
+static inline void ipipe_restore_pipeline_head(unsigned long x)
+{
+	/* On some archs, __test_and_set_bit() might return different
+	 * truth value than test_bit(), so we test the exclusive OR of
+	 * both statuses, assuming that the lowest bit is always set in
+	 * the truth value (if this is wrong, the failed optimization will
+	 * be caught in __ipipe_restore_pipeline_head() if
+	 * CONFIG_DEBUG_KERNEL is set). */
+	if ((x ^ test_bit(IPIPE_STALL_FLAG, &ipipe_head_cpudom_var(status))) & 1)
+		__ipipe_restore_pipeline_head(x);
+}
+
+#define ipipe_unstall_pipeline() \
+	ipipe_unstall_pipeline_from(ipipe_current_domain)
+
+#define ipipe_test_and_unstall_pipeline() \
+	ipipe_test_and_unstall_pipeline_from(ipipe_current_domain)
+
+#define ipipe_test_pipeline() \
+	ipipe_test_pipeline_from(ipipe_current_domain)
+
+#define ipipe_test_and_stall_pipeline() \
+	ipipe_test_and_stall_pipeline_from(ipipe_current_domain)
+
+#define ipipe_stall_pipeline() \
+	ipipe_stall_pipeline_from(ipipe_current_domain)
+
+#define ipipe_restore_pipeline(x) \
+	ipipe_restore_pipeline_from(ipipe_current_domain, (x))
+
+void ipipe_init_attr(struct ipipe_domain_attr *attr);
+
+int ipipe_get_sysinfo(struct ipipe_sysinfo *sysinfo);
+
+unsigned long ipipe_critical_enter(void (*syncfn) (void));
+
+void ipipe_critical_exit(unsigned long flags);
+
+static inline void ipipe_set_printk_sync(struct ipipe_domain *ipd)
+{
+	set_bit(IPIPE_SPRINTK_FLAG, &ipd->flags);
+}
+
+static inline void ipipe_set_printk_async(struct ipipe_domain *ipd)
+{
+	clear_bit(IPIPE_SPRINTK_FLAG, &ipd->flags);
+}
+
+static inline void ipipe_set_foreign_stack(struct ipipe_domain *ipd)
+{
+	/* Must be called hw interrupts off. */
+	__set_bit(IPIPE_NOSTACK_FLAG, &ipipe_cpudom_var(ipd, status));
+}
+
+static inline void ipipe_clear_foreign_stack(struct ipipe_domain *ipd)
+{
+	/* Must be called hw interrupts off. */
+	__clear_bit(IPIPE_NOSTACK_FLAG, &ipipe_cpudom_var(ipd, status));
+}
+
+static inline int ipipe_test_foreign_stack(void)
+{
+	/* Must be called hw interrupts off. */
+	return test_bit(IPIPE_NOSTACK_FLAG, &ipipe_this_cpudom_var(status));
+}
+
+#ifndef ipipe_safe_current
+#define ipipe_safe_current()					\
+({								\
+	struct task_struct *p;					\
+	unsigned long flags;					\
+	local_irq_save_hw_smp(flags);				\
+	p = ipipe_test_foreign_stack() ? &init_task : current;	\
+	local_irq_restore_hw_smp(flags);			\
+	p; \
+})
+#endif
+
+ipipe_event_handler_t ipipe_catch_event(struct ipipe_domain *ipd,
+					unsigned event,
+					ipipe_event_handler_t handler);
+
+cpumask_t ipipe_set_irq_affinity(unsigned irq,
+				 cpumask_t cpumask);
+
+int ipipe_send_ipi(unsigned ipi,
+		   cpumask_t cpumask);
+
+int ipipe_setscheduler_root(struct task_struct *p,
+			    int policy,
+			    int prio);
+
+int ipipe_reenter_root(struct task_struct *prev,
+		       int policy,
+		       int prio);
+
+int ipipe_alloc_ptdkey(void);
+
+int ipipe_free_ptdkey(int key);
+
+int ipipe_set_ptd(int key,
+		  void *value);
+
+void *ipipe_get_ptd(int key);
+
+int ipipe_disable_ondemand_mappings(struct task_struct *tsk);
+
+static inline void ipipe_nmi_enter(void)
+{
+	int cpu = ipipe_processor_id();
+
+	per_cpu(ipipe_nmi_saved_root, cpu) = ipipe_root_cpudom_var(status);
+	__set_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
+	per_cpu(ipipe_saved_context_check_state, cpu) =
+		ipipe_disable_context_check(cpu);
+#endif /* CONFIG_IPIPE_DEBUG_CONTEXT */
+}
+
+static inline void ipipe_nmi_exit(void)
+{
+	int cpu = ipipe_processor_id();
+
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
+	ipipe_restore_context_check
+		(cpu, per_cpu(ipipe_saved_context_check_state, cpu));
+#endif /* CONFIG_IPIPE_DEBUG_CONTEXT */
+
+	if (!test_bit(IPIPE_STALL_FLAG, &per_cpu(ipipe_nmi_saved_root, cpu)))
+		__clear_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+}
+
+#else	/* !CONFIG_IPIPE */
+
+#define ipipe_init_early()		do { } while(0)
+#define ipipe_init()			do { } while(0)
+#define ipipe_suspend_domain()		do { } while(0)
+#define ipipe_sigwake_notify(p)		do { } while(0)
+#define ipipe_setsched_notify(p)	do { } while(0)
+#define ipipe_init_notify(p)		do { } while(0)
+#define ipipe_exit_notify(p)		do { } while(0)
+#define ipipe_cleanup_notify(mm)	do { } while(0)
+#define ipipe_trap_notify(t,r)		0
+#define ipipe_init_proc()		do { } while(0)
+
+static inline void __ipipe_pin_range_globally(unsigned long start,
+					      unsigned long end)
+{
+}
+
+static inline int ipipe_test_foreign_stack(void)
+{
+	return 0;
+}
+
+#define local_irq_enable_hw_cond()		do { } while(0)
+#define local_irq_disable_hw_cond()		do { } while(0)
+#define local_irq_save_hw_cond(flags)		do { (void)(flags); } while(0)
+#define local_irq_restore_hw_cond(flags)	do { } while(0)
+#define local_irq_save_hw_smp(flags)		do { (void)(flags); } while(0)
+#define local_irq_restore_hw_smp(flags)		do { } while(0)
+
+#define ipipe_irq_lock(irq)		do { } while(0)
+#define ipipe_irq_unlock(irq)		do { } while(0)
+
+#define __ipipe_root_domain_p		1
+#define ipipe_root_domain_p		1
+#define ipipe_safe_current		current
+#define ipipe_processor_id()		smp_processor_id()
+
+#define ipipe_nmi_enter()		do { } while (0)
+#define ipipe_nmi_exit()		do { } while (0)
+
+#define local_irq_disable_head()	local_irq_disable()
+
+#define local_irq_save_full(vflags, rflags)	do { (void)(vflags); local_irq_save(rflags); } while(0)
+#define local_irq_restore_full(vflags, rflags)	do { (void)(vflags); local_irq_restore(rflags); } while(0)
+#define local_irq_restore_nosync(vflags)	local_irq_restore(vflags)
+
+#define __ipipe_pipeline_head_p(ipd)	1
+
+#endif	/* CONFIG_IPIPE */
+
+#endif	/* !__LINUX_IPIPE_H */
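The public interface declared above is what client co-kernels build on. A minimal registration sketch assuming nothing beyond these declarations; the IRQ number, domain id and handler body are hypothetical:

	#include <linux/ipipe.h>

	#define MY_IRQ	42			/* hypothetical IRQ line */

	static struct ipipe_domain my_domain;

	static void my_irq_handler(unsigned int irq, void *cookie)
	{
		/* Runs ahead of Linux for this IRQ. */
		__ipipe_propagate_irq(irq);	/* then pass it down the pipeline */
	}

	static int __init my_domain_setup(void)
	{
		struct ipipe_domain_attr attr;
		int ret;

		ipipe_init_attr(&attr);
		attr.name = "sketch";
		attr.domid = 0x53484554;		/* hypothetical magic */
		attr.priority = IPIPE_HEAD_PRIORITY;	/* head the pipeline */

		ret = ipipe_register_domain(&my_domain, &attr);
		if (ret)
			return ret;

		return ipipe_virtualize_irq(&my_domain, MY_IRQ, my_irq_handler,
					    NULL, NULL,
					    IPIPE_HANDLE_MASK | IPIPE_WIRED_MASK);
	}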
(-)a/include/linux/ipipe_base.h (+118 lines)
Line 0
+/* -*- linux-c -*-
+ * include/linux/ipipe_base.h
+ *
+ * Copyright (C) 2002-2007 Philippe Gerum.
+ *               2007 Jan Kiszka.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_IPIPE_BASE_H
+#define __LINUX_IPIPE_BASE_H
+
+#ifdef CONFIG_IPIPE
+
+#include <asm/ipipe_base.h>
+
+#define __bpl_up(x)		(((x)+(BITS_PER_LONG-1)) & ~(BITS_PER_LONG-1))
+/* Number of virtual IRQs (must be a multiple of BITS_PER_LONG) */
+#define IPIPE_NR_VIRQS		BITS_PER_LONG
+/* First virtual IRQ # (must be aligned on BITS_PER_LONG) */
+#define IPIPE_VIRQ_BASE		__bpl_up(IPIPE_NR_XIRQS)
+/* Total number of IRQ slots */
+#define IPIPE_NR_IRQS		(IPIPE_VIRQ_BASE+IPIPE_NR_VIRQS)
+
+#define IPIPE_IRQ_LOMAPSZ	(IPIPE_NR_IRQS / BITS_PER_LONG)
+#if IPIPE_IRQ_LOMAPSZ > BITS_PER_LONG
+/*
+ * We need a 3-level mapping. This allows us to handle up to 32k IRQ
+ * vectors on 32bit machines, 256k on 64bit ones.
+ */
+#define __IPIPE_3LEVEL_IRQMAP	1
+#define IPIPE_IRQ_MDMAPSZ	(__bpl_up(IPIPE_IRQ_LOMAPSZ) / BITS_PER_LONG)
+#else
+/*
+ * 2-level mapping is enough. This allows us to handle up to 1024 IRQ
+ * vectors on 32bit machines, 4096 on 64bit ones.
+ */
+#define __IPIPE_2LEVEL_IRQMAP	1
+#endif
+
+#define IPIPE_IRQ_DOALL		0
+#define IPIPE_IRQ_DOVIRT	1
+
+/* Per-cpu pipeline status */
+#define IPIPE_STALL_FLAG	0	/* Stalls a pipeline stage -- guaranteed at bit #0 */
+#define IPIPE_SYNC_FLAG		1	/* The interrupt syncer is running for the domain */
+#define IPIPE_NOSTACK_FLAG	2	/* Domain currently runs on a foreign stack */
+
+#define IPIPE_STALL_MASK	(1L << IPIPE_STALL_FLAG)
+#define IPIPE_SYNC_MASK		(1L << IPIPE_SYNC_FLAG)
+#define IPIPE_NOSTACK_MASK	(1L << IPIPE_NOSTACK_FLAG)
+
+typedef void (*ipipe_irq_handler_t)(unsigned int irq,
+				    void *cookie);
+
+extern struct ipipe_domain ipipe_root;
+
+#define ipipe_root_domain (&ipipe_root)
+
+void __ipipe_unstall_root(void);
+
+void __ipipe_restore_root(unsigned long x);
+
+#define ipipe_preempt_disable(flags)		\
+	do {					\
+		local_irq_save_hw(flags);	\
+		if (__ipipe_root_domain_p)	\
+			preempt_disable();	\
+	} while (0)
+
+#define ipipe_preempt_enable(flags)			\
+	do {						\
+		if (__ipipe_root_domain_p) {		\
+			preempt_enable_no_resched();	\
+			local_irq_restore_hw(flags);	\
+			preempt_check_resched();	\
+		} else					\
+			local_irq_restore_hw(flags);	\
+	} while (0)
+
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
+void ipipe_check_context(struct ipipe_domain *border_ipd);
+#else /* !CONFIG_IPIPE_DEBUG_CONTEXT */
+static inline void ipipe_check_context(struct ipipe_domain *border_ipd) { }
+#endif /* !CONFIG_IPIPE_DEBUG_CONTEXT */
+
+/* Generic features */
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+#define __IPIPE_FEATURE_REQUEST_TICKDEV    1
+#endif
+#define __IPIPE_FEATURE_DELAYED_ATOMICSW   1
+#define __IPIPE_FEATURE_FASTPEND_IRQ       1
+#define __IPIPE_FEATURE_TRACE_EVENT	   1
+
+#else /* !CONFIG_IPIPE */
+#define ipipe_preempt_disable(flags)	do { \
+						preempt_disable(); \
+						(void)(flags); \
+					} while (0)
+#define ipipe_preempt_enable(flags)	preempt_enable()
+#define ipipe_check_context(ipd)	do { } while(0)
+#endif	/* CONFIG_IPIPE */
+
+#endif	/* !__LINUX_IPIPE_BASE_H */
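ipipe_preempt_disable()/ipipe_preempt_enable() pair hardware IRQ masking with Linux preemption control, touching the preempt count only when the root domain is current, exactly as the macros above spell out. Usage sketch (the worker function is hypothetical):

	static void my_shared_work(void)
	{
		unsigned long flags;

		ipipe_preempt_disable(flags);
		/* ...work that must not migrate or be preempted,
		 * whichever domain is current... */
		ipipe_preempt_enable(flags);
	}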
(-)a/include/linux/ipipe_compat.h (+54 lines)
Line 0
+/* -*- linux-c -*-
+ * include/linux/ipipe_compat.h
+ *
+ * Copyright (C) 2007 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_IPIPE_COMPAT_H
+#define __LINUX_IPIPE_COMPAT_H
+
+#ifdef CONFIG_IPIPE_COMPAT
+/*
+ * OBSOLETE: defined only for backward compatibility. Will be removed
+ * in future releases, please update client code accordingly.
+ */
+
+#ifdef CONFIG_SMP
+#define ipipe_declare_cpuid	int cpuid
+#define ipipe_load_cpuid()	do { \
+					cpuid = ipipe_processor_id();	\
+				} while(0)
+#define ipipe_lock_cpu(flags)	do { \
+					local_irq_save_hw(flags); \
+					cpuid = ipipe_processor_id(); \
+				} while(0)
+#define ipipe_unlock_cpu(flags)	local_irq_restore_hw(flags)
+#define ipipe_get_cpu(flags)	ipipe_lock_cpu(flags)
+#define ipipe_put_cpu(flags)	ipipe_unlock_cpu(flags)
+#else /* !CONFIG_SMP */
+#define ipipe_declare_cpuid	const int cpuid = 0
+#define ipipe_load_cpuid()	do { } while(0)
+#define ipipe_lock_cpu(flags)	local_irq_save_hw(flags)
+#define ipipe_unlock_cpu(flags)	local_irq_restore_hw(flags)
+#define ipipe_get_cpu(flags)	do { (void)(flags); } while(0)
+#define ipipe_put_cpu(flags)	do { } while(0)
+#endif /* CONFIG_SMP */
+
+#endif /* CONFIG_IPIPE_COMPAT */
+
+#endif	/* !__LINUX_IPIPE_COMPAT_H */
(-)a/include/linux/ipipe_lock.h (+144 lines)
Line 0
+/*   -*- linux-c -*-
+ *   include/linux/ipipe_lock.h
+ *
+ *   Copyright (C) 2009 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_IPIPE_LOCK_H
+#define __LINUX_IPIPE_LOCK_H
+
+typedef struct {
+	raw_spinlock_t bare_lock;
+} __ipipe_spinlock_t;
+
+#define ipipe_lock_p(lock)						\
+	__builtin_types_compatible_p(typeof(lock), __ipipe_spinlock_t *)
+
+#define common_lock_p(lock)						\
+	__builtin_types_compatible_p(typeof(lock), spinlock_t *)
+
+#define bare_lock(lock)	(&((__ipipe_spinlock_t *)(lock))->bare_lock)
+#define std_lock(lock)	((spinlock_t *)(lock))
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+extern int __bad_spinlock_type(void);
+#define PICK_SPINLOCK_IRQSAVE(lock, flags)				\
+	do {								\
+		if (ipipe_lock_p(lock))					\
+			(flags) = __ipipe_spin_lock_irqsave(bare_lock(lock)); \
+		else if (common_lock_p(lock))				\
+			(flags) = _spin_lock_irqsave(std_lock(lock));	\
+		else __bad_spinlock_type();				\
+	} while (0)
+
+#else /* !(CONFIG_SMP || CONFIG_DEBUG_SPINLOCK) */
+
+#define PICK_SPINLOCK_IRQSAVE(lock, flags)				\
+	do {								\
+		if (ipipe_lock_p(lock))					\
+			(flags) = __ipipe_spin_lock_irqsave(bare_lock(lock)); \
+		else if (common_lock_p(lock))				\
+			_spin_lock_irqsave(std_lock(lock), flags);	\
+	} while (0)
+
+#endif /* !(CONFIG_SMP || CONFIG_DEBUG_SPINLOCK) */
+
+#define PICK_SPINUNLOCK_IRQRESTORE(lock, flags)				\
+	do {								\
+		if (ipipe_lock_p(lock))					\
+			__ipipe_spin_unlock_irqrestore(bare_lock(lock), flags); \
+		else if (common_lock_p(lock))				\
+			_spin_unlock_irqrestore(std_lock(lock), flags); \
+	} while (0)
+
+#define PICK_SPINOP(op, lock)						\
+	do {								\
+		if (ipipe_lock_p(lock))					\
+			__raw_spin##op(bare_lock(lock));		\
+		else if (common_lock_p(lock))				\
+			_spin##op(std_lock(lock));			\
+	} while (0)
+
+#define PICK_SPINOP_IRQ(op, lock)					\
+	do {								\
+		if (ipipe_lock_p(lock))					\
+			__ipipe_spin##op##_irq(bare_lock(lock));	\
+		else if (common_lock_p(lock))				\
+			_spin##op##_irq(std_lock(lock));		\
+	} while (0)
+
+#define __raw_spin_lock_init(lock)					\
+	do {								\
+		IPIPE_DEFINE_SPINLOCK(__lock__);			\
+		*((ipipe_spinlock_t *)lock) = __lock__;			\
+	} while (0)
+
+#ifdef CONFIG_IPIPE
+
+#define ipipe_spinlock_t		__ipipe_spinlock_t
+#define IPIPE_DEFINE_SPINLOCK(x)	ipipe_spinlock_t x = IPIPE_SPIN_LOCK_UNLOCKED
+#define IPIPE_DECLARE_SPINLOCK(x)	extern ipipe_spinlock_t x
+#define IPIPE_SPIN_LOCK_UNLOCKED	\
+	(__ipipe_spinlock_t) {	.bare_lock = __RAW_SPIN_LOCK_UNLOCKED }
+
+#define spin_lock_irqsave_cond(lock, flags) \
+	spin_lock_irqsave(lock, flags)
+
+#define spin_unlock_irqrestore_cond(lock, flags) \
+	spin_unlock_irqrestore(lock, flags)
+
+void __ipipe_spin_lock_irq(raw_spinlock_t *lock);
+
+void __ipipe_spin_unlock_irq(raw_spinlock_t *lock);
+
+unsigned long __ipipe_spin_lock_irqsave(raw_spinlock_t *lock);
+
+void __ipipe_spin_unlock_irqrestore(raw_spinlock_t *lock,
+				    unsigned long x);
+
+void __ipipe_spin_unlock_irqbegin(ipipe_spinlock_t *lock);
+
+void __ipipe_spin_unlock_irqcomplete(unsigned long x);
+
+#else /* !CONFIG_IPIPE */
+
+#define ipipe_spinlock_t		spinlock_t
+#define IPIPE_DEFINE_SPINLOCK(x)	DEFINE_SPINLOCK(x)
+#define IPIPE_DECLARE_SPINLOCK(x)	extern spinlock_t x
+#define IPIPE_SPIN_LOCK_UNLOCKED        SPIN_LOCK_UNLOCKED
+
+#define spin_lock_irqsave_cond(lock, flags)		\
+	do {						\
+		(void)(flags);				\
+		spin_lock(lock);			\
+	} while(0)
+
+#define spin_unlock_irqrestore_cond(lock, flags)	\
+	spin_unlock(lock)
+
+#define __ipipe_spin_lock_irq(lock)		do { } while (0)
+#define __ipipe_spin_unlock_irq(lock)		do { } while (0)
+#define __ipipe_spin_lock_irqsave(lock)		0
+#define __ipipe_spin_unlock_irqrestore(lock, x)	do { (void)(x); } while (0)
+#define __ipipe_spin_unlock_irqbegin(lock)	do { } while (0)
+#define __ipipe_spin_unlock_irqcomplete(x)	do { (void)(x); } while (0)
+
+#endif /* !CONFIG_IPIPE */
+
+#endif /* !__LINUX_IPIPE_LOCK_H */
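The htirq.c hunk earlier in this patch shows the intended use of these definitions: declaring a lock with IPIPE_DEFINE_SPINLOCK() gives it the __ipipe_spinlock_t type, so the generic irqsave call sites resolve to __ipipe_spin_lock_irqsave() and hard-disable IRQs (assuming the core spinlock wrappers dispatch through PICK_SPINLOCK_IRQSAVE(), which this patch presumably wires up elsewhere). Sketch with a hypothetical lock:

	static IPIPE_DEFINE_SPINLOCK(my_shared_lock);

	static void my_touch_shared(void)
	{
		unsigned long flags;

		/* Hard IRQs off across the section, so the state stays
		 * consistent for both Linux and a real-time domain. */
		spin_lock_irqsave(&my_shared_lock, flags);
		/* ...state visible from both domains... */
		spin_unlock_irqrestore(&my_shared_lock, flags);
	}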
(-)a/include/linux/ipipe_percpu.h (+89 lines)
Line 0
+/*   -*- linux-c -*-
+ *   include/linux/ipipe_percpu.h
+ *
+ *   Copyright (C) 2007 Philippe Gerum.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_IPIPE_PERCPU_H
+#define __LINUX_IPIPE_PERCPU_H
+
+#include <asm/percpu.h>
+#include <asm/ptrace.h>
+
+struct ipipe_domain;
+
+struct ipipe_percpu_domain_data {
+	unsigned long status;	/* <= Must be first in struct. */
+	unsigned long irqpend_himap;
+#ifdef __IPIPE_3LEVEL_IRQMAP
+	unsigned long irqpend_mdmap[IPIPE_IRQ_MDMAPSZ];
+#endif
+	unsigned long irqpend_lomap[IPIPE_IRQ_LOMAPSZ];
+	unsigned long irqheld_map[IPIPE_IRQ_LOMAPSZ];
+	unsigned long irqall[IPIPE_NR_IRQS];
+	u64 evsync;
+};
+
+/*
+ * CAREFUL: all accessors based on __raw_get_cpu_var() you may find in
+ * this file should be used only while hw interrupts are off, to
+ * prevent from CPU migration regardless of the running domain.
+ */
+#ifdef CONFIG_SMP
+#define ipipe_percpudom_ptr(ipd, cpu)	\
+	(&per_cpu(ipipe_percpu_darray, cpu)[(ipd)->slot])
+#define ipipe_cpudom_ptr(ipd)	\
+	(&__ipipe_get_cpu_var(ipipe_percpu_darray)[(ipd)->slot])
+#else
+DECLARE_PER_CPU(struct ipipe_percpu_domain_data *, ipipe_percpu_daddr[CONFIG_IPIPE_DOMAINS]);
+#define ipipe_percpudom_ptr(ipd, cpu)	\
+	(per_cpu(ipipe_percpu_daddr, cpu)[(ipd)->slot])
+#define ipipe_cpudom_ptr(ipd)	\
+	(__ipipe_get_cpu_var(ipipe_percpu_daddr)[(ipd)->slot])
+#endif
+#define ipipe_percpudom(ipd, var, cpu)	(ipipe_percpudom_ptr(ipd, cpu)->var)
+#define ipipe_cpudom_var(ipd, var)	(ipipe_cpudom_ptr(ipd)->var)
+
+#define IPIPE_ROOT_SLOT			0
+#define IPIPE_HEAD_SLOT			(CONFIG_IPIPE_DOMAINS - 1)
+
+DECLARE_PER_CPU(struct ipipe_percpu_domain_data, ipipe_percpu_darray[CONFIG_IPIPE_DOMAINS]);
+
+DECLARE_PER_CPU(struct ipipe_domain *, ipipe_percpu_domain);
+
+DECLARE_PER_CPU(unsigned long, ipipe_nmi_saved_root);
+
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
+DECLARE_PER_CPU(int, ipipe_percpu_context_check);
+DECLARE_PER_CPU(int, ipipe_saved_context_check_state);
+#endif
+
+#define ipipe_root_cpudom_ptr(var)	\
+	(&__ipipe_get_cpu_var(ipipe_percpu_darray)[IPIPE_ROOT_SLOT])